hadoop-common-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r1561944 - in /hadoop/common/branches/HDFS-5535/hadoop-common-project: hadoop-auth/src/site/apt/ hadoop-common/ hadoop-common/dev-support/ hadoop-common/src/main/java/ hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/ hadoop-common...
Date: Tue, 28 Jan 2014 05:37:14 GMT
Author: szetszwo
Date: Tue Jan 28 05:37:13 2014
New Revision: 1561944

URL: http://svn.apache.org/r1561944
Log:
Merge r1555021 through r1561943 from trunk.

Added:
    hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ComparableVersion.java
      - copied unchanged from r1561943, hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ComparableVersion.java
    hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm
      - copied unchanged from r1561943, hadoop/common/trunk/hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm
Modified:
    hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm
    hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-auth/src/site/apt/index.apt.vm
    hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/CHANGES.txt   (contents, props changed)
    hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
    hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java
    hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLog.java
    hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
    hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java
    hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/CLIMiniCluster.apt.vm
    hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm
    hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm
    hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/Compatibility.apt.vm
    hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm
    hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/InterfaceClassification.apt.vm
    hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/NativeLibraries.apt.vm
    hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/ServiceLevelAuth.apt.vm
    hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm
    hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java

Modified: hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm?rev=1561944&r1=1561943&r2=1561944&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm Tue Jan 28 05:37:13 2014
@@ -24,8 +24,7 @@ Configuration
 
 * Server Side Configuration Setup
 
-  The {{{./apidocs/org/apache/hadoop/auth/server/AuthenticationFilter.html}
-  AuthenticationFilter filter}} is Hadoop Auth's server side component.
+  The AuthenticationFilter filter is Hadoop Auth's server side component.
 
   This filter must be configured in front of all the web application resources
   that required authenticated requests. For example:
@@ -46,9 +45,7 @@ Configuration
     must start with the prefix. The default value is no prefix.
 
   * <<<[PREFIX.]type>>>: the authentication type keyword (<<<simple>>> or
-    <<<kerberos>>>) or a
-    {{{./apidocs/org/apache/hadoop/auth/server/AuthenticationHandler.html}
-    Authentication handler implementation}}.
+    <<<kerberos>>>) or an Authentication handler implementation.
 
   * <<<[PREFIX.]signature.secret>>>: The secret to SHA-sign the generated
     authentication tokens. If a secret is not provided a random secret is

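The AuthenticationFilter referenced above is an ordinary servlet filter, so it is wired into a web application's web.xml. A minimal sketch, assuming the hadoop-auth filter class name and using the type and signature.secret properties documented in this file (filter name, mapping, and values here are illustrative):

----
<filter>
  <filter-name>hadoopAuth</filter-name>
  <!-- Assumed class name from the hadoop-auth module -->
  <filter-class>org.apache.hadoop.security.authentication.server.AuthenticationFilter</filter-class>
  <init-param>
    <param-name>type</param-name>
    <param-value>simple</param-value>  <!-- or "kerberos", or a handler class -->
  </init-param>
  <init-param>
    <param-name>signature.secret</param-name>
    <param-value>some-random-secret</param-value>
  </init-param>
</filter>
<filter-mapping>
  <filter-name>hadoopAuth</filter-name>
  <url-pattern>/*</url-pattern>
</filter-mapping>
----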
Modified: hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-auth/src/site/apt/index.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-auth/src/site/apt/index.apt.vm?rev=1561944&r1=1561943&r2=1561944&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-auth/src/site/apt/index.apt.vm (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-auth/src/site/apt/index.apt.vm Tue Jan 28 05:37:13 2014
@@ -52,7 +52,3 @@ Hadoop Auth, Java HTTP SPNEGO ${project.
 
   * {{{./BuildingIt.html}Building It}}
 
-  * {{{./apidocs/index.html}JavaDocs}}
-
-  * {{{./dependencies.html}Dependencies}}
-

Modified: hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1561944&r1=1561943&r2=1561944&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/CHANGES.txt Tue Jan 28 05:37:13 2014
@@ -536,6 +536,15 @@ Release 2.4.0 - UNRELEASED
     HADOOP-10252. HttpServer can't start if hostname is not specified. (Jimmy
     Xiang via atm)
 
+    HADOOP-10203. Connection leak in
+    Jets3tNativeFileSystemStore#retrieveMetadata. (Andrei Savu via atm)
+
+    HADOOP-10250. VersionUtil returns wrong value when comparing two versions.
+    (Yongjun Zhang via atm)
+
+    HADOOP-10288. Explicit reference to Log4JLogger breaks non-log4j users
+    (todd)
+
 Release 2.3.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -559,6 +568,12 @@ Release 2.3.0 - UNRELEASED
     HADOOP-10248. Property name should be included in the exception where property value 
     is null (Akira AJISAKA via umamahesh)
 
+    HADOOP-10086. User document for authentication in secure cluster.
+    (Masatake Iwasaki via Arpit Agarwal)
+
+    HADOOP-10274 Lower the logging level from ERROR to WARN for UGI.doAs method
+    (Takeshi Miao via stack)
+
   OPTIMIZATIONS
 
     HADOOP-10142. Avoid groups lookup for unprivileged users such as "dr.who"
@@ -637,6 +652,12 @@ Release 2.3.0 - UNRELEASED
     HADOOP-10167. Mark hadoop-common source as UTF-8 in Maven pom files / refactoring
     (Mikhail Antonov via cos)
 
+    HADOOP-9982. Fix dead links in hadoop site docs. (Akira Ajisaka via Arpit
+    Agarwal)
+
+    HADOOP-10212. Incorrect compile command in Native Library document.
+    (Akira Ajisaka via Arpit Agarwal)
+
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES

Propchange: hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/CHANGES.txt
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt:r1561511-1561943

Modified: hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml?rev=1561944&r1=1561943&r2=1561944&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml Tue Jan 28 05:37:13 2014
@@ -364,4 +364,11 @@
       <Bug pattern="OBL_UNSATISFIED_OBLIGATION"/>
     </Match>
 
+     <!-- code from maven source, null value is checked at callee side. -->
+     <Match>
+       <Class name="org.apache.hadoop.util.ComparableVersion$ListItem" />
+       <Method name="compareTo" />
+       <Bug code="NP" />
+     </Match>
+
 </FindBugsFilter>

Propchange: hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java:r1561511-1561943

Modified: hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java?rev=1561944&r1=1561943&r2=1561944&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java Tue Jan 28 05:37:13 2014
@@ -110,23 +110,29 @@ class Jets3tNativeFileSystemStore implem
       handleS3ServiceException(e);
     }
   }
-  
+
   @Override
   public FileMetadata retrieveMetadata(String key) throws IOException {
+    StorageObject object = null;
     try {
       if(LOG.isDebugEnabled()) {
         LOG.debug("Getting metadata for key: " + key + " from bucket:" + bucket.getName());
       }
-      S3Object object = s3Service.getObject(bucket.getName(), key);
+      object = s3Service.getObjectDetails(bucket.getName(), key);
       return new FileMetadata(key, object.getContentLength(),
           object.getLastModifiedDate().getTime());
-    } catch (S3ServiceException e) {
+
+    } catch (ServiceException e) {
       // Following is brittle. Is there a better way?
-      if (e.getS3ErrorCode().matches("NoSuchKey")) {
+      if ("NoSuchKey".equals(e.getErrorCode())) {
         return null; //return null if key not found
       }
-      handleS3ServiceException(e);
+      handleServiceException(e);
       return null; //never returned - keep compiler happy
+    } finally {
+      if (object != null) {
+        object.closeDataInputStream();
+      }
     }
   }
 

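The hunk above is the HADOOP-10203 connection-leak fix: getObject() opens the object's payload stream, which this method never read or closed, while getObjectDetails() performs a metadata-only request; the finally block then releases any stream that was opened. A self-contained sketch of the same pattern (class, method, and variable names are illustrative; the JetS3t calls are the ones used in the diff):

----
import java.io.IOException;

import org.jets3t.service.S3Service;
import org.jets3t.service.ServiceException;
import org.jets3t.service.model.StorageObject;

final class S3MetadataSketch {
  /** Returns the object's length without leaking the HTTP connection. */
  static long contentLength(S3Service s3Service, String bucketName, String key)
      throws IOException {
    StorageObject object = null;
    try {
      // Metadata-only request; the old getObject() call also opened the
      // object's payload stream, which was the source of the leak.
      object = s3Service.getObjectDetails(bucketName, key);
      return object.getContentLength();
    } catch (ServiceException e) {
      throw new IOException("Could not read metadata for key " + key, e);
    } finally {
      if (object != null) {
        object.closeDataInputStream(); // no-op unless a stream was opened
      }
    }
  }
}
----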
Modified: hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLog.java?rev=1561944&r1=1561943&r2=1561944&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLog.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLog.java Tue Jan 28 05:37:13 2014
@@ -53,7 +53,17 @@ public class HttpRequestLog {
     String appenderName = name + "requestlog";
     Log logger = LogFactory.getLog(loggerName);
 
-    if (logger instanceof Log4JLogger) {
+    boolean isLog4JLogger;
+    try {
+      isLog4JLogger = logger instanceof Log4JLogger;
+    } catch (NoClassDefFoundError err) {
+      // In some dependent projects, log4j may not even be on the classpath at
+      // runtime, in which case the above instanceof check will throw
+      // NoClassDefFoundError.
+      LOG.debug("Could not load Log4JLogger class", err);
+      isLog4JLogger = false;
+    }
+    if (isLog4JLogger) {
       Log4JLogger httpLog4JLog = (Log4JLogger)logger;
       Logger httpLogger = httpLog4JLog.getLogger();
       Appender appender = null;

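The try/catch above is needed because evaluating logger instanceof Log4JLogger forces the Log4JLogger class to load, and when log4j is absent from the runtime classpath that load fails with NoClassDefFoundError rather than the check returning false. A reflective variant of the same defensive probe (a sketch; the helper class and method names are hypothetical):

----
final class OptionalClassProbe {
  /** True iff the named class is loadable and candidate is an instance of it. */
  static boolean isInstanceOf(Object candidate, String optionalClassName) {
    try {
      return Class.forName(optionalClassName).isInstance(candidate);
    } catch (ClassNotFoundException e) {
      return false; // dependency not on the classpath
    } catch (LinkageError e) {
      return false; // class found but failed to link (e.g. NoClassDefFoundError)
    }
  }
}

// Usage, equivalent in spirit to the committed check:
//   boolean isLog4JLogger = OptionalClassProbe.isInstanceOf(
//       logger, "org.apache.commons.logging.impl.Log4JLogger");
----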
Modified: hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java?rev=1561944&r1=1561943&r2=1561944&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java Tue Jan 28 05:37:13 2014
@@ -1560,7 +1560,7 @@ public class UserGroupInformation {
       return Subject.doAs(subject, action);
     } catch (PrivilegedActionException pae) {
       Throwable cause = pae.getCause();
-      LOG.error("PriviledgedActionException as:"+this+" cause:"+cause);
+      LOG.warn("PriviledgedActionException as:"+this+" cause:"+cause);
       if (cause instanceof IOException) {
         throw (IOException) cause;
       } else if (cause instanceof Error) {

Modified: hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java?rev=1561944&r1=1561943&r2=1561944&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java Tue Jan 28 05:37:13 2014
@@ -17,55 +17,17 @@
  */
 package org.apache.hadoop.util;
 
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 
-import com.google.common.collect.ComparisonChain;
-
+/**
+ * A wrapper class for maven's ComparableVersion class, to comply
+ * with maven's version name string convention.
+ */
 @InterfaceAudience.Private
 public abstract class VersionUtil {
-  
-  private static final Pattern COMPONENT_GROUPS = Pattern.compile("(\\d+)|(\\D+)");
-
-  /**
-   * Suffix added by maven for nightly builds and other snapshot releases.
-   * These releases are considered to precede the non-SNAPSHOT version
-   * with the same version number.
-   */
-  private static final String SNAPSHOT_SUFFIX = "-SNAPSHOT";
-
   /**
-   * This function splits the two versions on &quot;.&quot; and performs a
-   * naturally-ordered comparison of the resulting components. For example, the
-   * version string "0.3" is considered to precede "0.20", despite the fact that
-   * lexical comparison would consider "0.20" to precede "0.3". This method of
-   * comparison is similar to the method used by package versioning systems like
-   * deb and RPM.
-   * 
-   * Version components are compared numerically whenever possible, however a
-   * version component can contain non-numeric characters. When a non-numeric
-   * group of characters is found in a version component, this group is compared
-   * with the similarly-indexed group in the other version component. If the
-   * other group is numeric, then the numeric group is considered to precede the
-   * non-numeric group. If both groups are non-numeric, then a lexical
-   * comparison is performed.
-   * 
-   * If two versions have a different number of components, then only the lower
-   * number of components are compared. If those components are identical
-   * between the two versions, then the version with fewer components is
-   * considered to precede the version with more components.
-   * 
-   * In addition to the above rules, there is one special case: maven SNAPSHOT
-   * releases are considered to precede a non-SNAPSHOT release with an
-   * otherwise identical version number. For example, 2.0-SNAPSHOT precedes
-   * 2.0.
-   * 
-   * This function returns a negative integer if version1 precedes version2, a
-   * positive integer if version2 precedes version1, and 0 if and only if the
-   * two versions' components are identical in value and cardinality.
-   * 
+   * Compares two version name strings using maven's ComparableVersion class.
+   *
    * @param version1
    *          the first version to compare
    * @param version2
@@ -75,58 +37,8 @@ public abstract class VersionUtil {
    *         versions are equal.
    */
   public static int compareVersions(String version1, String version2) {
-    boolean isSnapshot1 = version1.endsWith(SNAPSHOT_SUFFIX);
-    boolean isSnapshot2 = version2.endsWith(SNAPSHOT_SUFFIX);
-    version1 = stripSnapshotSuffix(version1);
-    version2 = stripSnapshotSuffix(version2);
-    
-    String[] version1Parts = version1.split("\\.");
-    String[] version2Parts = version2.split("\\.");
-    
-    for (int i = 0; i < version1Parts.length && i < version2Parts.length; i++) {
-      String component1 = version1Parts[i];
-      String component2 = version2Parts[i];
-      if (!component1.equals(component2)) {
-        Matcher matcher1 = COMPONENT_GROUPS.matcher(component1);
-        Matcher matcher2 = COMPONENT_GROUPS.matcher(component2);
-        
-        while (matcher1.find() && matcher2.find()) {
-          String group1 = matcher1.group();
-          String group2 = matcher2.group();
-          if (!group1.equals(group2)) {
-            if (isNumeric(group1) && isNumeric(group2)) {
-              return Integer.parseInt(group1) - Integer.parseInt(group2);
-            } else if (!isNumeric(group1) && !isNumeric(group2)) {
-              return group1.compareTo(group2);
-            } else {
-              return isNumeric(group1) ? -1 : 1;
-            }
-          }
-        }
-        return component1.length() - component2.length();
-      }
-    }
-    
-    return ComparisonChain.start()
-      .compare(version1Parts.length, version2Parts.length)
-      .compare(isSnapshot2, isSnapshot1)
-      .result();
-  }
-  
-  private static String stripSnapshotSuffix(String version) {
-    if (version.endsWith(SNAPSHOT_SUFFIX)) {
-      return version.substring(0, version.length() - SNAPSHOT_SUFFIX.length());
-    } else {
-      return version;
-    }
-  }
-
-  private static boolean isNumeric(String s) {
-    try {
-      Integer.parseInt(s);
-      return true;
-    } catch (NumberFormatException nfe) {
-      return false;
-    }
+    ComparableVersion v1 = new ComparableVersion(version1);
+    ComparableVersion v2 = new ComparableVersion(version2);
+    return v1.compareTo(v2);
   }
 }

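With the hand-rolled comparator replaced by maven's ComparableVersion, compareVersions follows maven's ordering rules: numeric components compare numerically, trailing zero components are insignificant, and -SNAPSHOT precedes the corresponding release. A small illustrative check, consistent with the updated TestVersionUtil cases further below:

----
import org.apache.hadoop.util.VersionUtil;

public class VersionUtilDemo {
  public static void main(String[] args) {
    // Each line prints "true" under the ComparableVersion-backed comparison.
    System.out.println(VersionUtil.compareVersions("1.0", "1.0.0") == 0);           // trailing .0 insignificant
    System.out.println(VersionUtil.compareVersions("0.3", "0.20") < 0);             // numeric, not lexical
    System.out.println(VersionUtil.compareVersions("2.0.0-SNAPSHOT", "2.0.0") < 0); // snapshot precedes release
  }
}
----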
Modified: hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/CLIMiniCluster.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/CLIMiniCluster.apt.vm?rev=1561944&r1=1561943&r2=1561944&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/CLIMiniCluster.apt.vm (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/CLIMiniCluster.apt.vm Tue Jan 28 05:37:13 2014
@@ -18,8 +18,6 @@
 
 Hadoop MapReduce Next Generation - CLI MiniCluster.
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * {Purpose}
@@ -42,7 +40,8 @@ Hadoop MapReduce Next Generation - CLI M
 $ mvn clean install -DskipTests
 $ mvn package -Pdist -Dtar -DskipTests -Dmaven.javadoc.skip
 +---+
-  <<NOTE:>> You will need protoc 2.5.0 installed.
+  <<NOTE:>> You will need {{{http://code.google.com/p/protobuf/}protoc 2.5.0}}
+            installed.
 
   The tarball should be available in <<<hadoop-dist/target/>>> directory. 
 

Modified: hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm?rev=1561944&r1=1561943&r2=1561944&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm Tue Jan 28 05:37:13 2014
@@ -16,8 +16,6 @@
   ---
   ${maven.build.timestamp}
 
-  \[ {{{../index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 Hadoop MapReduce Next Generation - Cluster Setup
@@ -29,7 +27,7 @@ Hadoop MapReduce Next Generation - Clust
   with thousands of nodes.
 
   To play with Hadoop, you may first want to install it on a single
-  machine (see {{{SingleCluster}Single Node Setup}}).
+  machine (see {{{./SingleCluster.html}Single Node Setup}}).
 
 * {Prerequisites}
 
@@ -571,440 +569,6 @@ $ $HADOOP_YARN_HOME/sbin/yarn-daemon.sh 
 $ $HADOOP_PREFIX/sbin/mr-jobhistory-daemon.sh stop historyserver --config $HADOOP_CONF_DIR
 ----    	
 
-* {Running Hadoop in Secure Mode}
-
-  This section deals with important parameters to be specified in
-  to run Hadoop in <<secure mode>> with strong, Kerberos-based
-  authentication.
-
-  * <<<User Accounts for Hadoop Daemons>>>
-
-  Ensure that HDFS and YARN daemons run as different Unix users, for e.g.
-  <<<hdfs>>> and <<<yarn>>>. Also, ensure that the MapReduce JobHistory
-  server runs as user <<<mapred>>>.
-
-  It's recommended to have them share a Unix group, for e.g. <<<hadoop>>>.
-
-*---------------+----------------------------------------------------------------------+
-|| User:Group   || Daemons                                                             |
-*---------------+----------------------------------------------------------------------+
-| hdfs:hadoop   | NameNode, Secondary NameNode, Checkpoint Node, Backup Node, DataNode |
-*---------------+----------------------------------------------------------------------+
-| yarn:hadoop   | ResourceManager, NodeManager                                         |
-*---------------+----------------------------------------------------------------------+
-| mapred:hadoop | MapReduce JobHistory Server                                          |
-*---------------+----------------------------------------------------------------------+
-
-  * <<<Permissions for both HDFS and local fileSystem paths>>>
-
-  The following table lists various paths on HDFS and local filesystems (on
-  all nodes) and recommended permissions:
-
-*-------------------+-------------------+------------------+------------------+
-|| Filesystem       || Path             || User:Group      || Permissions     |
-*-------------------+-------------------+------------------+------------------+
-| local | <<<dfs.namenode.name.dir>>> | hdfs:hadoop | drwx------ |
-*-------------------+-------------------+------------------+------------------+
-| local | <<<dfs.datanode.data.dir>>> | hdfs:hadoop | drwx------ |
-*-------------------+-------------------+------------------+------------------+
-| local | $HADOOP_LOG_DIR | hdfs:hadoop | drwxrwxr-x |
-*-------------------+-------------------+------------------+------------------+
-| local | $YARN_LOG_DIR | yarn:hadoop | drwxrwxr-x |
-*-------------------+-------------------+------------------+------------------+
-| local | <<<yarn.nodemanager.local-dirs>>> | yarn:hadoop | drwxr-xr-x |
-*-------------------+-------------------+------------------+------------------+
-| local | <<<yarn.nodemanager.log-dirs>>> | yarn:hadoop | drwxr-xr-x |
-*-------------------+-------------------+------------------+------------------+
-| local | container-executor | root:hadoop | --Sr-s--- |
-*-------------------+-------------------+------------------+------------------+
-| local | <<<conf/container-executor.cfg>>> | root:hadoop | r-------- |
-*-------------------+-------------------+------------------+------------------+
-| hdfs | / | hdfs:hadoop | drwxr-xr-x |
-*-------------------+-------------------+------------------+------------------+
-| hdfs | /tmp | hdfs:hadoop | drwxrwxrwxt |
-*-------------------+-------------------+------------------+------------------+
-| hdfs | /user | hdfs:hadoop | drwxr-xr-x |
-*-------------------+-------------------+------------------+------------------+
-| hdfs | <<<yarn.nodemanager.remote-app-log-dir>>> | yarn:hadoop | drwxrwxrwxt |
-*-------------------+-------------------+------------------+------------------+
-| hdfs | <<<mapreduce.jobhistory.intermediate-done-dir>>> | mapred:hadoop | |
-| | | | drwxrwxrwxt |
-*-------------------+-------------------+------------------+------------------+
-| hdfs | <<<mapreduce.jobhistory.done-dir>>> | mapred:hadoop | |
-| | | | drwxr-x--- |
-*-------------------+-------------------+------------------+------------------+
-
-  * Kerberos Keytab files
-
-    * HDFS
-
-    The NameNode keytab file, on the NameNode host, should look like the
-    following:
-
-----
-$ /usr/kerberos/bin/klist -e -k -t /etc/security/keytab/nn.service.keytab
-Keytab name: FILE:/etc/security/keytab/nn.service.keytab
-KVNO Timestamp         Principal
-   4 07/18/11 21:08:09 nn/full.qualified.domain.name@REALM.TLD (AES-256 CTS mode with 96-bit SHA-1 HMAC)
-   4 07/18/11 21:08:09 nn/full.qualified.domain.name@REALM.TLD (AES-128 CTS mode with 96-bit SHA-1 HMAC)
-   4 07/18/11 21:08:09 nn/full.qualified.domain.name@REALM.TLD (ArcFour with HMAC/md5)
-   4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (AES-256 CTS mode with 96-bit SHA-1 HMAC)
-   4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (AES-128 CTS mode with 96-bit SHA-1 HMAC)
-   4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (ArcFour with HMAC/md5)
-----
-
-    The Secondary NameNode keytab file, on that host, should look like the
-    following:
-
-----
-$ /usr/kerberos/bin/klist -e -k -t /etc/security/keytab/sn.service.keytab
-Keytab name: FILE:/etc/security/keytab/sn.service.keytab
-KVNO Timestamp         Principal
-   4 07/18/11 21:08:09 sn/full.qualified.domain.name@REALM.TLD (AES-256 CTS mode with 96-bit SHA-1 HMAC)
-   4 07/18/11 21:08:09 sn/full.qualified.domain.name@REALM.TLD (AES-128 CTS mode with 96-bit SHA-1 HMAC)
-   4 07/18/11 21:08:09 sn/full.qualified.domain.name@REALM.TLD (ArcFour with HMAC/md5)
-   4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (AES-256 CTS mode with 96-bit SHA-1 HMAC)
-   4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (AES-128 CTS mode with 96-bit SHA-1 HMAC)
-   4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (ArcFour with HMAC/md5)
-----
-
-    The DataNode keytab file, on each host, should look like the following:
-
-----
-$ /usr/kerberos/bin/klist -e -k -t /etc/security/keytab/dn.service.keytab
-Keytab name: FILE:/etc/security/keytab/dn.service.keytab
-KVNO Timestamp         Principal
-   4 07/18/11 21:08:09 dn/full.qualified.domain.name@REALM.TLD (AES-256 CTS mode with 96-bit SHA-1 HMAC)
-   4 07/18/11 21:08:09 dn/full.qualified.domain.name@REALM.TLD (AES-128 CTS mode with 96-bit SHA-1 HMAC)
-   4 07/18/11 21:08:09 dn/full.qualified.domain.name@REALM.TLD (ArcFour with HMAC/md5)
-   4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (AES-256 CTS mode with 96-bit SHA-1 HMAC)
-   4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (AES-128 CTS mode with 96-bit SHA-1 HMAC)
-   4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (ArcFour with HMAC/md5)
-----
-
-    * YARN
-
-    The ResourceManager keytab file, on the ResourceManager host, should look
-    like the following:
-
-----
-$ /usr/kerberos/bin/klist -e -k -t /etc/security/keytab/rm.service.keytab
-Keytab name: FILE:/etc/security/keytab/rm.service.keytab
-KVNO Timestamp         Principal
-   4 07/18/11 21:08:09 rm/full.qualified.domain.name@REALM.TLD (AES-256 CTS mode with 96-bit SHA-1 HMAC)
-   4 07/18/11 21:08:09 rm/full.qualified.domain.name@REALM.TLD (AES-128 CTS mode with 96-bit SHA-1 HMAC)
-   4 07/18/11 21:08:09 rm/full.qualified.domain.name@REALM.TLD (ArcFour with HMAC/md5)
-   4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (AES-256 CTS mode with 96-bit SHA-1 HMAC)
-   4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (AES-128 CTS mode with 96-bit SHA-1 HMAC)
-   4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (ArcFour with HMAC/md5)
-----
-
-    The NodeManager keytab file, on each host, should look like the following:
-
-----
-$ /usr/kerberos/bin/klist -e -k -t /etc/security/keytab/nm.service.keytab
-Keytab name: FILE:/etc/security/keytab/nm.service.keytab
-KVNO Timestamp         Principal
-   4 07/18/11 21:08:09 nm/full.qualified.domain.name@REALM.TLD (AES-256 CTS mode with 96-bit SHA-1 HMAC)
-   4 07/18/11 21:08:09 nm/full.qualified.domain.name@REALM.TLD (AES-128 CTS mode with 96-bit SHA-1 HMAC)
-   4 07/18/11 21:08:09 nm/full.qualified.domain.name@REALM.TLD (ArcFour with HMAC/md5)
-   4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (AES-256 CTS mode with 96-bit SHA-1 HMAC)
-   4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (AES-128 CTS mode with 96-bit SHA-1 HMAC)
-   4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (ArcFour with HMAC/md5)
-----
-
-    * MapReduce JobHistory Server
-
-    The MapReduce JobHistory Server keytab file, on that host, should look
-    like the following:
-
-----
-$ /usr/kerberos/bin/klist -e -k -t /etc/security/keytab/jhs.service.keytab
-Keytab name: FILE:/etc/security/keytab/jhs.service.keytab
-KVNO Timestamp         Principal
-   4 07/18/11 21:08:09 jhs/full.qualified.domain.name@REALM.TLD (AES-256 CTS mode with 96-bit SHA-1 HMAC)
-   4 07/18/11 21:08:09 jhs/full.qualified.domain.name@REALM.TLD (AES-128 CTS mode with 96-bit SHA-1 HMAC)
-   4 07/18/11 21:08:09 jhs/full.qualified.domain.name@REALM.TLD (ArcFour with HMAC/md5)
-   4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (AES-256 CTS mode with 96-bit SHA-1 HMAC)
-   4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (AES-128 CTS mode with 96-bit SHA-1 HMAC)
-   4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (ArcFour with HMAC/md5)
-----
-
-** Configuration in Secure Mode
-
-  * <<<conf/core-site.xml>>>
-
-*-------------------------+-------------------------+------------------------+
-|| Parameter              || Value                  || Notes                 |
-*-------------------------+-------------------------+------------------------+
-| <<<hadoop.security.authentication>>> | <kerberos> | <simple> is non-secure. |
-*-------------------------+-------------------------+------------------------+
-| <<<hadoop.security.authorization>>> | <true> | |
-| | | Enable RPC service-level authorization. |
-*-------------------------+-------------------------+------------------------+
-
-  * <<<conf/hdfs-site.xml>>>
-
-    * Configurations for NameNode:
-
-*-------------------------+-------------------------+------------------------+
-|| Parameter              || Value                  || Notes                 |
-*-------------------------+-------------------------+------------------------+
-| <<<dfs.block.access.token.enable>>> | <true> |  |
-| | | Enable HDFS block access tokens for secure operations. |
-*-------------------------+-------------------------+------------------------+
-| <<<dfs.https.enable>>> | <true> | |
-| | | This value is deprecated. Use dfs.http.policy |
-*-------------------------+-------------------------+------------------------+
-| <<<dfs.http.policy>>> | <HTTP_ONLY> or <HTTPS_ONLY> or <HTTP_AND_HTTPS> | |
-| | | HTTPS_ONLY turns off http access |
-*-------------------------+-------------------------+------------------------+
-| <<<dfs.namenode.https-address>>> | <nn_host_fqdn:50470> | |
-*-------------------------+-------------------------+------------------------+
-| <<<dfs.https.port>>> | <50470> | |
-*-------------------------+-------------------------+------------------------+
-| <<<dfs.namenode.keytab.file>>> | </etc/security/keytab/nn.service.keytab> | |
-| | | Kerberos keytab file for the NameNode. |
-*-------------------------+-------------------------+------------------------+
-| <<<dfs.namenode.kerberos.principal>>> | nn/_HOST@REALM.TLD | |
-| | | Kerberos principal name for the NameNode. |
-*-------------------------+-------------------------+------------------------+
-| <<<dfs.namenode.kerberos.https.principal>>> | host/_HOST@REALM.TLD | |
-| | | HTTPS Kerberos principal name for the NameNode. |
-*-------------------------+-------------------------+------------------------+
-
-    * Configurations for Secondary NameNode:
-
-*-------------------------+-------------------------+------------------------+
-|| Parameter              || Value                  || Notes                 |
-*-------------------------+-------------------------+------------------------+
-| <<<dfs.namenode.secondary.http-address>>> | <c_nn_host_fqdn:50090> | |
-*-------------------------+-------------------------+------------------------+
-| <<<dfs.namenode.secondary.https-port>>> | <50470> | |
-*-------------------------+-------------------------+------------------------+
-| <<<dfs.namenode.secondary.keytab.file>>> | | |
-| | </etc/security/keytab/sn.service.keytab> | |
-| | | Kerberos keytab file for the NameNode. |
-*-------------------------+-------------------------+------------------------+
-| <<<dfs.namenode.secondary.kerberos.principal>>> | sn/_HOST@REALM.TLD | |
-| | | Kerberos principal name for the Secondary NameNode. |
-*-------------------------+-------------------------+------------------------+
-| <<<dfs.namenode.secondary.kerberos.https.principal>>> | | |
-| | host/_HOST@REALM.TLD | |
-| | | HTTPS Kerberos principal name for the Secondary NameNode. |
-*-------------------------+-------------------------+------------------------+
-
-    * Configurations for DataNode:
-
-*-------------------------+-------------------------+------------------------+
-|| Parameter              || Value                  || Notes                 |
-*-------------------------+-------------------------+------------------------+
-| <<<dfs.datanode.data.dir.perm>>> | 700 | |
-*-------------------------+-------------------------+------------------------+
-| <<<dfs.datanode.address>>> | <0.0.0.0:2003> | |
-*-------------------------+-------------------------+------------------------+
-| <<<dfs.datanode.https.address>>> | <0.0.0.0:2005> | |
-*-------------------------+-------------------------+------------------------+
-| <<<dfs.datanode.keytab.file>>> | </etc/security/keytab/dn.service.keytab> | |
-| | | Kerberos keytab file for the DataNode. |
-*-------------------------+-------------------------+------------------------+
-| <<<dfs.datanode.kerberos.principal>>> | dn/_HOST@REALM.TLD | |
-| | | Kerberos principal name for the DataNode. |
-*-------------------------+-------------------------+------------------------+
-| <<<dfs.datanode.kerberos.https.principal>>> | | |
-| | host/_HOST@REALM.TLD | |
-| | | HTTPS Kerberos principal name for the DataNode. |
-*-------------------------+-------------------------+------------------------+
-
-  * <<<conf/yarn-site.xml>>>
-
-    * WebAppProxy
-
-    The <<<WebAppProxy>>> provides a proxy between the web applications
-    exported by an application and an end user.  If security is enabled
-    it will warn users before accessing a potentially unsafe web application.
-    Authentication and authorization using the proxy is handled just like
-    any other privileged web application.
-
-*-------------------------+-------------------------+------------------------+
-|| Parameter              || Value                  || Notes                 |
-*-------------------------+-------------------------+------------------------+
-| <<<yarn.web-proxy.address>>> | | |
-| | <<<WebAppProxy>>> host:port for proxy to AM web apps. | |
-| | | <host:port> if this is the same as <<<yarn.resourcemanager.webapp.address>>>|
-| | | or it is not defined then the <<<ResourceManager>>> will run the proxy|
-| | | otherwise a standalone proxy server will need to be launched.|
-*-------------------------+-------------------------+------------------------+
-| <<<yarn.web-proxy.keytab>>> | | |
-| | </etc/security/keytab/web-app.service.keytab> | |
-| | | Kerberos keytab file for the WebAppProxy. |
-*-------------------------+-------------------------+------------------------+
-| <<<yarn.web-proxy.principal>>> | wap/_HOST@REALM.TLD | |
-| | | Kerberos principal name for the WebAppProxy. |
-*-------------------------+-------------------------+------------------------+
-
-    * LinuxContainerExecutor
-
-    A <<<ContainerExecutor>>> used by YARN framework which define how any
-    <container> launched and controlled.
-
-    The following are the available in Hadoop YARN:
-
-*--------------------------------------+--------------------------------------+
-|| ContainerExecutor                   || Description                         |
-*--------------------------------------+--------------------------------------+
-| <<<DefaultContainerExecutor>>>             | |
-| | The default executor which YARN uses to manage container execution. |
-| | The container process has the same Unix user as the NodeManager.  |
-*--------------------------------------+--------------------------------------+
-| <<<LinuxContainerExecutor>>>               | |
-| | Supported only on GNU/Linux, this executor runs the containers as either the |
-| | YARN user who submitted the application (when full security is enabled) or |
-| | as a dedicated user (defaults to nobody) when full security is not enabled. |
-| | When full security is enabled, this executor requires all user accounts to be |
-| | created on the cluster nodes where the containers are launched. It uses |
-| | a <setuid> executable that is included in the Hadoop distribution. |
-| | The NodeManager uses this executable to launch and kill containers. |
-| | The setuid executable switches to the user who has submitted the |
-| | application and launches or kills the containers. For maximum security, |
-| | this executor sets up restricted permissions and user/group ownership of |
-| | local files and directories used by the containers such as the shared |
-| | objects, jars, intermediate files, log files etc. Particularly note that, |
-| | because of this, except the application owner and NodeManager, no other |
-| | user can access any of the local files/directories including those |
-| | localized as part of the distributed cache. |
-*--------------------------------------+--------------------------------------+
-
-    To build the LinuxContainerExecutor executable run:
-
-----
- $ mvn package -Dcontainer-executor.conf.dir=/etc/hadoop/
-----
-
-    The path passed in <<<-Dcontainer-executor.conf.dir>>> should be the
-    path on the cluster nodes where a configuration file for the setuid
-    executable should be located. The executable should be installed in
-    $HADOOP_YARN_HOME/bin.
-
-    The executable must have specific permissions: 6050 or --Sr-s---
-    permissions user-owned by <root> (super-user) and group-owned by a
-    special group (e.g. <<<hadoop>>>) of which the NodeManager Unix user is
-    the group member and no ordinary application user is. If any application
-    user belongs to this special group, security will be compromised. This
-    special group name should be specified for the configuration property
-    <<<yarn.nodemanager.linux-container-executor.group>>> in both
-    <<<conf/yarn-site.xml>>> and <<<conf/container-executor.cfg>>>.
-
-    For example, let's say that the NodeManager is run as user <yarn> who is
-    part of the groups users and <hadoop>, any of them being the primary group.
-    Let also be that <users> has both <yarn> and another user
-    (application submitter) <alice> as its members, and <alice> does not
-    belong to <hadoop>. Going by the above description, the setuid/setgid
-    executable should be set 6050 or --Sr-s--- with user-owner as <yarn> and
-    group-owner as <hadoop> which has <yarn> as its member (and not <users>
-    which has <alice> also as its member besides <yarn>).
-
-    The LinuxTaskController requires that paths including and leading up to
-    the directories specified in <<<yarn.nodemanager.local-dirs>>> and
-    <<<yarn.nodemanager.log-dirs>>> to be set 755 permissions as described
-    above in the table on permissions on directories.
-
-      * <<<conf/container-executor.cfg>>>
-
-    The executable requires a configuration file called
-    <<<container-executor.cfg>>> to be present in the configuration
-    directory passed to the mvn target mentioned above.
-
-    The configuration file must be owned by the user running NodeManager
-    (user <<<yarn>>> in the above example), group-owned by anyone and
-    should have the permissions 0400 or r--------.
-
-    The executable requires following configuration items to be present
-    in the <<<conf/container-executor.cfg>>> file. The items should be
-    mentioned as simple key=value pairs, one per-line:
-
-*-------------------------+-------------------------+------------------------+
-|| Parameter              || Value                  || Notes                 |
-*-------------------------+-------------------------+------------------------+
-| <<<yarn.nodemanager.linux-container-executor.group>>> | <hadoop> | |
-| | | Unix group of the NodeManager. The group owner of the |
-| | |<container-executor> binary should be this group. Should be same as the |
-| | | value with which the NodeManager is configured. This configuration is |
-| | | required for validating the secure access of the <container-executor> |
-| | | binary. |
-*-------------------------+-------------------------+------------------------+
-| <<<banned.users>>> | hfds,yarn,mapred,bin | Banned users. |
-*-------------------------+-------------------------+------------------------+
-| <<<allowed.system.users>>> | foo,bar | Allowed system users. |
-*-------------------------+-------------------------+------------------------+
-| <<<min.user.id>>> | 1000 | Prevent other super-users. |
-*-------------------------+-------------------------+------------------------+
-
-      To re-cap, here are the local file-sysytem permissions required for the
-      various paths related to the <<<LinuxContainerExecutor>>>:
-
-*-------------------+-------------------+------------------+------------------+
-|| Filesystem       || Path             || User:Group      || Permissions     |
-*-------------------+-------------------+------------------+------------------+
-| local | container-executor | root:hadoop | --Sr-s--- |
-*-------------------+-------------------+------------------+------------------+
-| local | <<<conf/container-executor.cfg>>> | root:hadoop | r-------- |
-*-------------------+-------------------+------------------+------------------+
-| local | <<<yarn.nodemanager.local-dirs>>> | yarn:hadoop | drwxr-xr-x |
-*-------------------+-------------------+------------------+------------------+
-| local | <<<yarn.nodemanager.log-dirs>>> | yarn:hadoop | drwxr-xr-x |
-*-------------------+-------------------+------------------+------------------+
-
-      * Configurations for ResourceManager:
-
-*-------------------------+-------------------------+------------------------+
-|| Parameter              || Value                  || Notes                 |
-*-------------------------+-------------------------+------------------------+
-| <<<yarn.resourcemanager.keytab>>> | | |
-| | </etc/security/keytab/rm.service.keytab> | |
-| | | Kerberos keytab file for the ResourceManager. |
-*-------------------------+-------------------------+------------------------+
-| <<<yarn.resourcemanager.principal>>> | rm/_HOST@REALM.TLD | |
-| | | Kerberos principal name for the ResourceManager. |
-*-------------------------+-------------------------+------------------------+
-
-      * Configurations for NodeManager:
-
-*-------------------------+-------------------------+------------------------+
-|| Parameter              || Value                  || Notes                 |
-*-------------------------+-------------------------+------------------------+
-| <<<yarn.nodemanager.keytab>>> | </etc/security/keytab/nm.service.keytab> | |
-| | | Kerberos keytab file for the NodeManager. |
-*-------------------------+-------------------------+------------------------+
-| <<<yarn.nodemanager.principal>>> | nm/_HOST@REALM.TLD | |
-| | | Kerberos principal name for the NodeManager. |
-*-------------------------+-------------------------+------------------------+
-| <<<yarn.nodemanager.container-executor.class>>> | | |
-| | <<<org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor>>> |
-| | | Use LinuxContainerExecutor. |
-*-------------------------+-------------------------+------------------------+
-| <<<yarn.nodemanager.linux-container-executor.group>>> | <hadoop> | |
-| | | Unix group of the NodeManager. |
-*-------------------------+-------------------------+------------------------+
-
-    * <<<conf/mapred-site.xml>>>
-
-      * Configurations for MapReduce JobHistory Server:
-
-*-------------------------+-------------------------+------------------------+
-|| Parameter              || Value                  || Notes                 |
-*-------------------------+-------------------------+------------------------+
-| <<<mapreduce.jobhistory.address>>> | | |
-| | MapReduce JobHistory Server <host:port> | Default port is 10020. |
-*-------------------------+-------------------------+------------------------+
-| <<<mapreduce.jobhistory.keytab>>> | |
-| | </etc/security/keytab/jhs.service.keytab> | |
-| | | Kerberos keytab file for the MapReduce JobHistory Server. |
-*-------------------------+-------------------------+------------------------+
-| <<<mapreduce.jobhistory.principal>>> | jhs/_HOST@REALM.TLD | |
-| | | Kerberos principal name for the MapReduce JobHistory Server. |
-*-------------------------+-------------------------+------------------------+
-
 
 * {Operating the Hadoop Cluster}
 

Modified: hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm?rev=1561944&r1=1561943&r2=1561944&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm Tue Jan 28 05:37:13 2014
@@ -44,8 +44,9 @@ Overview
 Generic Options
 
    The following options are supported by {{dfsadmin}}, {{fs}}, {{fsck}},
-   {{job}} and {{fetchdt}}. Applications should implement {{{some_useful_url}Tool}} to support
-   {{{another_useful_url}GenericOptions}}.
+   {{job}} and {{fetchdt}}. Applications should implement 
+   {{{../../api/org/apache/hadoop/util/Tool.html}Tool}} to support
+   GenericOptions.
 
 *------------------------------------------------+-----------------------------+
 ||            GENERIC_OPTION                     ||            Description
@@ -123,7 +124,8 @@ User Commands
 
 * <<<fsck>>>
 
-   Runs a HDFS filesystem checking utility. See {{Fsck}} for more info.
+   Runs a HDFS filesystem checking utility.
+   See {{{../hadoop-hdfs/HdfsUserGuide.html#fsck}fsck}} for more info.
 
    Usage: <<<hadoop fsck [GENERIC_OPTIONS] <path> [-move | -delete | -openforwrite] [-files [-blocks [-locations | -racks]]]>>>
 
@@ -149,7 +151,8 @@ User Commands
 
 * <<<fetchdt>>>
 
-   Gets Delegation Token from a NameNode. See {{fetchdt}} for more info.
+   Gets Delegation Token from a NameNode.
+   See {{{../hadoop-hdfs/HdfsUserGuide.html#fetchdt}fetchdt}} for more info.
 
    Usage: <<<hadoop fetchdt [GENERIC_OPTIONS] [--webservice <namenode_http_addr>] <path> >>>
 
@@ -302,7 +305,8 @@ Administration Commands
 * <<<balancer>>>
 
    Runs a cluster balancing utility. An administrator can simply press Ctrl-C
-   to stop the rebalancing process. See Rebalancer for more details.
+   to stop the rebalancing process. See
+   {{{../hadoop-hdfs/HdfsUserGuide.html#Rebalancer}Rebalancer}} for more details.
 
    Usage: <<<hadoop balancer [-threshold <threshold>]>>>
 
@@ -445,7 +449,7 @@ Administration Commands
 * <<<namenode>>>
 
    Runs the namenode. More info about the upgrade, rollback and finalize is
-   at Upgrade Rollback
+   at {{{../hadoop-hdfs/HdfsUserGuide.html#Upgrade_and_Rollback}Upgrade Rollback}}.
 
    Usage: <<<hadoop namenode [-format] | [-upgrade] | [-rollback] | [-finalize] | [-importCheckpoint]>>>
 
@@ -474,8 +478,9 @@ Administration Commands
 
 * <<<secondarynamenode>>>
 
-   Runs the HDFS secondary namenode. See Secondary Namenode for more
-   info.
+   Runs the HDFS secondary namenode.
+   See {{{../hadoop-hdfs/HdfsUserGuide.html#Secondary_NameNode}Secondary Namenode}}
+   for more info.
 
    Usage: <<<hadoop secondarynamenode [-checkpoint [force]] | [-geteditsize]>>>
 

Modified: hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/Compatibility.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/Compatibility.apt.vm?rev=1561944&r1=1561943&r2=1561944&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/Compatibility.apt.vm (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/Compatibility.apt.vm Tue Jan 28 05:37:13 2014
@@ -233,9 +233,10 @@ hand-in-hand to address this.
 
     * In particular for MapReduce applications, the developer community will 
       try our best to support provide binary compatibility across major 
-      releases e.g. applications using org.apache.hadop.mapred.* APIs are 
-      supported compatibly across hadoop-1.x and hadoop-2.x. See 
-      {{{../hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduce_Compatibility_Hadoop1_Hadoop2.html}
+      releases e.g. applications using org.apache.hadoop.mapred.
+      
+    * APIs are supported compatibly across hadoop-1.x and hadoop-2.x. See 
+      {{{../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduce_Compatibility_Hadoop1_Hadoop2.html}
       Compatibility for MapReduce applications between hadoop-1.x and hadoop-2.x}} 
       for more details.
 
@@ -248,13 +249,13 @@ hand-in-hand to address this.
 
   * {{{../hadoop-hdfs/WebHDFS.html}WebHDFS}} - Stable
 
-  * {{{../hadoop-yarn/hadoop-yarn-site/ResourceManagerRest.html}ResourceManager}}
+  * {{{../../hadoop-yarn/hadoop-yarn-site/ResourceManagerRest.html}ResourceManager}}
 
-  * {{{../hadoop-yarn/hadoop-yarn-site/NodeManagerRest.html}NodeManager}}
+  * {{{../../hadoop-yarn/hadoop-yarn-site/NodeManagerRest.html}NodeManager}}
 
-  * {{{../hadoop-yarn/hadoop-yarn-site/MapredAppMasterRest.html}MR Application Master}}
+  * {{{../../hadoop-yarn/hadoop-yarn-site/MapredAppMasterRest.html}MR Application Master}}
 
-  * {{{../hadoop-yarn/hadoop-yarn-site/HistoryServerRest.html}History Server}}
+  * {{{../../hadoop-yarn/hadoop-yarn-site/HistoryServerRest.html}History Server}}
   
 *** Policy
     
@@ -512,7 +513,8 @@ hand-in-hand to address this.
     {{{https://issues.apache.org/jira/browse/HADOOP-9517}HADOOP-9517}}
 
   * Binary compatibility for MapReduce end-user applications between hadoop-1.x and hadoop-2.x -
-    {{{../hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduce_Compatibility_Hadoop1_Hadoop2.html}MapReduce Compatibility between hadoop-1.x and hadoop-2.x}}
+    {{{../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduce_Compatibility_Hadoop1_Hadoop2.html}
+    MapReduce Compatibility between hadoop-1.x and hadoop-2.x}}
 
   * Annotations for interfaces as per interface classification
     schedule -

Modified: hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm?rev=1561944&r1=1561943&r2=1561944&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm Tue Jan 28 05:37:13 2014
@@ -88,7 +88,7 @@ chgrp
 
    Change group association of files. The user must be the owner of files, or
    else a super-user. Additional information is in the
-   {{{betterurl}Permissions Guide}}.
+   {{{../hadoop-hdfs/HdfsPermissionsGuide.html}Permissions Guide}}.
 
    Options
 
@@ -101,7 +101,7 @@ chmod
    Change the permissions of files. With -R, make the change recursively
    through the directory structure. The user must be the owner of the file, or
    else a super-user. Additional information is in the
-   {{{betterurl}Permissions Guide}}.
+   {{{../hadoop-hdfs/HdfsPermissionsGuide.html}Permissions Guide}}.
 
    Options
 
@@ -112,7 +112,7 @@ chown
    Usage: <<<hdfs dfs -chown [-R] [OWNER][:[GROUP]] URI [URI ]>>>
 
    Change the owner of files. The user must be a super-user. Additional information
-   is in the {{{betterurl}Permissions Guide}}.
+   is in the {{{../hadoop-hdfs/HdfsPermissionsGuide.html}Permissions Guide}}.
 
    Options
 
@@ -210,8 +210,8 @@ expunge
 
    Usage: <<<hdfs dfs -expunge>>>
 
-   Empty the Trash. Refer to the {{{betterurl}HDFS Architecture Guide}} for
-   more information on the Trash feature.
+   Empty the Trash. Refer to the {{{../hadoop-hdfs/HdfsDesign.html}
+   HDFS Architecture Guide}} for more information on the Trash feature.
 
 get
 
@@ -439,7 +439,9 @@ test
    Options:
 
      * The -e option will check to see if the file exists, returning 0 if true.
+
      * The -z option will check to see if the file is zero length, returning 0 if true.
+
      * The -d option will check to see if the path is directory, returning 0 if true.
 
    Example:

Modified: hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/InterfaceClassification.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/InterfaceClassification.apt.vm?rev=1561944&r1=1561943&r2=1561944&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/InterfaceClassification.apt.vm (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/InterfaceClassification.apt.vm Tue Jan 28 05:37:13 2014
@@ -18,8 +18,6 @@
 
 Hadoop Interface Taxonomy: Audience and Stability Classification
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * Motivation

Modified: hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/NativeLibraries.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/NativeLibraries.apt.vm?rev=1561944&r1=1561943&r2=1561944&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/NativeLibraries.apt.vm (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/NativeLibraries.apt.vm Tue Jan 28 05:37:13 2014
@@ -117,23 +117,19 @@ Native Libraries Guide
      * zlib-development package (stable version >= 1.2.0)
 
    Once you installed the prerequisite packages use the standard hadoop
-   build.xml file and pass along the compile.native flag (set to true) to
-   build the native hadoop library:
+   pom.xml file and pass along the native flag to build the native hadoop 
+   library:
 
 ----
-   $ ant -Dcompile.native=true <target>
+   $ mvn package -Pdist,native -DskipTests -Dtar
 ----
 
    You should see the newly-built library in:
 
 ----
-   $ build/native/<platform>/lib
+   $ hadoop-dist/target/hadoop-${project.version}/lib/native
 ----
 
-   where <platform> is a combination of the system-properties:
-   ${os.name}-${os.arch}-${sun.arch.data.model} (for example,
-   Linux-i386-32).
-
    Please note the following:
 
      * It is mandatory to install both the zlib and gzip development

Modified: hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/ServiceLevelAuth.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/ServiceLevelAuth.apt.vm?rev=1561944&r1=1561943&r2=1561944&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/ServiceLevelAuth.apt.vm (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/ServiceLevelAuth.apt.vm Tue Jan 28 05:37:13 2014
@@ -29,8 +29,10 @@ Service Level Authorization Guide
 
    Make sure Hadoop is installed, configured and setup correctly. For more
    information see:
-     * Single Node Setup for first-time users.
-     * Cluster Setup for large, distributed clusters.
+
+     * {{{./SingleCluster.html}Single Node Setup}} for first-time users.
+
+     * {{{./ClusterSetup.html}Cluster Setup}} for large, distributed clusters.
 
 * Overview
 

Modified: hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm?rev=1561944&r1=1561943&r2=1561944&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm Tue Jan 28 05:37:13 2014
@@ -18,8 +18,6 @@
 
 Hadoop MapReduce Next Generation - Setting up a Single Node Cluster.
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * Mapreduce Tarball
@@ -32,7 +30,8 @@ $ mvn clean install -DskipTests
 $ cd hadoop-mapreduce-project
 $ mvn clean install assembly:assembly -Pnative
 +---+
-  <<NOTE:>> You will need protoc 2.5.0 installed.
+  <<NOTE:>> You will need {{{http://code.google.com/p/protobuf}protoc 2.5.0}}
+            installed.
 
   To ignore the native builds in mapreduce you can omit the <<<-Pnative>>> argument
   for maven. The tarball should be available in <<<target/>>> directory. 

Modified: hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java?rev=1561944&r1=1561943&r2=1561944&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java Tue Jan 28 05:37:13 2014
@@ -28,10 +28,30 @@ public class TestVersionUtil {
     // Equal versions are equal.
     assertEquals(0, VersionUtil.compareVersions("2.0.0", "2.0.0"));
     assertEquals(0, VersionUtil.compareVersions("2.0.0a", "2.0.0a"));
-    assertEquals(0, VersionUtil.compareVersions("1", "1"));
     assertEquals(0, VersionUtil.compareVersions(
         "2.0.0-SNAPSHOT", "2.0.0-SNAPSHOT"));
-    
+
+    assertEquals(0, VersionUtil.compareVersions("1", "1"));
+    assertEquals(0, VersionUtil.compareVersions("1", "1.0"));
+    assertEquals(0, VersionUtil.compareVersions("1", "1.0.0"));
+
+    assertEquals(0, VersionUtil.compareVersions("1.0", "1"));
+    assertEquals(0, VersionUtil.compareVersions("1.0", "1.0"));
+    assertEquals(0, VersionUtil.compareVersions("1.0", "1.0.0"));
+
+    assertEquals(0, VersionUtil.compareVersions("1.0.0", "1"));
+    assertEquals(0, VersionUtil.compareVersions("1.0.0", "1.0"));
+    assertEquals(0, VersionUtil.compareVersions("1.0.0", "1.0.0"));
+
+    assertEquals(0, VersionUtil.compareVersions("1.0.0-alpha-1", "1.0.0-a1"));
+    assertEquals(0, VersionUtil.compareVersions("1.0.0-alpha-2", "1.0.0-a2"));
+    assertEquals(0, VersionUtil.compareVersions("1.0.0-alpha1", "1.0.0-alpha-1"));
+
+    assertEquals(0, VersionUtil.compareVersions("1a0", "1.0.0-alpha-0"));
+    assertEquals(0, VersionUtil.compareVersions("1a0", "1-a0"));
+    assertEquals(0, VersionUtil.compareVersions("1.a0", "1-a0"));
+    assertEquals(0, VersionUtil.compareVersions("1.a0", "1.0.0-alpha-0"));
+
     // Assert that lower versions are lower, and higher versions are higher.
     assertExpectedValues("1", "2.0.0");
     assertExpectedValues("1.0.0", "2");
@@ -51,15 +71,27 @@ public class TestVersionUtil {
     assertExpectedValues("1.0.2a", "1.0.2ab");
     assertExpectedValues("1.0.0a1", "1.0.0a2");
     assertExpectedValues("1.0.0a2", "1.0.0a10");
+    // The 'a' in "1.a" is not followed by digit, thus not treated as "alpha",
+    // and treated larger than "1.0", per maven's ComparableVersion class
+    // implementation.
     assertExpectedValues("1.0", "1.a");
-    assertExpectedValues("1.0", "1.a0");
+    //The 'a' in "1.a0" is followed by digit, thus treated as "alpha-<digit>"
+    assertExpectedValues("1.a0", "1.0");
+    assertExpectedValues("1a0", "1.0");    
+    assertExpectedValues("1.0.1-alpha-1", "1.0.1-alpha-2");    
+    assertExpectedValues("1.0.1-beta-1", "1.0.1-beta-2");
     
     // Snapshot builds precede their eventual releases.
     assertExpectedValues("1.0-SNAPSHOT", "1.0");
-    assertExpectedValues("1.0", "1.0.0-SNAPSHOT");
+    assertExpectedValues("1.0.0-SNAPSHOT", "1.0");
     assertExpectedValues("1.0.0-SNAPSHOT", "1.0.0");
     assertExpectedValues("1.0.0", "1.0.1-SNAPSHOT");
     assertExpectedValues("1.0.1-SNAPSHOT", "1.0.1");
+    assertExpectedValues("1.0.1-SNAPSHOT", "1.0.2");
+    
+    assertExpectedValues("1.0.1-alpha-1", "1.0.1-SNAPSHOT");
+    assertExpectedValues("1.0.1-beta-1", "1.0.1-SNAPSHOT");
+    assertExpectedValues("1.0.1-beta-2", "1.0.1-SNAPSHOT");
   }
   
   private static void assertExpectedValues(String lower, String higher) {

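The new cases above encode maven's qualifier ordering, in which alpha precedes beta, both precede SNAPSHOT, and SNAPSHOT precedes the plain release. The same ordering, written directly against the ComparableVersion class added in this merge (a sketch):

----
import org.apache.hadoop.util.ComparableVersion;

public class QualifierOrderDemo {
  public static void main(String[] args) {
    ComparableVersion alpha   = new ComparableVersion("1.0.1-alpha-1");
    ComparableVersion beta    = new ComparableVersion("1.0.1-beta-1");
    ComparableVersion snap    = new ComparableVersion("1.0.1-SNAPSHOT");
    ComparableVersion release = new ComparableVersion("1.0.1");
    // Prints "true" three times: alpha < beta < SNAPSHOT < release.
    System.out.println(alpha.compareTo(beta) < 0);
    System.out.println(beta.compareTo(snap) < 0);
    System.out.println(snap.compareTo(release) < 0);
  }
}
----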

