hadoop-common-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r1444439 - in /hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common: ./ src/main/bin/ src/main/docs/ src/main/java/ src/main/java/org/apache/hadoop/fs/ src/main/java/org/apache/hadoop/fs/shell/ src/main/java/org/apache/hadoo...
Date: Sat, 09 Feb 2013 21:29:48 GMT
Author: szetszwo
Date: Sat Feb  9 21:29:44 2013
New Revision: 1444439

URL: http://svn.apache.org/r1444439
Log:
Merge r1441206 through r1444434 from trunk.

Added:
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ConnectTimeoutException.java
      - copied unchanged from r1444434, hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ConnectTimeoutException.java
Modified:
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/CHANGES.txt   (contents, props changed)
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/docs/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BoundedByteArrayOutputStream.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SortedMapWritable.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/core/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestBoundedByteArrayOutputStream.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSortedMapWritable.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tools/GetGroupsTestBase.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/CHANGES.txt Sat Feb  9 21:29:44 2013
@@ -149,6 +149,8 @@ Trunk (Unreleased)
     HADOOP-8924. Add maven plugin alternative to shell script to save
     package-info.java. (Chris Nauroth via suresh)
 
+    HADOOP-9277. Improve javadoc for FileContext. (Andrew Wang via suresh)
+
   BUG FIXES
 
     HADOOP-8419. Fixed GzipCode NPE reset for IBM JDK. (Yu Li via eyang)
@@ -339,7 +341,26 @@ Trunk (Unreleased)
 
     HADOOP-9190. packaging docs is broken. (Andy Isaacson via atm)
 
-Release 2.0.3-alpha - Unreleased 
+Release 2.0.4-beta - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+    HADOOP-9283. Add support for running the Hadoop client on AIX. (atm)
+
+  IMPROVEMENTS
+
+    HADOOP-9253. Capture ulimit info in the logs at service start time.
+    (Arpit Gupta via suresh)
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+    HADOOP-9294. GetGroupsTestBase fails on Windows. (Chris Nauroth via suresh)
+
+Release 2.0.3-alpha - 2013-02-06 
 
   INCOMPATIBLE CHANGES
 
@@ -467,6 +488,9 @@ Release 2.0.3-alpha - Unreleased 
     HADOOP-9231. Parametrize staging URL for the uniformity of
     distributionManagement. (Konstantin Boudnik via suresh)
 
+    HADOOP-9276. Allow BoundedByteArrayOutputStream to be resettable.
+    (Arun Murthy via hitesh)
+
   OPTIMIZATIONS
 
     HADOOP-8866. SampleQuantiles#query is O(N^2) instead of O(N). (Andrew Wang
@@ -592,6 +616,22 @@ Release 2.0.3-alpha - Unreleased 
     HADOOP-9221. Convert remaining xdocs to APT. (Andy Isaacson via atm)
 
     HADOOP-8981. TestMetricsSystemImpl fails on Windows. (Xuan Gong via suresh)
+    
+    HADOOP-9124. SortedMapWritable violates contract of Map interface for
+    equals() and hashCode(). (Surenkumar Nihalani via tomwhite)
+
+    HADOOP-9252. In StringUtils, humanReadableInt(..) has a race condition and
+    the synchronization of limitDecimalTo2(double) can be avoided.  (szetszwo)
+
+    HADOOP-9260. Hadoop version may be not correct when starting name node or
+    data node. (Chris Nauroth via jlowe)
+
+    HADOOP-9278. Fix the file handle leak in HarMetaData.parseMetaData() in
+    HarFileSystem. (Chris Nauroth via szetszwo)
+
+    HADOOP-9289. FsShell rm -f fails for non-matching globs. (Daryn Sharp via
+    suresh)
+
 
 Release 2.0.2-alpha - 2012-09-07 
 
@@ -1294,6 +1334,9 @@ Release 0.23.7 - UNRELEASED
     HADOOP-8849. FileUtil#fullyDelete should grant the target directories +rwx
     permissions (Ivan A. Veselovsky via bobby)
 
+    HADOOP-9067. provide test for LocalFileSystem.reportChecksumFailure
+    (Ivan A. Veselovsky via bobby) 
+
   OPTIMIZATIONS
 
   BUG FIXES

Propchange: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/CHANGES.txt
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt:r1441206-1444434

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh Sat Feb  9 21:29:44 2013
@@ -83,7 +83,8 @@ fi
 if [ "$command" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
   export HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR
   export HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
-  export HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER   
+  export HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
+  starting_secure_dn="true"
 fi
 
 if [ "$HADOOP_IDENT_STRING" = "" ]; then
@@ -154,7 +155,17 @@ case $startStop in
       ;;
     esac
     echo $! > $pid
-    sleep 1; head "$log"
+    sleep 1
+    # capture the ulimit output
+    if [ "true" = "$starting_secure_dn" ]; then
+      echo "ulimit -a for secure datanode user $HADOOP_SECURE_DN_USER" >> $log
+      # capture the ulimit info for the appropriate user
+      su --shell=/bin/bash $HADOOP_SECURE_DN_USER -c 'ulimit -a' >> $log 2>&1
+    else
+      echo "ulimit -a for user $USER" >> $log
+      ulimit -a >> $log 2>&1
+    fi
+    head -30 "$log"
     sleep 3;
     if ! ps -p $! > /dev/null ; then
       exit 1

Propchange: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/docs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/docs:r1441206-1444434

Propchange: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java:r1441206-1444434

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java Sat Feb  9 21:29:44 2013
@@ -57,70 +57,60 @@ import org.apache.hadoop.security.token.
 import org.apache.hadoop.util.ShutdownHookManager;
 
 /**
- * The FileContext class provides an interface to the application writer for
- * using the Hadoop file system.
- * It provides a set of methods for the usual operation: create, open, 
- * list, etc 
+ * The FileContext class provides an interface for users of the Hadoop
+ * file system. It exposes a number of file system operations, e.g. create,
+ * open, list.
  * 
- * <p>
- * <b> *** Path Names *** </b>
- * <p>
+ * <h2>Path Names</h2>
  * 
- * The Hadoop file system supports a URI name space and URI names.
- * It offers a forest of file systems that can be referenced using fully
- * qualified URIs.
- * Two common Hadoop file systems implementations are
+ * The Hadoop file system supports a URI namespace and URI names. This enables
+ * multiple types of file systems to be referenced using fully-qualified URIs.
+ * Two common Hadoop file system implementations are
  * <ul>
- * <li> the local file system: file:///path
- * <li> the hdfs file system hdfs://nnAddress:nnPort/path
+ * <li>the local file system: file:///path
+ * <li>the HDFS file system: hdfs://nnAddress:nnPort/path
  * </ul>
  * 
- * While URI names are very flexible, it requires knowing the name or address
- * of the server. For convenience one often wants to access the default system
- * in one's environment without knowing its name/address. This has an
- * additional benefit that it allows one to change one's default fs
- *  (e.g. admin moves application from cluster1 to cluster2).
+ * The Hadoop file system also supports additional naming schemes besides URIs.
+ * Hadoop has the concept of a <i>default file system</i>, which implies a
+ * default URI scheme and authority. This enables <i>slash-relative names</i>
+ * relative to the default FS, which are more convenient for users and
+ * application writers. The default FS is typically set by the user's
+ * environment, though it can also be manually specified.
  * <p>
  * 
- * To facilitate this, Hadoop supports a notion of a default file system.
- * The user can set his default file system, although this is
- * typically set up for you in your environment via your default config.
- * A default file system implies a default scheme and authority; slash-relative
- * names (such as /for/bar) are resolved relative to that default FS.
- * Similarly a user can also have working-directory-relative names (i.e. names
- * not starting with a slash). While the working directory is generally in the
- * same default FS, the wd can be in a different FS.
+ * Hadoop also supports <i>working-directory-relative</i> names, which are paths
+ * relative to the current working directory (similar to Unix). The working
+ * directory can be in a different file system than the default FS.
  * <p>
- *  Hence Hadoop path names can be one of:
- *  <ul>
- *  <li> fully qualified URI: scheme://authority/path
- *  <li> slash relative names: /path relative to the default file system
- *  <li> wd-relative names: path  relative to the working dir
- *  </ul>   
+ * Thus, Hadoop path names can be specified as one of the following:
+ * <ul>
+ * <li>a fully-qualified URI: scheme://authority/path (e.g.
+ * hdfs://nnAddress:nnPort/foo/bar)
+ * <li>a slash-relative name: path relative to the default file system (e.g.
+ * /foo/bar)
+ * <li>a working-directory-relative name: path relative to the working dir (e.g.
+ * foo/bar)
+ * </ul>
  *  Relative paths with scheme (scheme:foo/bar) are illegal.
  *  
- *  <p>
- *  <b>****The Role of the FileContext and configuration defaults****</b>
- *  <p>
- *  The FileContext provides file namespace context for resolving file names;
- *  it also contains the umask for permissions, In that sense it is like the
- *  per-process file-related state in Unix system.
- *  These two properties
- *  <ul> 
- *  <li> default file system i.e your slash)
- *  <li> umask
- *  </ul>
- *  in general, are obtained from the default configuration file
- *  in your environment,  (@see {@link Configuration}).
- *  
- *  No other configuration parameters are obtained from the default config as 
- *  far as the file context layer is concerned. All file system instances
- *  (i.e. deployments of file systems) have default properties; we call these
- *  server side (SS) defaults. Operation like create allow one to select many 
- *  properties: either pass them in as explicit parameters or use
- *  the SS properties.
- *  <p>
- *  The file system related SS defaults are
+ * <h2>Role of FileContext and Configuration Defaults</h2>
+ *
+ * The FileContext is the analogue of per-process file-related state in Unix. It
+ * contains two properties:
+ * 
+ * <ul>
+ * <li>the default file system (for resolving slash-relative names)
+ * <li>the umask (for file permissions)
+ * </ul>
+ * In general, these properties are obtained from the default configuration file
+ * in the user's environment (see {@link Configuration}).
+ * 
+ * Further file system properties are specified on the server-side. File system
+ * operations default to using these server-side defaults unless otherwise
+ * specified.
+ * <p>
+ * The file system related server-side defaults are:
  *  <ul>
  *  <li> the home directory (default is "/user/userName")
  *  <li> the initial wd (only for local fs)
@@ -131,34 +121,34 @@ import org.apache.hadoop.util.ShutdownHo
  *  <li> checksum option. (checksumType and  bytesPerChecksum)
  *  </ul>
  *
- * <p>
- * <b> *** Usage Model for the FileContext class *** </b>
- * <p>
+ * <h2>Example Usage</h2>
+ *
  * Example 1: use the default config read from the $HADOOP_CONFIG/core.xml.
  *   Unspecified values come from core-defaults.xml in the release jar.
  *  <ul>  
  *  <li> myFContext = FileContext.getFileContext(); // uses the default config
  *                                                // which has your default FS 
  *  <li>  myFContext.create(path, ...);
- *  <li>  myFContext.setWorkingDir(path)
+ *  <li>  myFContext.setWorkingDir(path);
  *  <li>  myFContext.open (path, ...);  
+ *  <li>...
  *  </ul>  
  * Example 2: Get a FileContext with a specific URI as the default FS
  *  <ul>  
- *  <li> myFContext = FileContext.getFileContext(URI)
+ *  <li> myFContext = FileContext.getFileContext(URI);
  *  <li> myFContext.create(path, ...);
- *   ...
- * </ul> 
+ *  <li>...
+ * </ul>
  * Example 3: FileContext with local file system as the default
  *  <ul> 
- *  <li> myFContext = FileContext.getLocalFSFileContext()
+ *  <li> myFContext = FileContext.getLocalFSFileContext();
  *  <li> myFContext.create(path, ...);
  *  <li> ...
  *  </ul> 
  * Example 4: Use a specific config, ignoring $HADOOP_CONFIG
  *  Generally you should not need use a config unless you are doing
  *   <ul> 
- *   <li> configX = someConfigSomeOnePassedToYou.
+ *   <li> configX = someConfigSomeOnePassedToYou;
  *   <li> myFContext = getFileContext(configX); // configX is not changed,
  *                                              // is passed down 
  *   <li> myFContext.create(path, ...);
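
As a quick illustration of the usage model described in the updated javadoc, here is a minimal, self-contained sketch of Example 1 (the path and written data are made up, and it assumes the default file system permits writing under /tmp):

  import java.util.EnumSet;

  import org.apache.hadoop.fs.CreateFlag;
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.FileContext;
  import org.apache.hadoop.fs.Path;

  public class FileContextExample {
    public static void main(String[] args) throws Exception {
      // Example 1: the default config supplies the default file system.
      FileContext fc = FileContext.getFileContext();

      // A slash-relative name, resolved against the default FS.
      Path file = new Path("/tmp/filecontext-example.txt");
      FSDataOutputStream out =
          fc.create(file, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE));
      try {
        out.writeUTF("hello");
      } finally {
        out.close();
      }

      // A working-directory-relative name (no leading slash).
      fc.setWorkingDirectory(new Path("/tmp"));
      System.out.println(fc.getFileStatus(new Path("filecontext-example.txt")).getLen());
    }
  }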

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java Sat Feb  9 21:29:44 2013
@@ -30,8 +30,11 @@ import java.util.TreeMap;
 import java.util.HashMap;
 import java.util.concurrent.ConcurrentHashMap;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.util.LineReader;
 import org.apache.hadoop.util.Progressable;
@@ -50,6 +53,9 @@ import org.apache.hadoop.util.Progressab
  */
 
 public class HarFileSystem extends FilterFileSystem {
+
+  private static final Log LOG = LogFactory.getLog(HarFileSystem.class);
+
   public static final int VERSION = 3;
 
   private static final Map<URI, HarMetaData> harMetaCache =
@@ -1025,68 +1031,69 @@ public class HarFileSystem extends Filte
     }
 
     private void parseMetaData() throws IOException {
-      FSDataInputStream in = fs.open(masterIndexPath);
-      FileStatus masterStat = fs.getFileStatus(masterIndexPath);
-      masterIndexTimestamp = masterStat.getModificationTime();
-      LineReader lin = new LineReader(in, getConf());
-      Text line = new Text();
-      long read = lin.readLine(line);
-
-     // the first line contains the version of the index file
-      String versionLine = line.toString();
-      String[] arr = versionLine.split(" ");
-      version = Integer.parseInt(arr[0]);
-      // make it always backwards-compatible
-      if (this.version > HarFileSystem.VERSION) {
-        throw new IOException("Invalid version " + 
-            this.version + " expected " + HarFileSystem.VERSION);
-      }
-
-      // each line contains a hashcode range and the index file name
-      String[] readStr = null;
-      while(read < masterStat.getLen()) {
-        int b = lin.readLine(line);
-        read += b;
-        readStr = line.toString().split(" ");
-        int startHash = Integer.parseInt(readStr[0]);
-        int endHash  = Integer.parseInt(readStr[1]);
-        stores.add(new Store(Long.parseLong(readStr[2]), 
-            Long.parseLong(readStr[3]), startHash,
-            endHash));
-        line.clear();
-      }
+      Text line;
+      long read;
+      FSDataInputStream in = null;
+      LineReader lin = null;
+
       try {
-        // close the master index
-        lin.close();
-      } catch(IOException io){
-        // do nothing just a read.
-      }
+        in = fs.open(masterIndexPath);
+        FileStatus masterStat = fs.getFileStatus(masterIndexPath);
+        masterIndexTimestamp = masterStat.getModificationTime();
+        lin = new LineReader(in, getConf());
+        line = new Text();
+        read = lin.readLine(line);
+
+        // the first line contains the version of the index file
+        String versionLine = line.toString();
+        String[] arr = versionLine.split(" ");
+        version = Integer.parseInt(arr[0]);
+        // make it always backwards-compatible
+        if (this.version > HarFileSystem.VERSION) {
+          throw new IOException("Invalid version " + 
+              this.version + " expected " + HarFileSystem.VERSION);
+        }
 
-      FSDataInputStream aIn = fs.open(archiveIndexPath);
-      FileStatus archiveStat = fs.getFileStatus(archiveIndexPath);
-      archiveIndexTimestamp = archiveStat.getModificationTime();
-      LineReader aLin;
-
-      // now start reading the real index file
-      for (Store s: stores) {
-        read = 0;
-        aIn.seek(s.begin);
-        aLin = new LineReader(aIn, getConf());
-        while (read + s.begin < s.end) {
-          int tmp = aLin.readLine(line);
-          read += tmp;
-          String lineFeed = line.toString();
-          String[] parsed = lineFeed.split(" ");
-          parsed[0] = decodeFileName(parsed[0]);
-          archive.put(new Path(parsed[0]), new HarStatus(lineFeed));
+        // each line contains a hashcode range and the index file name
+        String[] readStr = null;
+        while(read < masterStat.getLen()) {
+          int b = lin.readLine(line);
+          read += b;
+          readStr = line.toString().split(" ");
+          int startHash = Integer.parseInt(readStr[0]);
+          int endHash  = Integer.parseInt(readStr[1]);
+          stores.add(new Store(Long.parseLong(readStr[2]), 
+              Long.parseLong(readStr[3]), startHash,
+              endHash));
           line.clear();
         }
+      } finally {
+        IOUtils.cleanup(LOG, lin, in);
       }
+
+      FSDataInputStream aIn = fs.open(archiveIndexPath);
       try {
-        // close the archive index
-        aIn.close();
-      } catch(IOException io) {
-        // do nothing just a read.
+        FileStatus archiveStat = fs.getFileStatus(archiveIndexPath);
+        archiveIndexTimestamp = archiveStat.getModificationTime();
+        LineReader aLin;
+
+        // now start reading the real index file
+        for (Store s: stores) {
+          read = 0;
+          aIn.seek(s.begin);
+          aLin = new LineReader(aIn, getConf());
+          while (read + s.begin < s.end) {
+            int tmp = aLin.readLine(line);
+            read += tmp;
+            String lineFeed = line.toString();
+            String[] parsed = lineFeed.split(" ");
+            parsed[0] = decodeFileName(parsed[0]);
+            archive.put(new Path(parsed[0]), new HarStatus(lineFeed));
+            line.clear();
+          }
+        }
+      } finally {
+        IOUtils.cleanup(LOG, aIn);
       }
     }
   }
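
The rewritten parseMetaData() follows a standard try/finally pattern: open the streams, parse, and let IOUtils.cleanup close whatever was opened, so an exception while reading can no longer leak a file handle. A self-contained sketch of the same pattern against an arbitrary local file (readFirstLine is a hypothetical helper, not part of HarFileSystem); note it relies on LineReader implementing Closeable, which the LineReader change later in this commit adds:

  import java.io.IOException;

  import org.apache.commons.logging.Log;
  import org.apache.commons.logging.LogFactory;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataInputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.io.IOUtils;
  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.util.LineReader;

  public class CleanupPattern {
    private static final Log LOG = LogFactory.getLog(CleanupPattern.class);

    // Read the first line of a file, guaranteeing both streams are closed
    // even if open() or readLine() throws part-way through.
    static String readFirstLine(Path path) throws IOException {
      FileSystem fs = FileSystem.getLocal(new Configuration());
      FSDataInputStream in = null;
      LineReader lin = null;
      try {
        in = fs.open(path);
        lin = new LineReader(in, fs.getConf());
        Text line = new Text();
        lin.readLine(line);
        return line.toString();
      } finally {
        // Closes both readers, logging (not rethrowing) any close failure.
        IOUtils.cleanup(LOG, lin, in);
      }
    }
  }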

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java Sat Feb  9 21:29:44 2013
@@ -21,6 +21,7 @@ package org.apache.hadoop.fs.shell;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.LinkedList;
+import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -28,6 +29,7 @@ import org.apache.hadoop.fs.PathIOExcept
 import org.apache.hadoop.fs.PathIsDirectoryException;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
+import org.apache.hadoop.fs.PathNotFoundException;
 import org.apache.hadoop.fs.Trash;
 
 /**
@@ -72,6 +74,19 @@ class Delete {
     }
 
     @Override
+    protected List<PathData> expandArgument(String arg) throws IOException {
+      try {
+        return super.expandArgument(arg);
+      } catch (PathNotFoundException e) {
+        if (!ignoreFNF) {
+          throw e;
+        }
+        // prevent -f on a non-existent glob from failing
+        return new LinkedList<PathData>();
+      }
+    }
+
+    @Override
     protected void processNonexistentPath(PathData item) throws IOException {
       if (!ignoreFNF) super.processNonexistentPath(item);
     }
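
The override above makes "rm -f" tolerate a glob that matches nothing by swallowing the path-not-found error during argument expansion only when the force flag is set. A minimal sketch of that "ignore missing only when forced" idea outside the FsShell class hierarchy (all names here are illustrative):

  import java.io.FileNotFoundException;
  import java.io.IOException;
  import java.util.Collections;
  import java.util.List;

  class ForcedExpansion {
    interface PathExpander {
      List<String> expand(String pattern) throws IOException;
    }

    // Expand an argument into matching paths; with force=true a pattern that
    // matches nothing yields an empty list instead of an error, like "rm -f".
    static List<String> expand(String pattern, boolean force,
                               PathExpander expander) throws IOException {
      try {
        return expander.expand(pattern);
      } catch (FileNotFoundException e) {
        if (!force) {
          throw e;                              // without -f, surface the error
        }
        return Collections.<String>emptyList(); // with -f, quietly match nothing
      }
    }
  }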

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java Sat Feb  9 21:29:44 2013
@@ -48,7 +48,7 @@ class FsUsage extends FsCommand {
   
   protected String formatSize(long size) {
     return humanReadable
-        ? StringUtils.humanReadableInt(size)
+        ? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1)
         : String.valueOf(size);
   }
 

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java Sat Feb  9 21:29:44 2013
@@ -67,7 +67,7 @@ class Ls extends FsCommand {
   protected boolean humanReadable = false;
   protected String formatSize(long size) {
     return humanReadable
-      ? StringUtils.humanReadableInt(size)
+      ? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1)
       : String.valueOf(size);
   }
 

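Both Ls and FsUsage now format sizes with StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1), added by the StringUtils change further down. A small sketch of the outputs that call is expected to produce, assuming the behavior of the new code:

  import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;

  public class SizeFormatting {
    public static void main(String[] args) {
      // Below 1K: no prefix and no decimal point.
      System.out.println(TraditionalBinaryPrefix.long2String(512L, "", 1));   // 512
      // Exact multiple of the prefix: printed without a fraction.
      System.out.println(TraditionalBinaryPrefix.long2String(2048L, "", 1));  // 2 K
      // Otherwise one decimal place, as requested by Ls and FsUsage.
      System.out.println(TraditionalBinaryPrefix.long2String(1536L, "", 1));  // 1.5 K
      // With a unit and two decimal places, as byteDesc() now does.
      System.out.println(TraditionalBinaryPrefix.long2String(1536L, "B", 2)); // 1.50 KB
    }
  }
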
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BoundedByteArrayOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BoundedByteArrayOutputStream.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BoundedByteArrayOutputStream.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BoundedByteArrayOutputStream.java Sat Feb  9 21:29:44 2013
@@ -32,9 +32,10 @@ import org.apache.hadoop.classification.
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Unstable
 public class BoundedByteArrayOutputStream extends OutputStream {
-  private final byte[] buffer;
+  private byte[] buffer;
+  private int startOffset;
   private int limit;
-  private int count;
+  private int currentPointer;
 
   /**
    * Create a BoundedByteArrayOutputStream with the specified
@@ -52,20 +53,30 @@ public class BoundedByteArrayOutputStrea
    * @param limit The maximum limit upto which data can be written
    */
   public BoundedByteArrayOutputStream(int capacity, int limit) {
+    this(new byte[capacity], 0, limit);
+  }
+
+  protected BoundedByteArrayOutputStream(byte[] buf, int offset, int limit) {
+    resetBuffer(buf, offset, limit);
+  }
+  
+  protected void resetBuffer(byte[] buf, int offset, int limit) {
+    int capacity = buf.length - offset;
     if ((capacity < limit) || (capacity | limit) < 0) {
       throw new IllegalArgumentException("Invalid capacity/limit");
     }
-    this.buffer = new byte[capacity];
-    this.limit = limit;
-    this.count = 0;
+    this.buffer = buf;
+    this.startOffset = offset;
+    this.currentPointer = offset;
+    this.limit = offset + limit;
   }
-
+  
   @Override
   public void write(int b) throws IOException {
-    if (count >= limit) {
+    if (currentPointer >= limit) {
       throw new EOFException("Reaching the limit of the buffer.");
     }
-    buffer[count++] = (byte) b;
+    buffer[currentPointer++] = (byte) b;
   }
 
   @Override
@@ -77,12 +88,12 @@ public class BoundedByteArrayOutputStrea
       return;
     }
 
-    if (count + len > limit) {
+    if (currentPointer + len > limit) {
       throw new EOFException("Reach the limit of the buffer");
     }
 
-    System.arraycopy(b, off, buffer, count, len);
-    count += len;
+    System.arraycopy(b, off, buffer, currentPointer, len);
+    currentPointer += len;
   }
 
   /**
@@ -90,17 +101,17 @@ public class BoundedByteArrayOutputStrea
    * @param newlim New Limit
    */
   public void reset(int newlim) {
-    if (newlim > buffer.length) {
+    if (newlim > (buffer.length - startOffset)) {
       throw new IndexOutOfBoundsException("Limit exceeds buffer size");
     }
     this.limit = newlim;
-    this.count = 0;
+    this.currentPointer = startOffset;
   }
 
   /** Reset the buffer */
   public void reset() {
-    this.limit = buffer.length;
-    this.count = 0;
+    this.limit = buffer.length - startOffset;
+    this.currentPointer = startOffset;
   }
 
   /** Return the current limit */
@@ -119,6 +130,10 @@ public class BoundedByteArrayOutputStrea
    * currently in the buffer.
    */
   public int size() {
-    return count;
+    return currentPointer - startOffset;
+  }
+  
+  public int available() {
+    return limit - currentPointer;
   }
 }
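
With the start offset and resetBuffer() in place, a BoundedByteArrayOutputStream can be reused across records without reallocating its backing array. A short usage sketch (the sizes are arbitrary):

  import java.io.IOException;

  import org.apache.hadoop.io.BoundedByteArrayOutputStream;

  public class ReusableBuffer {
    public static void main(String[] args) throws IOException {
      // 64 bytes of capacity with an initial write limit of 16 bytes.
      BoundedByteArrayOutputStream out = new BoundedByteArrayOutputStream(64, 16);

      out.write(new byte[10]);
      System.out.println(out.size());       // 10 bytes written so far
      System.out.println(out.available());  // 6 bytes left before the limit

      // Reuse the same backing array for the next record with a new limit.
      out.reset(32);
      System.out.println(out.size());       // 0 again after the reset
    }
  }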

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SortedMapWritable.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SortedMapWritable.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SortedMapWritable.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SortedMapWritable.java Sat Feb  9 21:29:44 2013
@@ -203,4 +203,27 @@ public class SortedMapWritable extends A
       e.getValue().write(out);
     }
   }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+
+    if (obj instanceof SortedMapWritable) {
+      Map map = (Map) obj;
+      if (size() != map.size()) {
+        return false;
+      }
+
+      return entrySet().equals(map.entrySet());
+    }
+
+    return false;
+  }
+
+  @Override
+  public int hashCode() {
+    return instance.hashCode();
+  }
 }
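
The new equals()/hashCode() pair restores the java.util.Map contract, so two SortedMapWritable instances with equal entries compare equal and behave correctly in hash-based collections. A minimal sketch using Text keys and values:

  import java.util.HashSet;
  import java.util.Set;

  import org.apache.hadoop.io.SortedMapWritable;
  import org.apache.hadoop.io.Text;

  public class MapContractCheck {
    public static void main(String[] args) {
      SortedMapWritable a = new SortedMapWritable();
      SortedMapWritable b = new SortedMapWritable();
      a.put(new Text("k"), new Text("v"));
      b.put(new Text("k"), new Text("v"));

      // Equal contents now imply equals() and matching hashCode().
      System.out.println(a.equals(b));                  // true
      System.out.println(a.hashCode() == b.hashCode()); // true

      // Which in turn makes de-duplication in hashed collections work.
      Set<SortedMapWritable> set = new HashSet<SortedMapWritable>();
      set.add(a);
      set.add(b);
      System.out.println(set.size());                   // 1
    }
  }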

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java Sat Feb  9 21:29:44 2013
@@ -35,6 +35,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.StandbyException;
+import org.apache.hadoop.net.ConnectTimeoutException;
 
 /**
  * <p>
@@ -543,6 +544,7 @@ public class RetryPolicies {
           e instanceof NoRouteToHostException ||
           e instanceof UnknownHostException ||
           e instanceof StandbyException ||
+          e instanceof ConnectTimeoutException ||
           isWrappedStandbyException(e)) {
         return new RetryAction(
             RetryAction.RetryDecision.FAILOVER_AND_RETRY,
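
Adding ConnectTimeoutException to this list means a connect-phase timeout now triggers failover-and-retry like the other connection-level failures. For illustration only, a stand-alone sketch of that kind of exception classification (the classifier below is hypothetical and trimmed; it is not the Hadoop RetryPolicy API):

  import java.io.IOException;
  import java.net.ConnectException;
  import java.net.NoRouteToHostException;
  import java.net.UnknownHostException;

  import org.apache.hadoop.net.ConnectTimeoutException;

  class FailoverClassifier {
    enum Decision { FAIL, RETRY, FAILOVER_AND_RETRY }

    // Connection-level failures suggest the target may be the wrong or
    // standby node, so try another one; other IO errors get a plain retry.
    static Decision classify(Exception e) {
      if (e instanceof ConnectException
          || e instanceof NoRouteToHostException
          || e instanceof UnknownHostException
          || e instanceof ConnectTimeoutException) {
        return Decision.FAILOVER_AND_RETRY;
      }
      return e instanceof IOException ? Decision.RETRY : Decision.FAIL;
    }
  }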

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java Sat Feb  9 21:29:44 2013
@@ -67,6 +67,7 @@ import org.apache.hadoop.ipc.protobuf.Rp
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto.OperationProto;
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto;
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto;
+import org.apache.hadoop.net.ConnectTimeoutException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.SaslRpcClient;
@@ -511,14 +512,14 @@ public class Client {
           }
           this.socket.setSoTimeout(pingInterval);
           return;
-        } catch (SocketTimeoutException toe) {
+        } catch (ConnectTimeoutException toe) {
           /* Check for an address change and update the local reference.
            * Reset the failure counter if the address was changed
            */
           if (updateAddress()) {
             timeoutFailures = ioFailures = 0;
           }
-          handleConnectionFailure(timeoutFailures++,
+          handleConnectionTimeout(timeoutFailures++,
               maxRetriesOnSocketTimeouts, toe);
         } catch (IOException ie) {
           if (updateAddress()) {
@@ -680,7 +681,7 @@ public class Client {
       socket = null;
     }
 
-    /* Handle connection failures
+    /* Handle connection failures due to timeout on connect
      *
      * If the current number of retries is equal to the max number of retries,
      * stop retrying and throw the exception; Otherwise backoff 1 second and
@@ -694,7 +695,7 @@ public class Client {
      * @param ioe failure reason
      * @throws IOException if max number of retries is reached
      */
-    private void handleConnectionFailure(
+    private void handleConnectionTimeout(
         int curRetries, int maxRetries, IOException ioe) throws IOException {
 
       closeConnection();
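
The Client now catches ConnectTimeoutException, which NetUtils.connect() throws for connect-phase timeouts (see the NetUtils change below), so they can be handled separately from later read or ping timeouts. A small hedged sketch of catching it around NetUtils.connect (connectWithDiagnostics and the 5 second timeout are made up for the example):

  import java.io.IOException;
  import java.net.InetSocketAddress;
  import java.net.Socket;

  import org.apache.hadoop.net.ConnectTimeoutException;
  import org.apache.hadoop.net.NetUtils;

  public class TimeoutKinds {
    // ConnectTimeoutException is raised while establishing the connection,
    // so it can get its own handling, distinct from other IO failures.
    static void connectWithDiagnostics(Socket socket, InetSocketAddress addr) {
      try {
        NetUtils.connect(socket, addr, 5000);  // 5 second connect timeout
      } catch (ConnectTimeoutException cte) {
        System.err.println("timed out while connecting to " + addr + ": " + cte);
      } catch (IOException ioe) {
        System.err.println("other connect failure: " + ioe);
      }
    }
  }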

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java Sat Feb  9 21:29:44 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.net;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.lang.reflect.Constructor;
 import java.net.BindException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
@@ -517,11 +518,15 @@ public class NetUtils {
       socket.bind(localAddr);
     }
 
-    if (ch == null) {
-      // let the default implementation handle it.
-      socket.connect(endpoint, timeout);
-    } else {
-      SocketIOWithTimeout.connect(ch, endpoint, timeout);
+    try {
+      if (ch == null) {
+        // let the default implementation handle it.
+        socket.connect(endpoint, timeout);
+      } else {
+        SocketIOWithTimeout.connect(ch, endpoint, timeout);
+      }
+    } catch (SocketTimeoutException ste) {
+      throw new ConnectTimeoutException(ste.getMessage());
     }
 
     // There is a very rare case allowed by the TCP specification, such that
@@ -719,7 +724,7 @@ public class NetUtils {
               + see("BindException"));
     } else if (exception instanceof ConnectException) {
       // connection refused; include the host:port in the error
-      return (ConnectException) new ConnectException(
+      return wrapWithMessage(exception, 
           "Call From "
               + localHost
               + " to "
@@ -729,32 +734,28 @@ public class NetUtils {
               + " failed on connection exception: "
               + exception
               + ";"
-              + see("ConnectionRefused"))
-          .initCause(exception);
+              + see("ConnectionRefused"));
     } else if (exception instanceof UnknownHostException) {
-      return (UnknownHostException) new UnknownHostException(
+      return wrapWithMessage(exception,
           "Invalid host name: "
               + getHostDetailsAsString(destHost, destPort, localHost)
               + exception
               + ";"
-              + see("UnknownHost"))
-          .initCause(exception);
+              + see("UnknownHost"));
     } else if (exception instanceof SocketTimeoutException) {
-      return (SocketTimeoutException) new SocketTimeoutException(
+      return wrapWithMessage(exception,
           "Call From "
               + localHost + " to " + destHost + ":" + destPort
               + " failed on socket timeout exception: " + exception
               + ";"
-              + see("SocketTimeout"))
-          .initCause(exception);
+              + see("SocketTimeout"));
     } else if (exception instanceof NoRouteToHostException) {
-      return (NoRouteToHostException) new NoRouteToHostException(
+      return wrapWithMessage(exception,
           "No Route to Host from  "
               + localHost + " to " + destHost + ":" + destPort
               + " failed on socket timeout exception: " + exception
               + ";"
-              + see("NoRouteToHost"))
-          .initCause(exception);
+              + see("NoRouteToHost"));
     }
     else {
       return (IOException) new IOException("Failed on local exception: "
@@ -769,6 +770,21 @@ public class NetUtils {
   private static String see(final String entry) {
     return FOR_MORE_DETAILS_SEE + HADOOP_WIKI + entry;
   }
+  
+  @SuppressWarnings("unchecked")
+  private static <T extends IOException> T wrapWithMessage(
+      T exception, String msg) {
+    Class<? extends Throwable> clazz = exception.getClass();
+    try {
+      Constructor<? extends Throwable> ctor = clazz.getConstructor(String.class);
+      Throwable t = ctor.newInstance(msg);
+      return (T)(t.initCause(exception));
+    } catch (Throwable e) {
+      LOG.warn("Unable to wrap exception of type " +
+          clazz + ": it has no (String) constructor", e);
+      return exception;
+    }
+  }
 
   /**
    * Get the host details as a string
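
wrapWithMessage() uses reflection to rebuild the same exception type with a richer message while keeping the original as the cause, falling back to the original if the type has no (String) constructor. A self-contained sketch of the technique (a stand-alone copy for illustration, not the private NetUtils helper itself):

  import java.io.IOException;
  import java.lang.reflect.Constructor;
  import java.net.NoRouteToHostException;

  public class WrapWithMessage {
    // Rebuild an exception of the same concrete type with a new message,
    // preserving the original as the cause; fall back if that is impossible.
    @SuppressWarnings("unchecked")
    static <T extends IOException> T wrap(T exception, String msg) {
      try {
        Constructor<? extends Throwable> ctor =
            exception.getClass().getConstructor(String.class);
        Throwable t = ctor.newInstance(msg);
        return (T) t.initCause(exception);
      } catch (Exception e) {
        // No (String) constructor, or reflection failed: keep the original.
        return exception;
      }
    }

    public static void main(String[] args) {
      NoRouteToHostException original = new NoRouteToHostException("raw");
      IOException wrapped = wrap(original, "No route from hostA to hostB:8020");
      System.out.println(wrapped.getClass().getSimpleName()); // NoRouteToHostException
      System.out.println(wrapped.getMessage());               // the richer message
      System.out.println(wrapped.getCause() == original);     // true
    }
  }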

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java Sat Feb  9 21:29:44 2013
@@ -301,17 +301,25 @@ public class UserGroupInformation {
   
   private static String OS_LOGIN_MODULE_NAME;
   private static Class<? extends Principal> OS_PRINCIPAL_CLASS;
+  
   private static final boolean windows =
       System.getProperty("os.name").startsWith("Windows");
   private static final boolean is64Bit =
       System.getProperty("os.arch").contains("64");
+  private static final boolean ibmJava = System.getProperty("java.vendor").contains("IBM");
+  private static final boolean aix = System.getProperty("os.name").equals("AIX");
+
   /* Return the OS login module class name */
   private static String getOSLoginModuleName() {
-    if (System.getProperty("java.vendor").contains("IBM")) {
-      return windows ? (is64Bit
-          ? "com.ibm.security.auth.module.Win64LoginModule"
-          : "com.ibm.security.auth.module.NTLoginModule")
-        : "com.ibm.security.auth.module.LinuxLoginModule";
+    if (ibmJava) {
+      if (windows) {
+        return is64Bit ? "com.ibm.security.auth.module.Win64LoginModule"
+            : "com.ibm.security.auth.module.NTLoginModule";
+      } else if (aix) {
+        return "com.ibm.security.auth.module.AIXLoginModule";
+      } else {
+        return "com.ibm.security.auth.module.LinuxLoginModule";
+      }
     } else {
       return windows ? "com.sun.security.auth.module.NTLoginModule"
         : "com.sun.security.auth.module.UnixLoginModule";
@@ -323,11 +331,14 @@ public class UserGroupInformation {
   private static Class<? extends Principal> getOsPrincipalClass() {
     ClassLoader cl = ClassLoader.getSystemClassLoader();
     try {
-      if (System.getProperty("java.vendor").contains("IBM")) {
+      if (ibmJava) {
         if (windows) {
           return (Class<? extends Principal>) (is64Bit
             ? cl.loadClass("com.ibm.security.auth.UsernamePrincipal")
             : cl.loadClass("com.ibm.security.auth.NTUserPrincipal"));
+        } else if (aix) {
+          return (Class<? extends Principal>)
+             cl.loadClass("com.ibm.security.auth.AIXPrincipal");
         } else {
           return (Class<? extends Principal>) (is64Bit
             ? cl.loadClass("com.ibm.security.auth.UsernamePrincipal")
@@ -418,12 +429,21 @@ public class UserGroupInformation {
     private static final Map<String,String> USER_KERBEROS_OPTIONS = 
       new HashMap<String,String>();
     static {
-      USER_KERBEROS_OPTIONS.put("doNotPrompt", "true");
-      USER_KERBEROS_OPTIONS.put("useTicketCache", "true");
-      USER_KERBEROS_OPTIONS.put("renewTGT", "true");
+      if (ibmJava) {
+        USER_KERBEROS_OPTIONS.put("useDefaultCcache", "true");
+      } else {
+        USER_KERBEROS_OPTIONS.put("doNotPrompt", "true");
+        USER_KERBEROS_OPTIONS.put("useTicketCache", "true");
+        USER_KERBEROS_OPTIONS.put("renewTGT", "true");
+      }
       String ticketCache = System.getenv("KRB5CCNAME");
       if (ticketCache != null) {
-        USER_KERBEROS_OPTIONS.put("ticketCache", ticketCache);
+        if (ibmJava) {
+          // The first value searched when "useDefaultCcache" is used.
+          System.setProperty("KRB5CCNAME", ticketCache);
+        } else {
+          USER_KERBEROS_OPTIONS.put("ticketCache", ticketCache);
+        }
       }
       USER_KERBEROS_OPTIONS.putAll(BASIC_JAAS_OPTIONS);
     }
@@ -434,10 +454,14 @@ public class UserGroupInformation {
     private static final Map<String,String> KEYTAB_KERBEROS_OPTIONS = 
       new HashMap<String,String>();
     static {
-      KEYTAB_KERBEROS_OPTIONS.put("doNotPrompt", "true");
-      KEYTAB_KERBEROS_OPTIONS.put("useKeyTab", "true");
-      KEYTAB_KERBEROS_OPTIONS.put("storeKey", "true");
-      KEYTAB_KERBEROS_OPTIONS.put("refreshKrb5Config", "true");
+      if (ibmJava) {
+        KEYTAB_KERBEROS_OPTIONS.put("credsType", "both");
+      } else {
+        KEYTAB_KERBEROS_OPTIONS.put("doNotPrompt", "true");
+        KEYTAB_KERBEROS_OPTIONS.put("useKeyTab", "true");
+        KEYTAB_KERBEROS_OPTIONS.put("storeKey", "true");
+        KEYTAB_KERBEROS_OPTIONS.put("refreshKrb5Config", "true");
+      }
       KEYTAB_KERBEROS_OPTIONS.putAll(BASIC_JAAS_OPTIONS);      
     }
     private static final AppConfigurationEntry KEYTAB_KERBEROS_LOGIN =
@@ -462,7 +486,12 @@ public class UserGroupInformation {
       } else if (USER_KERBEROS_CONFIG_NAME.equals(appName)) {
         return USER_KERBEROS_CONF;
       } else if (KEYTAB_KERBEROS_CONFIG_NAME.equals(appName)) {
-        KEYTAB_KERBEROS_OPTIONS.put("keyTab", keytabFile);
+        if (ibmJava) {
+          KEYTAB_KERBEROS_OPTIONS.put("useKeytab",
+              prependFileAuthority(keytabFile));
+        } else {
+          KEYTAB_KERBEROS_OPTIONS.put("keyTab", keytabFile);
+        }
         KEYTAB_KERBEROS_OPTIONS.put("principal", keytabPrincipal);
         return KEYTAB_KERBEROS_CONF;
       }
@@ -470,6 +499,11 @@ public class UserGroupInformation {
     }
   }
 
+  private static String prependFileAuthority(String keytabPath) {
+    return keytabPath.startsWith("file://") ? keytabPath
+        : "file://" + keytabPath;
+  }
+
   /**
    * Represents a javax.security configuration that is created at runtime.
    */
@@ -666,6 +700,7 @@ public class UserGroupInformation {
         }
         loginUser.spawnAutoRenewalThreadForUserCreds();
       } catch (LoginException le) {
+        LOG.debug("failure to login", le);
         throw new IOException("failure to login", le);
       }
       if (LOG.isDebugEnabled()) {
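
The UserGroupInformation change keys its JAAS options off the JVM vendor, since the IBM JDK's Krb5LoginModule uses different option names (e.g. useDefaultCcache rather than useTicketCache). A tiny sketch of that vendor-dependent option selection (userKerberosOptions is an illustrative stand-in, not the UGI code itself):

  import java.util.HashMap;
  import java.util.Map;

  public class KerberosJaasOptions {
    private static final boolean IBM_JAVA =
        System.getProperty("java.vendor").contains("IBM");

    // Vendor-dependent ticket-cache options, mirroring the change above:
    // IBM's Krb5LoginModule expects useDefaultCcache, Oracle's useTicketCache.
    static Map<String, String> userKerberosOptions() {
      Map<String, String> opts = new HashMap<String, String>();
      if (IBM_JAVA) {
        opts.put("useDefaultCcache", "true");
      } else {
        opts.put("doNotPrompt", "true");
        opts.put("useTicketCache", "true");
        opts.put("renewTGT", "true");
      }
      return opts;
    }

    public static void main(String[] args) {
      System.out.println(userKerberosOptions());
    }
  }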

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java Sat Feb  9 21:29:44 2013
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.util;
 
+import java.io.Closeable;
 import java.io.IOException;
 import java.io.InputStream;
 
@@ -39,7 +40,7 @@ import org.apache.hadoop.io.Text;
  */
 @InterfaceAudience.LimitedPrivate({"MapReduce"})
 @InterfaceStability.Unstable
-public class LineReader {
+public class LineReader implements Closeable {
   private static final int DEFAULT_BUFFER_SIZE = 64 * 1024;
   private int bufferSize = DEFAULT_BUFFER_SIZE;
   private InputStream in;

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java Sat Feb  9 21:29:44 2013
@@ -23,8 +23,6 @@ import java.io.StringWriter;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.text.DateFormat;
-import java.text.DecimalFormat;
-import java.text.NumberFormat;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -34,12 +32,13 @@ import java.util.List;
 import java.util.Locale;
 import java.util.StringTokenizer;
 
-import com.google.common.net.InetAddresses;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.net.NetUtils;
 
+import com.google.common.net.InetAddresses;
+
 /**
  * General string utils
  */
@@ -52,13 +51,6 @@ public class StringUtils {
    */
   public static final int SHUTDOWN_HOOK_PRIORITY = 0;
 
-  private static final DecimalFormat decimalFormat;
-  static {
-          NumberFormat numberFormat = NumberFormat.getNumberInstance(Locale.ENGLISH);
-          decimalFormat = (DecimalFormat) numberFormat;
-          decimalFormat.applyPattern("#.##");
-  }
-
   /**
    * Make a string representation of the exception.
    * @param e The exception to stringify
@@ -87,50 +79,33 @@ public class StringUtils {
     }
     return fullHostname;
   }
-
-  private static DecimalFormat oneDecimal = new DecimalFormat("0.0");
   
   /**
    * Given an integer, return a string that is in an approximate, but human 
    * readable format. 
-   * It uses the bases 'k', 'm', and 'g' for 1024, 1024**2, and 1024**3.
    * @param number the number to format
    * @return a human readable form of the integer
+   *
+   * @deprecated use {@link TraditionalBinaryPrefix#long2String(long, String, int)}.
    */
+  @Deprecated
   public static String humanReadableInt(long number) {
-    long absNumber = Math.abs(number);
-    double result = number;
-    String suffix = "";
-    if (absNumber < 1024) {
-      // since no division has occurred, don't format with a decimal point
-      return String.valueOf(number);
-    } else if (absNumber < 1024 * 1024) {
-      result = number / 1024.0;
-      suffix = "k";
-    } else if (absNumber < 1024 * 1024 * 1024) {
-      result = number / (1024.0 * 1024);
-      suffix = "m";
-    } else {
-      result = number / (1024.0 * 1024 * 1024);
-      suffix = "g";
-    }
-    return oneDecimal.format(result) + suffix;
+    return TraditionalBinaryPrefix.long2String(number, "", 1);
   }
-  
+
+  /** The same as String.format(Locale.ENGLISH, format, objects). */
+  public static String format(final String format, final Object... objects) {
+    return String.format(Locale.ENGLISH, format, objects);
+  }
+
   /**
    * Format a percentage for presentation to the user.
-   * @param done the percentage to format (0.0 to 1.0)
-   * @param digits the number of digits past the decimal point
+   * @param fraction the percentage as a fraction, e.g. 0.1 = 10%
+   * @param decimalPlaces the number of decimal places
    * @return a string representation of the percentage
    */
-  public static String formatPercent(double done, int digits) {
-    DecimalFormat percentFormat = new DecimalFormat("0.00%");
-    double scale = Math.pow(10.0, digits+2);
-    double rounded = Math.floor(done * scale);
-    percentFormat.setDecimalSeparatorAlwaysShown(false);
-    percentFormat.setMinimumFractionDigits(digits);
-    percentFormat.setMaximumFractionDigits(digits);
-    return percentFormat.format(rounded / scale);
+  public static String formatPercent(double fraction, int decimalPlaces) {
+    return format("%." + decimalPlaces + "f%%", fraction*100);
   }
   
   /**
@@ -165,7 +140,7 @@ public class StringUtils {
     }
     StringBuilder s = new StringBuilder(); 
     for(int i = start; i < end; i++) {
-      s.append(String.format("%02x", bytes[i]));
+      s.append(format("%02x", bytes[i]));
     }
     return s.toString();
   }
@@ -630,18 +605,22 @@ public class StringUtils {
    * TraditionalBinaryPrefix symbol are case insensitive. 
    */
   public static enum TraditionalBinaryPrefix {
-    KILO(1024),
-    MEGA(KILO.value << 10),
-    GIGA(MEGA.value << 10),
-    TERA(GIGA.value << 10),
-    PETA(TERA.value << 10),
-    EXA(PETA.value << 10);
+    KILO(10),
+    MEGA(KILO.bitShift + 10),
+    GIGA(MEGA.bitShift + 10),
+    TERA(GIGA.bitShift + 10),
+    PETA(TERA.bitShift + 10),
+    EXA (PETA.bitShift + 10);
 
     public final long value;
     public final char symbol;
+    public final int bitShift;
+    public final long bitMask;
 
-    TraditionalBinaryPrefix(long value) {
-      this.value = value;
+    private TraditionalBinaryPrefix(int bitShift) {
+      this.bitShift = bitShift;
+      this.value = 1L << bitShift;
+      this.bitMask = this.value - 1L;
       this.symbol = toString().charAt(0);
     }
 
@@ -692,8 +671,58 @@ public class StringUtils {
         return num * prefix;
       }
     }
+
+    /**
+     * Convert a long integer to a string with traditional binary prefix.
+     * 
+     * @param n the value to be converted
+     * @param unit The unit, e.g. "B" for bytes.
+     * @param decimalPlaces The number of decimal places.
+     * @return a string with traditional binary prefix.
+     */
+    public static String long2String(long n, String unit, int decimalPlaces) {
+      if (unit == null) {
+        unit = "";
+      }
+      //take care a special case
+      if (n == Long.MIN_VALUE) {
+        return "-8 " + EXA.symbol + unit;
+      }
+
+      final StringBuilder b = new StringBuilder();
+      //take care negative numbers
+      if (n < 0) {
+        b.append('-');
+        n = -n;
+      }
+      if (n < KILO.value) {
+        //no prefix
+        b.append(n);
+        return (unit.isEmpty()? b: b.append(" ").append(unit)).toString();
+      } else {
+        //find traditional binary prefix
+        int i = 0;
+        for(; i < values().length && n >= values()[i].value; i++);
+        TraditionalBinaryPrefix prefix = values()[i - 1];
+
+        if ((n & prefix.bitMask) == 0) {
+          //exact division
+          b.append(n >> prefix.bitShift);
+        } else {
+          final String  format = "%." + decimalPlaces + "f";
+          String s = format(format, n/(double)prefix.value);
+          //check a special rounding up case
+          if (s.startsWith("1024")) {
+            prefix = values()[i];
+            s = format(format, n/(double)prefix.value);
+          }
+          b.append(s);
+        }
+        return b.append(' ').append(prefix.symbol).append(unit).toString();
+      }
+    }
   }
-  
+
     /**
      * Escapes HTML Special characters present in the string.
      * @param string
@@ -731,32 +760,16 @@ public class StringUtils {
     }
 
   /**
-   * Return an abbreviated English-language desc of the byte length
+   * @return a byte description of the given long interger value.
    */
   public static String byteDesc(long len) {
-    double val = 0.0;
-    String ending = "";
-    if (len < 1024 * 1024) {
-      val = (1.0 * len) / 1024;
-      ending = " KB";
-    } else if (len < 1024 * 1024 * 1024) {
-      val = (1.0 * len) / (1024 * 1024);
-      ending = " MB";
-    } else if (len < 1024L * 1024 * 1024 * 1024) {
-      val = (1.0 * len) / (1024 * 1024 * 1024);
-      ending = " GB";
-    } else if (len < 1024L * 1024 * 1024 * 1024 * 1024) {
-      val = (1.0 * len) / (1024L * 1024 * 1024 * 1024);
-      ending = " TB";
-    } else {
-      val = (1.0 * len) / (1024L * 1024 * 1024 * 1024 * 1024);
-      ending = " PB";
-    }
-    return limitDecimalTo2(val) + ending;
+    return TraditionalBinaryPrefix.long2String(len, "B", 2);
   }
 
-  public static synchronized String limitDecimalTo2(double d) {
-    return decimalFormat.format(d);
+  /** @deprecated use StringUtils.format("%.2f", d). */
+  @Deprecated
+  public static String limitDecimalTo2(double d) {
+    return format("%.2f", d);
   }
   
   /**

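For readers skimming the StringUtils changes above, a minimal usage sketch of the new TraditionalBinaryPrefix helpers as they stand after this patch. The class name BinaryPrefixDemo is made up for the example; the expected values in the comments are taken from the TestStringUtils cases further down in this commit.

    import org.apache.hadoop.util.StringUtils;
    import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;

    public class BinaryPrefixDemo {
      public static void main(String[] args) {
        // byteDesc(..) now delegates to TraditionalBinaryPrefix.long2String(len, "B", 2).
        System.out.println(StringUtils.byteDesc(3L << 19));                        // 1.50 MB
        // long2String(..) takes an arbitrary unit and number of decimal places.
        System.out.println(TraditionalBinaryPrefix.long2String(3L << 30, "B", 0)); // 3 GB
        // string2long(..) is the parsing direction, e.g. for values such as "100m".
        System.out.println(TraditionalBinaryPrefix.string2long("100m"));           // 104857600
      }
    }
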
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm Sat Feb  9 21:29:44 2013
@@ -325,6 +325,13 @@ Hadoop MapReduce Next Generation - Clust
 | | | How long to keep aggregation logs before deleting them. -1 disables. |
 | | | Be careful, set this too small and you will spam the name node. |
 *-------------------------+-------------------------+------------------------+
+| <<<yarn.log-aggregation.retain-check-interval-seconds>>> | | |
+| | <-1> | |
+| | | Time between checks for aggregated log retention. If set to 0 or a |
+| | | negative value then the value is computed as one-tenth of the |
+| | | aggregated log retention time. |
+| | | Be careful, set this too small and you will spam the name node. |
+*-------------------------+-------------------------+------------------------+
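A rough sketch of how the fallback described in the new row could be read from a Configuration object. This mirrors the documented behaviour, not the actual YARN code path, and the retention property name yarn.log-aggregation.retain-seconds is an assumption taken from the surrounding table rather than something shown in this diff.

    import org.apache.hadoop.conf.Configuration;

    public class LogRetentionCheckIntervalDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // -1 is the documented default for the new check-interval property.
        long checkInterval = conf.getLong(
            "yarn.log-aggregation.retain-check-interval-seconds", -1);
        if (checkInterval <= 0) {
          // Documented fallback: one-tenth of the aggregated log retention time
          // (retention property name assumed, see the note above).
          long retention = conf.getLong("yarn.log-aggregation.retain-seconds", -1);
          checkInterval = retention / 10;
        }
        System.out.println("effective log-retention check interval (s): " + checkInterval);
      }
    }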
 
 
 

Propchange: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/core/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/core:r1441206-1444434

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java Sat Feb  9 21:29:44 2013
@@ -304,6 +304,46 @@ public class TestFsShellReturnCode {
   }
   
   @Test
+  public void testRmWithNonexistentGlob() throws Exception {
+    Configuration conf = new Configuration();
+    FsShell shell = new FsShell();
+    shell.setConf(conf);
+    final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+    final PrintStream err = new PrintStream(bytes);
+    final PrintStream oldErr = System.err;
+    System.setErr(err);
+    final String results;
+    try {
+      int exit = shell.run(new String[]{"-rm", "nomatch*"});
+      assertEquals(1, exit);
+      results = bytes.toString();
+      assertTrue(results.contains("rm: `nomatch*': No such file or directory"));
+    } finally {
+      IOUtils.closeStream(err);
+      System.setErr(oldErr);
+    }
+  }
+
+  @Test
+  public void testRmForceWithNonexistentGlob() throws Exception {
+    Configuration conf = new Configuration();
+    FsShell shell = new FsShell();
+    shell.setConf(conf);
+    final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+    final PrintStream err = new PrintStream(bytes);
+    final PrintStream oldErr = System.err;
+    System.setErr(err);
+    try {
+      int exit = shell.run(new String[]{"-rm", "-f", "nomatch*"});
+      assertEquals(0, exit);
+      assertTrue(bytes.toString().isEmpty());
+    } finally {
+      IOUtils.closeStream(err);
+      System.setErr(oldErr);
+    }
+  }
+
+  @Test
   public void testInvalidDefaultFS() throws Exception {
     // if default fs doesn't exist or is invalid, but the path provided in 
     // arguments is valid - fsshell should work

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java Sat Feb  9 21:29:44 2013
@@ -28,6 +28,7 @@ import java.net.URI;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Shell;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -46,8 +47,18 @@ public class TestHarFileSystemBasics {
 
   private static final String ROOT_PATH = System.getProperty("test.build.data",
       "build/test/data");
-  private static final Path rootPath = new Path(
-      new File(ROOT_PATH).getAbsolutePath() + "/localfs");
+  private static final Path rootPath;
+  static {
+    String root = new Path(new File(ROOT_PATH).getAbsolutePath(), "localfs")
+      .toUri().getPath();
+    // On Windows, strip the drive specifier, which would otherwise make the
+    // HAR URI invalid and cause tests to fail.
+    if (Shell.WINDOWS) {
+      root = root.substring(root.indexOf(':') + 1);
+    }
+    rootPath = new Path(root);
+  }
+
   // NB: .har suffix is necessary
   private static final Path harPath = new Path(rootPath, "path1/path2/my.har");
 

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java Sat Feb  9 21:29:44 2013
@@ -45,19 +45,39 @@ public class TestListFiles {
 
   final protected static Configuration conf = new Configuration();
   protected static FileSystem fs;
-  final protected static Path TEST_DIR = getTestDir();
+  protected static Path TEST_DIR;
   final private static int FILE_LEN = 10;
-  final private static Path FILE1 = new Path(TEST_DIR, "file1");
-  final private static Path DIR1 = new Path(TEST_DIR, "dir1");
-  final private static Path FILE2 = new Path(DIR1, "file2");
-  final private static Path FILE3 = new Path(DIR1, "file3");
+  private static Path FILE1;
+  private static Path DIR1;
+  private static Path FILE2;
+  private static Path FILE3;
+
+  static {
+    setTestPaths(new Path(
+      System.getProperty("test.build.data", "build/test/data/work-dir/localfs"),
+      "main_"));
+  }
 
   protected static Path getTestDir() {
-    return new Path(
-      System.getProperty("test.build.data","build/test/data/work-dir/localfs"),
-      "main_");
+    return TEST_DIR;
   }
-  
+
+  /**
+   * Sets the root testing directory and reinitializes any additional test paths
+   * that are under the root.  This method is intended to be called from a
+   * subclass's @BeforeClass method if there is a need to override the testing
+   * directory.
+   * 
+   * @param testDir the root testing directory
+   */
+  protected static void setTestPaths(Path testDir) {
+    TEST_DIR = testDir;
+    FILE1 = new Path(TEST_DIR, "file1");
+    DIR1 = new Path(TEST_DIR, "dir1");
+    FILE2 = new Path(DIR1, "file2");
+    FILE3 = new Path(DIR1, "file3");
+  }
+
   @BeforeClass
   public static void testSetUp() throws Exception {
     fs = FileSystem.getLocal(conf);

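The new setTestPaths(..) hook above is meant to let subclasses re-root the shared test paths; a hypothetical sketch of that pattern follows. The subclass name, the directory, and the reliance on JUnit 4 skipping a shadowed @BeforeClass method are assumptions for illustration, not part of this commit.

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.junit.BeforeClass;

    public class TestListFilesUnderCustomRoot extends TestListFiles {

      // Shadows the base class's @BeforeClass testSetUp() so the shared
      // FILE1/DIR1/FILE2/FILE3 paths are re-rooted before any test runs; any
      // additional setup done by the base method would have to be repeated here.
      @BeforeClass
      public static void testSetUp() throws Exception {
        setTestPaths(new Path("/tmp/list-files-custom-root"));
        fs = FileSystem.getLocal(conf);
      }
    }
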
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java Sat Feb  9 21:29:44 2013
@@ -28,6 +28,7 @@ import java.io.*;
 import static org.junit.Assert.*;
 import static org.junit.Assume.assumeTrue;
 
+import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -38,8 +39,9 @@ public class TestLocalFileSystem {
   private static final String TEST_ROOT_DIR
     = System.getProperty("test.build.data","build/test/data") + "/work-dir/localfs";
 
+  private final File base = new File(TEST_ROOT_DIR);
   private Configuration conf;
-  private FileSystem fileSys;
+  private LocalFileSystem fileSys;
 
   private void cleanupFile(FileSystem fs, Path name) throws IOException {
     assertTrue(fs.exists(name));
@@ -53,6 +55,13 @@ public class TestLocalFileSystem {
     fileSys = FileSystem.getLocal(conf);
     fileSys.delete(new Path(TEST_ROOT_DIR), true);
   }
+  
+  @After
+  public void after() throws IOException {
+    base.setWritable(true);
+    FileUtil.fullyDelete(base);
+    assertTrue(!base.exists());
+  }
 
   /**
    * Test the capability of setting the working directory.
@@ -269,10 +278,83 @@ public class TestLocalFileSystem {
     LocalFileSystem fs = FileSystem.getLocal(conf);
     File colonFile = new File(TEST_ROOT_DIR, "foo:bar");
     colonFile.mkdirs();
-    colonFile.createNewFile();
     FileStatus[] stats = fs.listStatus(new Path(TEST_ROOT_DIR));
     assertEquals("Unexpected number of stats", 1, stats.length);
     assertEquals("Bad path from stat", colonFile.getAbsolutePath(),
         stats[0].getPath().toUri().getPath());
   }
+  
+  @Test
+  public void testReportChecksumFailure() throws IOException {
+    base.mkdirs();
+    assertTrue(base.exists() && base.isDirectory());
+    
+    final File dir1 = new File(base, "dir1");
+    final File dir2 = new File(dir1, "dir2");
+    dir2.mkdirs();
+    assertTrue(dir2.exists() && dir2.canWrite());
+    
+    final String dataFileName = "corruptedData";
+    final Path dataPath = new Path(new File(dir2, dataFileName).toURI());
+    final Path checksumPath = fileSys.getChecksumFile(dataPath);
+    final FSDataOutputStream fsdos = fileSys.create(dataPath);
+    try {
+      fsdos.writeUTF("foo");
+    } finally {
+      fsdos.close();
+    }
+    assertTrue(fileSys.pathToFile(dataPath).exists());
+    final long dataFileLength = fileSys.getFileStatus(dataPath).getLen();
+    assertTrue(dataFileLength > 0);
+    
+    // check that the checksum file is created and not empty:
+    assertTrue(fileSys.pathToFile(checksumPath).exists());
+    final long checksumFileLength = fileSys.getFileStatus(checksumPath).getLen();
+    assertTrue(checksumFileLength > 0);
+    
+    // this is a hack to force the #reportChecksumFailure() method to stop
+    // climbing up at the 'base' directory and to use 'dir1/bad_files' as the
+    // storage location for the corrupted files:
+    base.setWritable(false);
+    
+    FSDataInputStream dataFsdis = fileSys.open(dataPath);
+    FSDataInputStream checksumFsdis = fileSys.open(checksumPath);
+    
+    boolean retryIsNecessary = fileSys.reportChecksumFailure(dataPath, dataFsdis, 0, checksumFsdis, 0);
+    assertTrue(!retryIsNecessary);
+    
+    // the data file should be moved:
+    assertTrue(!fileSys.pathToFile(dataPath).exists());
+    // the checksum file should be moved:
+    assertTrue(!fileSys.pathToFile(checksumPath).exists());
+    
+    // check that the files exist in the new location where they were moved:
+    File[] dir1files = dir1.listFiles(new FileFilter() {
+      @Override
+      public boolean accept(File pathname) {
+        return pathname != null && !pathname.getName().equals("dir2");
+      }
+    });
+    assertTrue(dir1files != null);
+    assertTrue(dir1files.length == 1);
+    File badFilesDir = dir1files[0];
+    
+    File[] badFiles = badFilesDir.listFiles();
+    assertTrue(badFiles != null);
+    assertTrue(badFiles.length == 2);
+    boolean dataFileFound = false;
+    boolean checksumFileFound = false;
+    for (File badFile: badFiles) {
+      if (badFile.getName().startsWith(dataFileName)) {
+        assertTrue(dataFileLength == badFile.length());
+        dataFileFound = true;
+      } else if (badFile.getName().contains(dataFileName + ".crc")) {
+        assertTrue(checksumFileLength == badFile.length());
+        checksumFileFound = true;
+      }
+    }
+    assertTrue(dataFileFound);
+    assertTrue(checksumFileFound);
+  }
+  
 }

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestBoundedByteArrayOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestBoundedByteArrayOutputStream.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestBoundedByteArrayOutputStream.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestBoundedByteArrayOutputStream.java Sat Feb  9 21:29:44 2013
@@ -88,4 +88,61 @@ public class TestBoundedByteArrayOutputS
     assertTrue("Writing beyond limit did not throw an exception",
         caughtException);
   }
+  
+  
+  static class ResettableBoundedByteArrayOutputStream 
+  extends BoundedByteArrayOutputStream {
+
+    public ResettableBoundedByteArrayOutputStream(int capacity) {
+      super(capacity);
+    }
+
+    public void resetBuffer(byte[] buf, int offset, int length) {
+      super.resetBuffer(buf, offset, length);
+    }
+    
+  }
+  
+  public void testResetBuffer() throws IOException {
+    
+    ResettableBoundedByteArrayOutputStream stream = 
+      new ResettableBoundedByteArrayOutputStream(SIZE);
+
+    // Write to the stream, get the data back and check for contents
+    stream.write(INPUT, 0, SIZE);
+    assertTrue("Array Contents Mismatch",
+        Arrays.equals(INPUT, stream.getBuffer()));
+    
+    // Try writing beyond end of buffer. Should throw an exception
+    boolean caughtException = false;
+    
+    try {
+      stream.write(INPUT[0]);
+    } catch (Exception e) {
+      caughtException = true;
+    }
+    
+    assertTrue("Writing beyond limit did not throw an exception",
+        caughtException);
+    
+    //Reset the stream and try again; this should succeed
+    byte[] newBuf = new byte[SIZE];
+    stream.resetBuffer(newBuf, 0, newBuf.length);
+    assertTrue("Limit did not get reset correctly", 
+        (stream.getLimit() == SIZE));
+    stream.write(INPUT, 0, SIZE);
+    assertTrue("Array Contents Mismatch",
+        Arrays.equals(INPUT, stream.getBuffer()));
+  
+    // Try writing one more byte, should fail
+    caughtException = false;
+    try {
+      stream.write(INPUT[0]);
+    } catch (Exception e) {
+      caughtException = true;
+    }
+    assertTrue("Writing beyond limit did not throw an exception",
+        caughtException);
+  }
+
 }

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSortedMapWritable.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSortedMapWritable.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSortedMapWritable.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSortedMapWritable.java Sat Feb  9 21:29:44 2013
@@ -17,15 +17,20 @@
  */
 package org.apache.hadoop.io;
 
-import java.util.Map;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 
-import junit.framework.TestCase;
+import java.util.Map;
+import org.junit.Test;
 
 /**
  * Tests SortedMapWritable
  */
-public class TestSortedMapWritable extends TestCase {
+public class TestSortedMapWritable {
   /** the test */
+  @Test
   @SuppressWarnings("unchecked")
   public void testSortedMapWritable() {
     Text[] keys = {
@@ -90,6 +95,7 @@ public class TestSortedMapWritable exten
   /**
    * Test that number of "unknown" classes is propagated across multiple copies.
    */
+  @Test
   @SuppressWarnings("deprecation")
   public void testForeignClass() {
     SortedMapWritable inMap = new SortedMapWritable();
@@ -99,4 +105,63 @@ public class TestSortedMapWritable exten
     SortedMapWritable copyOfCopy = new SortedMapWritable(outMap);
     assertEquals(1, copyOfCopy.getNewClasses());
   }
+  
+  /**
+   * Tests whether the equals and hashCode methods still hold their contract.
+   */
+  @Test
+  public void testEqualsAndHashCode() {
+    String failureReason;
+    SortedMapWritable mapA = new SortedMapWritable();
+    SortedMapWritable mapB = new SortedMapWritable();
+    
+    // Sanity checks
+    failureReason = "SortedMapWritable couldn't be initialized. Got null reference";
+    assertNotNull(failureReason, mapA);
+    assertNotNull(failureReason, mapB);
+    
+    // Basic null check
+    assertFalse("equals method returns true when passed null", mapA.equals(null));
+    
+    // When entry set is empty, they should be equal
+    assertTrue("Two empty SortedMapWritables are no longer equal", mapA.equals(mapB));
+    
+    // Setup
+    Text[] keys = {
+        new Text("key1"),
+        new Text("key2")
+    };
+    
+    BytesWritable[] values = {
+        new BytesWritable("value1".getBytes()),
+        new BytesWritable("value2".getBytes())
+    };
+    
+    mapA.put(keys[0], values[0]);
+    mapB.put(keys[1], values[1]);
+    
+    // entrySets are different
+    failureReason = "Two SortedMapWritables with different data are now equal";
+    assertTrue(failureReason, mapA.hashCode() != mapB.hashCode());
+    assertTrue(failureReason, !mapA.equals(mapB));
+    assertTrue(failureReason, !mapB.equals(mapA));
+    
+    mapA.put(keys[1], values[1]);
+    mapB.put(keys[0], values[0]);
+    
+    // entrySets are now same
+    failureReason = "Two SortedMapWritables with same entry sets formed in different order are now different";
+    assertEquals(failureReason, mapA.hashCode(), mapB.hashCode());
+    assertTrue(failureReason, mapA.equals(mapB));
+    assertTrue(failureReason, mapB.equals(mapA));
+    
+    // Now check entry sets with the same keys but different values
+    mapA.put(keys[0], values[1]);
+    mapA.put(keys[1], values[0]);
+    
+    failureReason = "Two SortedMapWritables with different content are now equal";
+    assertTrue(failureReason, mapA.hashCode() != mapB.hashCode());
+    assertTrue(failureReason, !mapA.equals(mapB));
+    assertTrue(failureReason, !mapB.equals(mapA));
+  }
 }

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java Sat Feb  9 21:29:44 2013
@@ -26,6 +26,7 @@ import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.net.ConnectTimeoutException;
 import org.apache.hadoop.net.NetUtils;
 
 import java.util.Random;
@@ -586,7 +587,7 @@ public class TestIPC {
   private void assertRetriesOnSocketTimeouts(Configuration conf,
       int maxTimeoutRetries) throws IOException, InterruptedException {
     SocketFactory mockFactory = Mockito.mock(SocketFactory.class);
-    doThrow(new SocketTimeoutException()).when(mockFactory).createSocket();
+    doThrow(new ConnectTimeoutException("fake")).when(mockFactory).createSocket();
     Client client = new Client(IntWritable.class, conf, mockFactory);
     InetSocketAddress address = new InetSocketAddress("127.0.0.1", 9090);
     try {

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tools/GetGroupsTestBase.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tools/GetGroupsTestBase.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tools/GetGroupsTestBase.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tools/GetGroupsTestBase.java Sat Feb  9 21:29:44 2013
@@ -108,7 +108,7 @@ public abstract class GetGroupsTestBase 
     for (String group : user.getGroupNames()) {
       expectedOutput += " " + group;
     }
-    return expectedOutput + "\n";
+    return expectedOutput + System.getProperty("line.separator");
   }
   
   private String runTool(Configuration conf, String[] args, boolean success)

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java Sat Feb  9 21:29:44 2013
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.util;
 
+import static org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix.long2String;
+import static org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix.string2long;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
@@ -26,6 +28,7 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.test.UnitTestcaseTimeLimit;
+import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
 import org.junit.Test;
 
 public class TestStringUtils extends UnitTestcaseTimeLimit {
@@ -134,45 +137,34 @@ public class TestStringUtils extends Uni
   
   @Test
   public void testTraditionalBinaryPrefix() throws Exception {
+    //test string2long(..)
     String[] symbol = {"k", "m", "g", "t", "p", "e"};
     long m = 1024;
     for(String s : symbol) {
-      assertEquals(0, StringUtils.TraditionalBinaryPrefix.string2long(0 + s));
-      assertEquals(m, StringUtils.TraditionalBinaryPrefix.string2long(1 + s));
+      assertEquals(0, string2long(0 + s));
+      assertEquals(m, string2long(1 + s));
       m *= 1024;
     }
     
-    assertEquals(0L, StringUtils.TraditionalBinaryPrefix.string2long("0"));
-    assertEquals(1024L, StringUtils.TraditionalBinaryPrefix.string2long("1k"));
-    assertEquals(-1024L, StringUtils.TraditionalBinaryPrefix.string2long("-1k"));
-    assertEquals(1259520L,
-        StringUtils.TraditionalBinaryPrefix.string2long("1230K"));
-    assertEquals(-1259520L,
-        StringUtils.TraditionalBinaryPrefix.string2long("-1230K"));
-    assertEquals(104857600L,
-        StringUtils.TraditionalBinaryPrefix.string2long("100m"));
-    assertEquals(-104857600L,
-        StringUtils.TraditionalBinaryPrefix.string2long("-100M"));
-    assertEquals(956703965184L,
-        StringUtils.TraditionalBinaryPrefix.string2long("891g"));
-    assertEquals(-956703965184L,
-        StringUtils.TraditionalBinaryPrefix.string2long("-891G"));
-    assertEquals(501377302265856L,
-        StringUtils.TraditionalBinaryPrefix.string2long("456t"));
-    assertEquals(-501377302265856L,
-        StringUtils.TraditionalBinaryPrefix.string2long("-456T"));
-    assertEquals(11258999068426240L,
-        StringUtils.TraditionalBinaryPrefix.string2long("10p"));
-    assertEquals(-11258999068426240L,
-        StringUtils.TraditionalBinaryPrefix.string2long("-10P"));
-    assertEquals(1152921504606846976L,
-        StringUtils.TraditionalBinaryPrefix.string2long("1e"));
-    assertEquals(-1152921504606846976L,
-        StringUtils.TraditionalBinaryPrefix.string2long("-1E"));
+    assertEquals(0L, string2long("0"));
+    assertEquals(1024L, string2long("1k"));
+    assertEquals(-1024L, string2long("-1k"));
+    assertEquals(1259520L, string2long("1230K"));
+    assertEquals(-1259520L, string2long("-1230K"));
+    assertEquals(104857600L, string2long("100m"));
+    assertEquals(-104857600L, string2long("-100M"));
+    assertEquals(956703965184L, string2long("891g"));
+    assertEquals(-956703965184L, string2long("-891G"));
+    assertEquals(501377302265856L, string2long("456t"));
+    assertEquals(-501377302265856L, string2long("-456T"));
+    assertEquals(11258999068426240L, string2long("10p"));
+    assertEquals(-11258999068426240L, string2long("-10P"));
+    assertEquals(1152921504606846976L, string2long("1e"));
+    assertEquals(-1152921504606846976L, string2long("-1E"));
 
     String tooLargeNumStr = "10e";
     try {
-      StringUtils.TraditionalBinaryPrefix.string2long(tooLargeNumStr);
+      string2long(tooLargeNumStr);
       fail("Test passed for a number " + tooLargeNumStr + " too large");
     } catch (IllegalArgumentException e) {
       assertEquals(tooLargeNumStr + " does not fit in a Long", e.getMessage());
@@ -180,7 +172,7 @@ public class TestStringUtils extends Uni
 
     String tooSmallNumStr = "-10e";
     try {
-      StringUtils.TraditionalBinaryPrefix.string2long(tooSmallNumStr);
+      string2long(tooSmallNumStr);
       fail("Test passed for a number " + tooSmallNumStr + " too small");
     } catch (IllegalArgumentException e) {
       assertEquals(tooSmallNumStr + " does not fit in a Long", e.getMessage());
@@ -189,7 +181,7 @@ public class TestStringUtils extends Uni
     String invalidFormatNumStr = "10kb";
     char invalidPrefix = 'b';
     try {
-      StringUtils.TraditionalBinaryPrefix.string2long(invalidFormatNumStr);
+      string2long(invalidFormatNumStr);
       fail("Test passed for a number " + invalidFormatNumStr
           + " has invalid format");
     } catch (IllegalArgumentException e) {
@@ -199,6 +191,74 @@ public class TestStringUtils extends Uni
           e.getMessage());
     }
 
+    //test long2String(..)
+    assertEquals("0", long2String(0, null, 2));
+    for(int decimalPlace = 0; decimalPlace < 2; decimalPlace++) {
+      for(int n = 1; n < TraditionalBinaryPrefix.KILO.value; n++) {
+        assertEquals(n + "", long2String(n, null, decimalPlace));
+        assertEquals(-n + "", long2String(-n, null, decimalPlace));
+      }
+      assertEquals("1 K", long2String(1L << 10, null, decimalPlace));
+      assertEquals("-1 K", long2String(-1L << 10, null, decimalPlace));
+    }
+
+    assertEquals("8.00 E", long2String(Long.MAX_VALUE, null, 2));
+    assertEquals("8.00 E", long2String(Long.MAX_VALUE - 1, null, 2));
+    assertEquals("-8 E", long2String(Long.MIN_VALUE, null, 2));
+    assertEquals("-8.00 E", long2String(Long.MIN_VALUE + 1, null, 2));
+
+    final String[] zeros = {" ", ".0 ", ".00 "};
+    for(int decimalPlace = 0; decimalPlace < zeros.length; decimalPlace++) {
+      final String trailingZeros = zeros[decimalPlace]; 
+
+      for(int e = 11; e < Long.SIZE - 1; e++) {
+        final TraditionalBinaryPrefix p
+            = TraditionalBinaryPrefix.values()[e/10 - 1]; 
+  
+        { // n = 2^e
+          final long n = 1L << e;
+          final String expected = (n/p.value) + " " + p.symbol;
+          assertEquals("n=" + n, expected, long2String(n, null, 2));
+        }
+  
+        { // n = 2^e + 1
+          final long n = (1L << e) + 1;
+          final String expected = (n/p.value) + trailingZeros + p.symbol;
+          assertEquals("n=" + n, expected, long2String(n, null, decimalPlace));
+        }
+  
+        { // n = 2^e - 1
+          final long n = (1L << e) - 1;
+          final String expected = ((n+1)/p.value) + trailingZeros + p.symbol;
+          assertEquals("n=" + n, expected, long2String(n, null, decimalPlace));
+        }
+      }
+    }
+
+    assertEquals("1.50 K", long2String(3L << 9, null, 2));
+    assertEquals("1.5 K", long2String(3L << 9, null, 1));
+    assertEquals("1.50 M", long2String(3L << 19, null, 2));
+    assertEquals("2 M", long2String(3L << 19, null, 0));
+    assertEquals("3 G", long2String(3L << 30, null, 2));
+
+    // test byteDesc(..)
+    assertEquals("0 B", StringUtils.byteDesc(0));
+    assertEquals("-100 B", StringUtils.byteDesc(-100));
+    assertEquals("1 KB", StringUtils.byteDesc(1024));
+    assertEquals("1.50 KB", StringUtils.byteDesc(3L << 9));
+    assertEquals("1.50 MB", StringUtils.byteDesc(3L << 19));
+    assertEquals("3 GB", StringUtils.byteDesc(3L << 30));
+    
+    // test formatPercent(..)
+    assertEquals("10%", StringUtils.formatPercent(0.1, 0));
+    assertEquals("10.0%", StringUtils.formatPercent(0.1, 1));
+    assertEquals("10.00%", StringUtils.formatPercent(0.1, 2));
+
+    assertEquals("1%", StringUtils.formatPercent(0.00543, 0));
+    assertEquals("0.5%", StringUtils.formatPercent(0.00543, 1));
+    assertEquals("0.54%", StringUtils.formatPercent(0.00543, 2));
+    assertEquals("0.543%", StringUtils.formatPercent(0.00543, 3));
+    assertEquals("0.5430%", StringUtils.formatPercent(0.00543, 4));
   }
 
   @Test
@@ -314,10 +374,9 @@ public class TestStringUtils extends Uni
         }
         long et = System.nanoTime();
         if (outer > 3) {
-          System.out.println(
-            (useOurs ? "StringUtils impl" : "Java impl") +
-            " #" + outer + ":" +
-            (et - st)/1000000 + "ms");
+          System.out.println((useOurs ? "StringUtils impl" : "Java impl")
+              + " #" + outer + ":" + (et - st)/1000000 + "ms, components="
+              + components);
         }
       }
     }


