hadoop-mapreduce-commits mailing list archives

From: a..@apache.org
Subject: svn commit: r1196458 [7/19] - in /hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project: ./ assembly/ bin/ conf/ dev-support/ hadoop-mapreduce-client/ hadoop-mapreduce-client/hadoop-mapreduce-client-app/ hadoop-mapreduce-client/hadoop-mapreduce-cli...
Date: Wed, 02 Nov 2011 05:35:03 GMT
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java Wed Nov  2 05:34:31 2011
@@ -111,32 +111,48 @@ public class FileOutputCommitter extends
    * @param context the job's context
    */
   public void commitJob(JobContext context) throws IOException {
-    //delete the task temp directory from the current jobtempdir
-    Path tmpDir = new Path(outputPath, getJobAttemptBaseDirName(context) +
-        Path.SEPARATOR + FileOutputCommitter.TEMP_DIR_NAME);
-    FileSystem fileSys = tmpDir.getFileSystem(context.getConfiguration());
-    if (fileSys.exists(tmpDir)) {
-      fileSys.delete(tmpDir, true);
-    } else {
-      LOG.warn("Task temp dir could not be deleted " + tmpDir);
-    }
-    
-	  //move the job output to final place
-    Path jobOutputPath = 
-        new Path(outputPath, getJobAttemptBaseDirName(context));
-	  moveJobOutputs(outputFileSystem, outputPath, jobOutputPath);
-	  
-    // delete the _temporary folder and create a _done file in the o/p folder
-    cleanupJob(context);
-    if (shouldMarkOutputDir(context.getConfiguration())) {
-      markOutputDirSuccessful(context);
+    if (outputPath != null) {
+      //delete the task temp directory from the current jobtempdir
+      Path tmpDir = new Path(outputPath, getJobAttemptBaseDirName(context) +
+          Path.SEPARATOR + FileOutputCommitter.TEMP_DIR_NAME);
+      FileSystem fileSys = tmpDir.getFileSystem(context.getConfiguration());
+      if (fileSys.exists(tmpDir)) {
+        fileSys.delete(tmpDir, true);
+      } else {
+        LOG.warn("Task temp dir could not be deleted " + tmpDir);
+      }
+
+      //move the job output to final place
+      Path jobOutputPath = 
+          new Path(outputPath, getJobAttemptBaseDirName(context));
+      moveJobOutputs(outputFileSystem, jobOutputPath, outputPath, jobOutputPath);
+
+      // delete the _temporary folder and create a _done file in the o/p folder
+      cleanupJob(context);
+      if (shouldMarkOutputDir(context.getConfiguration())) {
+        markOutputDirSuccessful(context);
+      }
     }
   }
 
-  private void moveJobOutputs(FileSystem fs,
+  /**
+   * Move job output to final location 
+   * @param fs Filesystem handle
+   * @param origJobOutputPath The original location of the job output
+   * Required to generate the relative path for correct moving of data. 
+   * @param finalOutputDir The final output directory to which the job output 
+   *                       needs to be moved
+   * @param jobOutput The current job output directory being moved 
+   * @throws IOException
+   */
+  private void moveJobOutputs(FileSystem fs, final Path origJobOutputPath, 
       Path finalOutputDir, Path jobOutput) throws IOException {
+    LOG.debug("Told to move job output from " + jobOutput
+        + " to " + finalOutputDir + 
+        " and orig job output path is " + origJobOutputPath);    
     if (fs.isFile(jobOutput)) {
-      Path finalOutputPath = getFinalPath(finalOutputDir, jobOutput, jobOutput);
+      Path finalOutputPath = 
+          getFinalPath(finalOutputDir, jobOutput, origJobOutputPath);
       if (!fs.rename(jobOutput, finalOutputPath)) {
         if (!fs.delete(finalOutputPath, true)) {
           throw new IOException("Failed to delete earlier output of job");
@@ -145,14 +161,18 @@ public class FileOutputCommitter extends
           throw new IOException("Failed to save output of job");
         }
       }
-      LOG.debug("Moved " + jobOutput + " to " + finalOutputPath);
+      LOG.debug("Moved job output file from " + jobOutput + " to " + 
+          finalOutputPath);
     } else if (fs.getFileStatus(jobOutput).isDirectory()) {
+      LOG.debug("Job output file " + jobOutput + " is a dir");
       FileStatus[] paths = fs.listStatus(jobOutput);
-      Path finalOutputPath = getFinalPath(finalOutputDir, jobOutput, jobOutput);
+      Path finalOutputPath = 
+          getFinalPath(finalOutputDir, jobOutput, origJobOutputPath);
       fs.mkdirs(finalOutputPath);
+      LOG.debug("Creating dirs along job output path " + finalOutputPath);
       if (paths != null) {
         for (FileStatus path : paths) {
-          moveJobOutputs(fs, finalOutputDir, path.getPath());
+          moveJobOutputs(fs, origJobOutputPath, finalOutputDir, path.getPath());
         }
       }
     }
@@ -233,6 +253,8 @@ public class FileOutputCommitter extends
   throws IOException {
     TaskAttemptID attemptId = context.getTaskAttemptID();
     context.progress();
+    LOG.debug("Told to move taskoutput from " + taskOutput
+        + " to " + jobOutputDir);    
     if (fs.isFile(taskOutput)) {
       Path finalOutputPath = getFinalPath(jobOutputDir, taskOutput, 
                                           workPath);
@@ -248,9 +270,11 @@ public class FileOutputCommitter extends
       }
       LOG.debug("Moved " + taskOutput + " to " + finalOutputPath);
     } else if(fs.getFileStatus(taskOutput).isDirectory()) {
+      LOG.debug("Taskoutput " + taskOutput + " is a dir");
       FileStatus[] paths = fs.listStatus(taskOutput);
       Path finalOutputPath = getFinalPath(jobOutputDir, taskOutput, workPath);
       fs.mkdirs(finalOutputPath);
+      LOG.debug("Creating dirs along path " + finalOutputPath);
       if (paths != null) {
         for (FileStatus path : paths) {
           moveTaskOutputs(context, fs, jobOutputDir, path.getPath());
@@ -281,12 +305,17 @@ public class FileOutputCommitter extends
    * @throws IOException
    */
   private Path getFinalPath(Path jobOutputDir, Path taskOutput, 
-                            Path taskOutputPath) throws IOException {
-    URI taskOutputUri = taskOutput.toUri();
-    URI relativePath = taskOutputPath.toUri().relativize(taskOutputUri);
+                            Path taskOutputPath) throws IOException {    
+    URI taskOutputUri = taskOutput.makeQualified(outputFileSystem.getUri(), 
+        outputFileSystem.getWorkingDirectory()).toUri();
+    URI taskOutputPathUri = 
+        taskOutputPath.makeQualified(
+            outputFileSystem.getUri(),
+            outputFileSystem.getWorkingDirectory()).toUri();
+    URI relativePath = taskOutputPathUri.relativize(taskOutputUri);
     if (taskOutputUri == relativePath) {
       throw new IOException("Can not get the relative path: base = " + 
-          taskOutputPath + " child = " + taskOutput);
+          taskOutputPathUri + " child = " + taskOutputUri);
     }
     if (relativePath.getPath().length() > 0) {
       return new Path(jobOutputDir, relativePath.getPath());
@@ -334,9 +363,12 @@ public class FileOutputCommitter extends
 
     Path pathToRecover = 
         new Path(outputPath, getJobAttemptBaseDirName(previousAttempt));
+    LOG.debug("Trying to recover task from " + pathToRecover
+        + " into " + jobOutputPath);
     if (outputFileSystem.exists(pathToRecover)) {
       // Move the task outputs to their final place
-      moveJobOutputs(outputFileSystem, jobOutputPath, pathToRecover);
+      moveJobOutputs(outputFileSystem, 
+          pathToRecover, jobOutputPath, pathToRecover);
       LOG.info("Saved output of job to " + jobOutputPath);
     }
   }
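
The substantive fix above is in getFinalPath(): java.net.URI.relativize() returns the child URI unchanged whenever the two URIs differ in scheme or authority, which is exactly what happens when one side is fully qualified and the other is not, and the method then throws "Can not get the relative path". Qualifying both URIs against the output filesystem first makes the comparison well defined. A minimal, self-contained sketch (namenode address and paths invented) of the two cases:

    import java.net.URI;

    public class RelativizeDemo {
      public static void main(String[] args) {
        URI base = URI.create("hdfs://nn:8020/out/_temporary/0/");

        // Mixed qualification: the child has no scheme/authority, so
        // relativize() gives up and returns the child unchanged.
        URI unqualifiedChild = URI.create("/out/_temporary/0/part-r-00000");
        System.out.println(base.relativize(unqualifiedChild));
        // -> /out/_temporary/0/part-r-00000 (not a relative path)

        // Both sides qualified, which is what makeQualified() guarantees:
        // the result is the relative path appended to jobOutputDir.
        URI qualifiedChild =
            URI.create("hdfs://nn:8020/out/_temporary/0/part-r-00000");
        System.out.println(base.relativize(qualifiedChild));
        // -> part-r-00000
      }
    }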

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/NullOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/NullOutputFormat.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/NullOutputFormat.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/NullOutputFormat.java Wed Nov  2 05:34:31 2011
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.mapreduce.lib.output;
 
+import java.io.IOException;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.mapreduce.JobContext;
@@ -56,6 +58,17 @@ public class NullOutputFormat<K, V> exte
       }
       public void setupJob(JobContext jobContext) { }
       public void setupTask(TaskAttemptContext taskContext) { }
+
+      @Override
+      public boolean isRecoverySupported() {
+        return true;
+      }
+
+      @Override
+      public void recoverTask(TaskAttemptContext taskContext)
+          throws IOException {
+        // Nothing to do for recovering the task.
+      }
     };
   }
 }

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/protocol/ClientProtocol.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/protocol/ClientProtocol.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/protocol/ClientProtocol.java Wed Nov  2 05:34:31 2011
@@ -39,6 +39,7 @@ import org.apache.hadoop.mapreduce.TaskT
 import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
+import org.apache.hadoop.mapreduce.v2.LogParams;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.authorize.AccessControlList;
@@ -115,6 +116,8 @@ public interface ClientProtocol extends 
    *             MAPREDUCE-2337.
    * Version 37: More efficient serialization format for framework counters
    *             (MAPREDUCE-901)
+   * Version 38: Added getLogFilePath(JobID, TaskAttemptID) as part of 
+   *             MAPREDUCE-3146
    */
   public static final long versionID = 37L;
 
@@ -351,4 +354,16 @@ public interface ClientProtocol extends 
   public void cancelDelegationToken(Token<DelegationTokenIdentifier> token
                                     ) throws IOException,
                                              InterruptedException;
+  
+  /**
+   * Gets the location of the log file for a job if no taskAttemptId is
+   * specified, otherwise gets the log location for the taskAttemptId.
+   * @param jobID the jobId.
+   * @param taskAttemptID the taskAttemptId.
+   * @return log params.
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  public LogParams getLogFileParams(JobID jobID, TaskAttemptID taskAttemptID)
+      throws IOException, InterruptedException;
 }
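
A hedged sketch of how a caller might use the new RPC. The ClientProtocol method signature is from the hunk above, and the LogParams accessors (getApplicationId, getContainerId, getNodeId, getOwner) appear in the CLI changes later in this commit; the wrapper class itself is invented for illustration:

    import java.io.IOException;

    import org.apache.hadoop.mapreduce.JobID;
    import org.apache.hadoop.mapreduce.TaskAttemptID;
    import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
    import org.apache.hadoop.mapreduce.v2.LogParams;

    class LogLocationLookup {
      // Resolve where the logs live for a task attempt, or for the job as a
      // whole when attempt is null (per the javadoc above).
      static void printLogLocation(ClientProtocol proto, JobID job,
          TaskAttemptID attempt) throws IOException, InterruptedException {
        LogParams params = proto.getLogFileParams(job, attempt);
        System.out.println("application: " + params.getApplicationId());
        System.out.println("container:   " + params.getContainerId());
        System.out.println("node:        " + params.getNodeId());
        System.out.println("owner:       " + params.getOwner());
      }
    }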

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/DelegationTokenRenewal.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/DelegationTokenRenewal.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/DelegationTokenRenewal.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/DelegationTokenRenewal.java Wed Nov  2 05:34:31 2011
@@ -19,8 +19,6 @@
 package org.apache.hadoop.mapreduce.security.token;
 
 import java.io.IOException;
-import java.net.InetAddress;
-import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
 import java.util.Collections;
@@ -37,18 +35,10 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.JobID;
-import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.util.StringUtils;
 
@@ -64,14 +54,14 @@ public class DelegationTokenRenewal {
    *
    */
   private static class DelegationTokenToRenew {
-    public final Token<DelegationTokenIdentifier> token;
+    public final Token<?> token;
     public final JobID jobId;
     public final Configuration conf;
     public long expirationDate;
     public TimerTask timerTask;
     
     public DelegationTokenToRenew(
-        JobID jId, Token<DelegationTokenIdentifier> t, 
+        JobID jId, Token<?> t, 
         Configuration newConf, long newExpirationDate) {
       token = t;
       jobId = jId;
@@ -124,10 +114,9 @@ public class DelegationTokenRenewal {
   
   private static class DelegationTokenCancelThread extends Thread {
     private static class TokenWithConf {
-      Token<DelegationTokenIdentifier> token;
+      Token<?> token;
       Configuration conf;
-      TokenWithConf(Token<DelegationTokenIdentifier> token,  
-          Configuration conf) {
+      TokenWithConf(Token<?> token, Configuration conf) {
         this.token = token;
         this.conf = conf;
       }
@@ -139,7 +128,7 @@ public class DelegationTokenRenewal {
       super("Delegation Token Canceler");
       setDaemon(true);
     }
-    public void cancelToken(Token<DelegationTokenIdentifier> token,  
+    public void cancelToken(Token<?> token,  
         Configuration conf) {
       TokenWithConf tokenWithConf = new TokenWithConf(token, conf);
       while (!queue.offer(tokenWithConf)) {
@@ -158,25 +147,21 @@ public class DelegationTokenRenewal {
         TokenWithConf tokenWithConf = null;
         try {
           tokenWithConf = queue.take();
-          DistributedFileSystem dfs = null;
-          try {
-            // do it over rpc. For that we need DFS object
-            dfs = getDFSForToken(tokenWithConf.token, tokenWithConf.conf);
-          } catch (Exception e) {
-            LOG.info("couldn't get DFS to cancel. Will retry over HTTPS");
-            dfs = null;
-          }
-      
-          if(dfs != null) {
-            dfs.cancelDelegationToken(tokenWithConf.token);
-          } else {
-            cancelDelegationTokenOverHttps(tokenWithConf.token, 
-                                           tokenWithConf.conf);
-          }
+          final TokenWithConf current = tokenWithConf;
+          
           if (LOG.isDebugEnabled()) {
-            LOG.debug("Canceling token " + tokenWithConf.token.getService() +  
-                " for dfs=" + dfs);
+            LOG.debug("Canceling token " + tokenWithConf.token.getService());
           }
+          // need to use doAs so that http can find the kerberos tgt
+          UserGroupInformation.getLoginUser().doAs(
+              new PrivilegedExceptionAction<Void>() {
+
+                @Override
+                public Void run() throws Exception {
+                  current.token.cancel(current.conf);
+                  return null;
+                }
+              });
         } catch (IOException e) {
           LOG.warn("Failed to cancel token " + tokenWithConf.token + " " +  
               StringUtils.stringifyException(e));
@@ -195,119 +180,29 @@ public class DelegationTokenRenewal {
     delegationTokens.add(t);
   }
   
-  // kind of tokens we currently renew
-  private static final Text kindHdfs = 
-    DelegationTokenIdentifier.HDFS_DELEGATION_KIND;
-  
-  @SuppressWarnings("unchecked")
   public static synchronized void registerDelegationTokensForRenewal(
-      JobID jobId, Credentials ts, Configuration conf) {
+      JobID jobId, Credentials ts, Configuration conf) throws IOException {
     if(ts==null)
       return; //nothing to add
     
-    Collection <Token<? extends TokenIdentifier>> tokens = ts.getAllTokens();
+    Collection <Token<?>> tokens = ts.getAllTokens();
     long now = System.currentTimeMillis();
-    
-    for(Token<? extends TokenIdentifier> t : tokens) {
-      // currently we only check for HDFS delegation tokens
-      // later we can add more different types.
-      if(! t.getKind().equals(kindHdfs)) {
-        continue; 
-      }
-      Token<DelegationTokenIdentifier> dt = 
-        (Token<DelegationTokenIdentifier>)t;
-      
-      // first renew happens immediately
-      DelegationTokenToRenew dtr = 
-        new DelegationTokenToRenew(jobId, dt, conf, now); 
-
-      addTokenToList(dtr);
-      
-      setTimerForTokenRenewal(dtr, true);
-      LOG.info("registering token for renewal for service =" + dt.getService()+
-          " and jobID = " + jobId);
-    }
-  }
-  
-  private static String getHttpAddressForToken(
-      Token<DelegationTokenIdentifier> token, final Configuration conf) 
-  throws IOException {
-
-    String[] ipaddr = token.getService().toString().split(":");
 
-    InetAddress iaddr = InetAddress.getByName(ipaddr[0]);
-    String dnsName = iaddr.getCanonicalHostName();
-    
-    // in case it is a different cluster it may have a different port
-    String httpsPort = conf.get("dfs.hftp.https.port");
-    if(httpsPort == null) {
-      // get from this cluster
-      httpsPort = conf.get(DFSConfigKeys.DFS_HTTPS_PORT_KEY, 
-          "" + DFSConfigKeys.DFS_HTTPS_PORT_DEFAULT);
-    }
-
-    // always use https (it is for security only)
-    return "https://" + dnsName+":"+httpsPort;
-  }
-
-  protected static long renewDelegationTokenOverHttps(
-      final Token<DelegationTokenIdentifier> token, final Configuration conf) 
-  throws InterruptedException, IOException{
-    final String httpAddress = getHttpAddressForToken(token, conf);
-    // will be chaged to debug
-    LOG.info("address to renew=" + httpAddress + "; tok=" + token.getService());
-    Long expDate = (Long) UserGroupInformation.getLoginUser().doAs(
-        new PrivilegedExceptionAction<Long>() {
-          public Long run() throws IOException {
-            return DelegationTokenFetcher.renewDelegationToken(httpAddress, token);  
-          }
-        });
-    LOG.info("Renew over HTTP done. addr="+httpAddress+";res="+expDate);
-    return expDate;
-  }
-  
-  private static long renewDelegationToken(DelegationTokenToRenew dttr) 
-  throws Exception {
-    long newExpirationDate=System.currentTimeMillis()+3600*1000;
-    Token<DelegationTokenIdentifier> token = dttr.token;
-    Configuration conf = dttr.conf;
-    if(token.getKind().equals(kindHdfs)) {
-      DistributedFileSystem dfs=null;
-    
-      try {
-        // do it over rpc. For that we need DFS object
-        dfs = getDFSForToken(token, conf);
-      } catch (IOException e) {
-        LOG.info("couldn't get DFS to renew. Will retry over HTTPS");
-        dfs = null;
-      }
-      
-      try {
-        if(dfs != null)
-          newExpirationDate = dfs.renewDelegationToken(token);
-        else {
-          // try HTTP
-          newExpirationDate = renewDelegationTokenOverHttps(token, conf);
-        }
-      } catch (InvalidToken ite) {
-        LOG.warn("invalid token - not scheduling for renew");
-        removeFailedDelegationToken(dttr);
-        throw new IOException("failed to renew token", ite);
-      } catch (AccessControlException ioe) {
-        LOG.warn("failed to renew token:"+token, ioe);
-        removeFailedDelegationToken(dttr);
-        throw new IOException("failed to renew token", ioe);
-      } catch (Exception e) {
-        LOG.warn("failed to renew token:"+token, e);
-        // returns default expiration date
+    for (Token<?> t : tokens) {
+      // first renew happens immediately
+      if (t.isManaged()) {
+        DelegationTokenToRenew dtr = new DelegationTokenToRenew(jobId, t, conf,
+            now);
+
+        addTokenToList(dtr);
+
+        setTimerForTokenRenewal(dtr, true);
+        LOG.info("registering token for renewal for service =" + t.getService()
+            + " and jobID = " + jobId);
       }
-    } else {
-      throw new Exception("unknown token type to renew:"+token.getKind());
     }
-    return newExpirationDate;
   }
-
-  
+    
   /**
    * Task - to renew a token
    *
@@ -319,43 +214,31 @@ public class DelegationTokenRenewal {
     
     @Override
     public void run() {
-      Token<DelegationTokenIdentifier> token = dttr.token;
+      Token<?> token = dttr.token;
       long newExpirationDate=0;
       try {
-        newExpirationDate = renewDelegationToken(dttr);
+        // need to use doAs so that http can find the kerberos tgt
+        dttr.expirationDate = UserGroupInformation.getLoginUser().doAs(
+            new PrivilegedExceptionAction<Long>() {
+
+              @Override
+              public Long run() throws Exception {
+                return dttr.token.renew(dttr.conf);
+              }
+            });
+
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("renewing for:" + token.getService() + ";newED="
+              + dttr.expirationDate);
+        }
+        setTimerForTokenRenewal(dttr, false);// set the next one
       } catch (Exception e) {
-        return; // message logged in renewDT method
+        LOG.error("Exception renewing token" + token + ". Not rescheduled", e);
+        removeFailedDelegationToken(dttr);
       }
-      if (LOG.isDebugEnabled())
-        LOG.debug("renewing for:"+token.getService()+";newED=" + 
-            newExpirationDate);
-      
-      // new expiration date
-      dttr.expirationDate = newExpirationDate;
-      setTimerForTokenRenewal(dttr, false);// set the next one
     }
   }
   
-  private static DistributedFileSystem getDFSForToken(
-      Token<DelegationTokenIdentifier> token, final Configuration conf) 
-  throws Exception {
-    DistributedFileSystem dfs = null;
-    try {
-      final URI uri = new URI (SCHEME + "://" + token.getService().toString());
-      dfs = 
-      UserGroupInformation.getLoginUser().doAs(
-          new PrivilegedExceptionAction<DistributedFileSystem>() {
-        public DistributedFileSystem run() throws IOException {
-          return (DistributedFileSystem) FileSystem.get(uri, conf);  
-        }
-      });
-    } catch (Exception e) {
-      LOG.warn("Failed to create a dfs to renew/cancel for:" + token.getService(), e);
-      throw e;
-    } 
-    return dfs;
-  }
-  
   /**
    * find the soonest expiring token and set it for renew
    */
@@ -372,15 +255,11 @@ public class DelegationTokenRenewal {
       renewIn = now + expiresIn - expiresIn/10; // little before expiration
     }
     
-    try {
-      // need to create new timer every time
-      TimerTask tTask = new RenewalTimerTask(token);
-      token.setTimerTask(tTask); // keep reference to the timer
-
-      renewalTimer.schedule(token.timerTask, new Date(renewIn));
-    } catch (Exception e) {
-      LOG.warn("failed to schedule a task, token will not renew more", e);
-    }
+    // need to create new timer every time
+    TimerTask tTask = new RenewalTimerTask(token);
+    token.setTimerTask(tTask); // keep reference to the timer
+
+    renewalTimer.schedule(token.timerTask, new Date(renewIn));
   }
 
   /**
@@ -391,33 +270,9 @@ public class DelegationTokenRenewal {
     delegationTokens.clear();
   }
   
-  
-  protected static void cancelDelegationTokenOverHttps(
-      final Token<DelegationTokenIdentifier> token, final Configuration conf) 
-  throws InterruptedException, IOException{
-    final String httpAddress = getHttpAddressForToken(token, conf);
-    // will be chaged to debug
-    LOG.info("address to cancel=" + httpAddress + "; tok=" + token.getService());
-    
-    UserGroupInformation.getLoginUser().doAs(
-        new PrivilegedExceptionAction<Void>() {
-          public Void run() throws IOException {
-            DelegationTokenFetcher.cancelDelegationToken(httpAddress, token);
-            return null;
-          }
-        });
-    LOG.info("Cancel over HTTP done. addr="+httpAddress);
-  }
-  
-  
   // cancel a token
   private static void cancelToken(DelegationTokenToRenew t) {
-    Token<DelegationTokenIdentifier> token = t.token;
-    Configuration conf = t.conf;
-    
-    if(token.getKind().equals(kindHdfs)) {
-      dtCancelThread.cancelToken(token, conf);
-    }
+    dtCancelThread.cancelToken(t.token, t.conf);
   }
   
   /**
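
The net effect of the DelegationTokenRenewal rewrite: all of the HDFS-specific plumbing (getDFSForToken, the HTTPS fallback paths) is replaced by the generic Token API, where any token whose kind has a registered renewer reports isManaged() == true and is renewed or cancelled through the token itself. Condensed to its core, the pattern is (sketch only, matching the doAs blocks above):

    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.Token;

    class RenewalSketch {
      // Renew under doAs(loginUser) so an HTTP-based renewer can find the
      // Kerberos TGT, exactly as the comment in the patch notes.
      static long renew(final Token<?> t, final Configuration conf)
          throws Exception {
        return UserGroupInformation.getLoginUser().doAs(
            new PrivilegedExceptionAction<Long>() {
              @Override
              public Long run() throws Exception {
                return t.renew(conf);  // dispatches to the kind's renewer
              }
            });
      }
    }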

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/JobTokenIdentifier.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/JobTokenIdentifier.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/JobTokenIdentifier.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/JobTokenIdentifier.java Wed Nov  2 05:34:31 2011
@@ -25,6 +25,7 @@ import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -35,7 +36,7 @@ import org.apache.hadoop.security.UserGr
 @InterfaceStability.Unstable
 public class JobTokenIdentifier extends TokenIdentifier {
   private Text jobid;
-  final static Text KIND_NAME = new Text("mapreduce.job");
+  public final static Text KIND_NAME = new Text("mapreduce.job");
   
   /**
    * Default constructor
@@ -86,4 +87,12 @@ public class JobTokenIdentifier extends 
   public void write(DataOutput out) throws IOException {
     jobid.write(out);
   }
+
+  @InterfaceAudience.Private
+  public static class Renewer extends Token.TrivialRenewer {
+    @Override
+    protected Text getKind() {
+      return KIND_NAME;
+    }
+  }
 }
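
Context for the new Renewer: Token.TrivialRenewer is the stock renewer for token kinds that cannot be renewed, and a token matched to it reports isManaged() == false, so the generic renewal loop added to DelegationTokenRenewal above simply skips job tokens instead of failing on an unknown kind. (Renewer implementations are looked up through the TokenRenewer service-loader mechanism; that registration is outside this hunk.)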

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenIdentifier.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenIdentifier.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenIdentifier.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenIdentifier.java Wed Nov  2 05:34:31 2011
@@ -30,7 +30,7 @@ import org.apache.hadoop.security.token.
 @InterfaceStability.Unstable
 public class DelegationTokenIdentifier 
     extends AbstractDelegationTokenIdentifier {
-  static final Text MAPREDUCE_DELEGATION_KIND = 
+  public static final Text MAPREDUCE_DELEGATION_KIND = 
     new Text("MAPREDUCE_DELEGATION_TOKEN");
 
   /**

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/ReduceContextImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/ReduceContextImpl.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/ReduceContextImpl.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/ReduceContextImpl.java Wed Nov  2 05:34:31 2011
@@ -176,11 +176,15 @@ public class ReduceContextImpl<KEYIN,VAL
     return value;
   }
   
+  BackupStore<KEYIN,VALUEIN> getBackupStore() {
+    return backupStore;
+  }
+  
   protected class ValueIterator implements ReduceContext.ValueIterator<VALUEIN> {
 
     private boolean inReset = false;
     private boolean clearMarkFlag = false;
-    
+
     @Override
     public boolean hasNext() {
       try {
@@ -247,7 +251,7 @@ public class ReduceContextImpl<KEYIN,VAL
 
     @Override
     public void mark() throws IOException {
-      if (backupStore == null) {
+      if (getBackupStore() == null) {
         backupStore = new BackupStore<KEYIN,VALUEIN>(conf, taskid);
       }
       isMarked = true;
@@ -290,7 +294,7 @@ public class ReduceContextImpl<KEYIN,VAL
 
     @Override
     public void clearMark() throws IOException {
-      if (backupStore == null) {
+      if (getBackupStore() == null) {
         return;
       }
       if (inReset) {
@@ -308,7 +312,7 @@ public class ReduceContextImpl<KEYIN,VAL
      * @throws IOException
      */
     public void resetBackupStore() throws IOException {
-      if (backupStore == null) {
+      if (getBackupStore() == null) {
         return;
       }
       inReset = isMarked = false;
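
The null checks above now go through getBackupStore(), package-private and presumably introduced so tests can stub it. The BackupStore itself is created lazily in mark() and is what backs value replay in a reducer. A sketch of the user-visible side of that machinery, assuming the public MarkableIterator wrapper:

    import java.io.IOException;

    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.MarkableIterator;
    import org.apache.hadoop.mapreduce.Reducer;

    class TwoPassReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
      @Override
      protected void reduce(Text key, Iterable<IntWritable> values,
          Context ctx) throws IOException, InterruptedException {
        MarkableIterator<IntWritable> it =
            new MarkableIterator<IntWritable>(values.iterator());
        it.mark();                    // lazily creates the BackupStore
        int count = 0;
        while (it.hasNext()) {
          it.next();
          count++;                    // first pass: just count the values
        }
        it.reset();                   // rewind to the mark
        while (it.hasNext()) {
          ctx.write(key, it.next());  // second pass: replay the same values
        }
      }
    }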

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java Wed Nov  2 05:34:31 2011
@@ -60,7 +60,7 @@ class EventFetcher<K,V> extends Thread {
     LOG.info(reduce + " Thread started: " + getName());
     
     try {
-      while (true) {
+      while (true && !Thread.currentThread().isInterrupted()) {
         try {
           int numNewMaps = getMapCompletionEvents();
           failures = 0;
@@ -68,7 +68,9 @@ class EventFetcher<K,V> extends Thread {
             LOG.info(reduce + ": " + "Got " + numNewMaps + " new map-outputs");
           }
           LOG.debug("GetMapEventsThread about to sleep for " + SLEEP_TIME);
-          Thread.sleep(SLEEP_TIME);
+          if (!Thread.currentThread().isInterrupted()) {
+            Thread.sleep(SLEEP_TIME);
+          }
         } catch (IOException ie) {
           LOG.info("Exception in getting events", ie);
           // check to see whether to abort
@@ -76,7 +78,9 @@ class EventFetcher<K,V> extends Thread {
             throw new IOException("too many failures downloading events", ie);
           }
           // sleep for a bit
-          Thread.sleep(RETRY_PERIOD);
+          if (!Thread.currentThread().isInterrupted()) {
+            Thread.sleep(RETRY_PERIOD);
+          }
         }
       }
     } catch (InterruptedException e) {

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java Wed Nov  2 05:34:31 2011
@@ -135,7 +135,7 @@ class Fetcher<K,V> extends Thread {
   
   public void run() {
     try {
-      while (true) {
+      while (true && !Thread.currentThread().isInterrupted()) {
         MapHost host = null;
         try {
           // If merge is on, block
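
Both fetcher threads get the same treatment: test the interrupt flag at the top of the loop and again before each sleep, so an interrupt() delivered while the thread is doing work stops it at the next check instead of only when it happens to be parked in sleep(). (The "true &&" left in the conditions is redundant; the isInterrupted() test is the change that matters.) The shape of the loop, reduced to a sketch:

    class InterruptiblePoller extends Thread {
      private static final long SLEEP_TIME = 1000;  // stand-in value

      @Override
      public void run() {
        try {
          while (!Thread.currentThread().isInterrupted()) {
            doOneRound();  // hypothetical unit of work
            if (!Thread.currentThread().isInterrupted()) {
              Thread.sleep(SLEEP_TIME);
            }
          }
        } catch (InterruptedException e) {
          // interrupted during sleep: fall through and let the thread exit
        }
      }

      private void doOneRound() {
        // placeholder for getMapCompletionEvents() / copyFromHost() work
      }
    }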

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java Wed Nov  2 05:34:31 2011
@@ -42,9 +42,11 @@ import org.apache.hadoop.mapreduce.TaskR
 import org.apache.hadoop.mapreduce.TaskTrackerInfo;
 import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.mapreduce.jobhistory.HistoryViewer;
+import org.apache.hadoop.mapreduce.v2.LogParams;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.LogDumper;
 
 /**
  * Interprets the map reduce cli options 
@@ -53,6 +55,7 @@ import org.apache.hadoop.util.ToolRunner
 @InterfaceStability.Stable
 public class CLI extends Configured implements Tool {
   private static final Log LOG = LogFactory.getLog(CLI.class);
+  private Cluster cluster;
 
   public CLI() {
   }
@@ -94,6 +97,7 @@ public class CLI extends Configured impl
     boolean killTask = false;
     boolean failTask = false;
     boolean setJobPriority = false;
+    boolean logs = false;
 
     if ("-submit".equals(cmd)) {
       if (argv.length != 2) {
@@ -204,13 +208,26 @@ public class CLI extends Configured impl
       taskType = argv[2];
       taskState = argv[3];
       displayTasks = true;
+    } else if ("-logs".equals(cmd)) {
+      if (argv.length == 2 || argv.length ==3) {
+        logs = true;
+        jobid = argv[1];
+        if (argv.length == 3) {
+          taskid = argv[2];
+        }  else {
+          taskid = null;
+        }
+      } else {
+        displayUsage(cmd);
+        return exitCode;
+      }
     } else {
       displayUsage(cmd);
       return exitCode;
     }
 
     // initialize cluster
-    Cluster cluster = new Cluster(getConf());
+    cluster = new Cluster(getConf());
         
     // Submit the request
     try {
@@ -312,6 +329,22 @@ public class CLI extends Configured impl
           System.out.println("Could not fail task " + taskid);
           exitCode = -1;
         }
+      } else if (logs) {
+        try {
+        JobID jobID = JobID.forName(jobid);
+        TaskAttemptID taskAttemptID = TaskAttemptID.forName(taskid);
+        LogParams logParams = cluster.getLogParams(jobID, taskAttemptID);
+        LogDumper logDumper = new LogDumper();
+        logDumper.setConf(getConf());
+        logDumper.dumpAContainersLogs(logParams.getApplicationId(),
+            logParams.getContainerId(), logParams.getNodeId(),
+            logParams.getOwner());
+        } catch (IOException e) {
+          if (e instanceof RemoteException) {
+            throw e;
+          } 
+          System.out.println(e.getMessage());
+        }
       }
     } catch (RemoteException re) {
       IOException unwrappedException = re.unwrapRemoteException();
@@ -379,6 +412,10 @@ public class CLI extends Configured impl
           " <job-id> <task-type> <task-state>]. " +
           "Valid values for <task-type> are " + taskTypes + ". " +
           "Valid values for <task-state> are " + taskStates);
+    } else if ("-logs".equals(cmd)) {
+      System.err.println(prefix + "[" + cmd +
+          " <job-id> <task-attempt-id>]. " +
+          " <task-attempt-id> is optional to get task attempt logs.");      
     } else {
       System.err.printf(prefix + "<command> <args>\n");
       System.err.printf("\t[-submit <job-file>]\n");
@@ -397,7 +434,8 @@ public class CLI extends Configured impl
         "Valid values for <task-type> are " + taskTypes + ". " +
         "Valid values for <task-state> are " + taskStates);
       System.err.printf("\t[-kill-task <task-attempt-id>]\n");
-      System.err.printf("\t[-fail-task <task-attempt-id>]\n\n");
+      System.err.printf("\t[-fail-task <task-attempt-id>]\n");
+      System.err.printf("\t[-logs <job-id> <task-attempt-id>]\n\n");
       ToolRunner.printGenericCommandUsage(System.out);
     }
   }
@@ -527,12 +565,26 @@ public class CLI extends Configured impl
       throws IOException, InterruptedException {
     System.out.println("Total jobs:" + jobs.length);
     System.out.println("JobId\tState\tStartTime\t" +
-        "UserName\tQueue\tPriority\tSchedulingInfo");
+        "UserName\tQueue\tPriority\tMaps\tReduces\tUsedContainers\t" +
+        "RsvdContainers\tUsedMem\tRsvdMem\tNeededMem\tAM info");
     for (JobStatus job : jobs) {
-      System.out.printf("%s\t%s\t%d\t%s\t%s\t%s\t%s\n", job.getJobID().toString(),
-          job.getState(), job.getStartTime(),
+      TaskReport[] mapReports =
+                 cluster.getJob(job.getJobID()).getTaskReports(TaskType.MAP);
+      TaskReport[] reduceReports =
+                 cluster.getJob(job.getJobID()).getTaskReports(TaskType.REDUCE);
+
+      System.out.printf("%s\t%s\t%d\t%s\t%s\t%s\t%d\t%d\t%d\t%d\t%dM\t%dM\t%dM\t%s\n",
+          job.getJobID().toString(), job.getState(), job.getStartTime(),
           job.getUsername(), job.getQueue(), 
-          job.getPriority().name(), job.getSchedulingInfo());
+          job.getPriority().name(),
+          mapReports.length,
+          reduceReports.length,
+          job.getNumUsedSlots(),
+          job.getNumReservedSlots(),
+          job.getUsedMem(),
+          job.getReservedMem(),
+          job.getNeededMem(),
+          job.getSchedulingInfo());
     }
   }
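
Two user-visible changes land in CLI here: a -logs subcommand that resolves a job or task attempt to its aggregated container logs through the new getLogParams/LogDumper path, and an expanded -list format carrying map/reduce counts plus container and memory columns. Going by the displayUsage() hunk, the new subcommand takes the form

    [-logs <job-id> <task-attempt-id>]

with the task-attempt id optional; when it is omitted, the job-level log location is used.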
   

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java Wed Nov  2 05:34:31 2011
@@ -40,6 +40,8 @@ public class ConfigUtil {
     addDeprecatedKeys();
     Configuration.addDefaultResource("mapred-default.xml");
     Configuration.addDefaultResource("mapred-site.xml");
+    Configuration.addDefaultResource("yarn-default.xml");
+    Configuration.addDefaultResource("yarn-site.xml");
   }
   
   /**
@@ -175,11 +177,11 @@ public class ConfigUtil {
     Configuration.addDeprecation("tasktracker.contention.tracking", 
       new String[] {TTConfig.TT_CONTENTION_TRACKING});
     Configuration.addDeprecation("job.end.notification.url", 
-      new String[] {MRJobConfig.END_NOTIFICATION_URL});
+      new String[] {MRJobConfig.MR_JOB_END_NOTIFICATION_URL});
     Configuration.addDeprecation("job.end.retry.attempts", 
-      new String[] {MRJobConfig.END_NOTIFICATION_RETRIES});
+      new String[] {MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS});
     Configuration.addDeprecation("job.end.retry.interval", 
-      new String[] {MRJobConfig.END_NOTIFICATION_RETRIE_INTERVAL});
+      new String[] {MRJobConfig.MR_JOB_END_RETRY_INTERVAL});
     Configuration.addDeprecation("mapred.committer.job.setup.cleanup.needed", 
       new String[] {MRJobConfig.SETUP_CLEANUP_NEEDED});
     Configuration.addDeprecation("mapred.jar", 
@@ -512,6 +514,15 @@ public class ConfigUtil {
     
     Configuration.addDeprecation("webinterface.private.actions", 
         new String[]{JTConfig.PRIVATE_ACTIONS_KEY});
+    
+    Configuration.addDeprecation("security.task.umbilical.protocol.acl", 
+        new String[] {
+        MRJobConfig.MR_AM_SECURITY_SERVICE_AUTHORIZATION_TASK_UMBILICAL   
+    });
+    Configuration.addDeprecation("security.job.submission.protocol.acl", 
+        new String[] {
+        MRJobConfig.MR_AM_SECURITY_SERVICE_AUTHORIZATION_CLIENT   
+    });
   }
 }
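
A sketch of what the deprecation table buys: a configuration that still carries an old key is transparently readable through the new name. The key and constant are the ones referenced in the hunk above; the value is invented:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    public class DeprecationSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // An old-style job file may still set the pre-rename key...
        conf.set("job.end.notification.url",
            "http://example.com/notify?job=$jobId");
        // ...but a read through the new constant resolves via the
        // addDeprecation() mapping registered above.
        System.out.println(conf.get(MRJobConfig.MR_JOB_END_NOTIFICATION_URL));
      }
    }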
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml Wed Nov  2 05:34:31 2011
@@ -1166,7 +1166,7 @@
 
 <property>
   <name>mapreduce.framework.name</name>
-  <value>yarn</value>
+  <value>local</value>
   <description>The runtime framework for executing MapReduce jobs.
   Can be one of local, classic or yarn.
   </description>
@@ -1179,4 +1179,57 @@
   </description>
 </property>
 
+<property>
+  <name>mapreduce.job.end-notification.max.attempts</name>
+  <value>5</value>
+  <final>true</final>
+  <description>The maximum number of times a URL will be read for providing job
+    end notification. Cluster administrators can set this to limit how long
+    after end of a job, the Application Master waits before exiting. Must be
+    marked as final to prevent users from overriding this.
+  </description>
+</property>
+
+<property>
+  <name>mapreduce.job.end-notification.max.retry.interval</name>
+  <value>5</value>
+  <final>true</final>
+  <description>The maximum amount of time (in seconds) to wait before retrying
+    job end notification. Cluster administrators can set this to limit how long
+    the Application Master waits before exiting. Must be marked as final to
+    prevent users from overriding this.</description>
+</property>
+
+<property>
+  <name>mapreduce.job.end-notification.url</name>
+  <value></value>
+  <description>The URL to send job end notification. It may contain sentinels
+    $jobId and $jobStatus which will be replaced with jobId and jobStatus.
+  </description>
+</property>
+
+<property>
+  <name>mapreduce.job.end-notification.retry.attempts</name>
+  <value>5</value>
+  <description>The number of times the submitter of the job wants to retry job
+    end notification if it fails. This is capped by
+    mapreduce.job.end-notification.max.attempts</description>
+</property>
+
+<property>
+  <name>mapreduce.job.end-notification.retry.interval</name>
+  <value>1</value>
+  <description>The number of seconds the submitter of the job wants to wait
+    before job end notification is retried if it fails. This is capped by
+    mapreduce.job.end-notification.max.retry.interval</description>
+</property>
+
+<property>
+  <name>mapreduce.job.user.name</name>
+  <value>${user.name}</value>
+  <description>The user name for the job submitter, configurable only in
+  non-secure mode. In secure mode Kerberos authentication is necessary.
+  </description>
+</property>
+
 </configuration>
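
The new end-notification properties split admin-capped limits (the two values marked final) from per-job settings. A hedged sketch of the per-job side, with key names taken verbatim from the hunk and the URL invented; $jobId and $jobStatus are substituted by the framework when the notification fires:

    import org.apache.hadoop.conf.Configuration;

    class EndNotificationSketch {
      static void configure(Configuration conf) {
        conf.set("mapreduce.job.end-notification.url",
            "http://example.com/notify?job=$jobId&status=$jobStatus");
        // Retries are capped by the final, admin-set values:
        //   retry.attempts <= mapreduce.job.end-notification.max.attempts
        //   retry.interval <= mapreduce.job.end-notification.max.retry.interval
        conf.setInt("mapreduce.job.end-notification.retry.attempts", 3);
        conf.setInt("mapreduce.job.end-notification.retry.interval", 1); // seconds
      }
    }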

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Nov  2 05:34:31 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml:1166973-1179483
+/hadoop/common/trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml:1166973-1196451
 /hadoop/core/branches/branch-0.19/mapred/src/java/mapred-default.xml:713112
 /hadoop/core/trunk/src/mapred/mapred-default.xml:776175-785643

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java Wed Nov  2 05:34:31 2011
@@ -26,10 +26,13 @@ import java.net.URI;
 import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.MapFile;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.Job;
@@ -75,6 +78,20 @@ public class TestFileOutputCommitter ext
     }
   }
 
+  private void writeMapFileOutput(RecordWriter theRecordWriter,
+      TaskAttemptContext context) throws IOException, InterruptedException {
+    try {
+      int key = 0;
+      for (int i = 0 ; i < 10; ++i) {
+        key = i;
+        Text val = (i%2 == 1) ? val1 : val2;
+        theRecordWriter.write(new LongWritable(key),
+            val);        
+      }
+    } finally {
+      theRecordWriter.close(context);
+    }
+  }
   
   public void testRecovery() throws Exception {
     Job job = Job.getInstance();
@@ -101,9 +118,7 @@ public class TestFileOutputCommitter ext
         FileOutputCommitter.getJobAttemptBaseDirName(
             conf.getInt(MRJobConfig.APPLICATION_ATTEMPT_ID, 0)));
     assertTrue((new File(jobTempDir1.toString()).exists()));
-    validateContent(jobTempDir1);
-    
-    
+    validateContent(jobTempDir1);    
     
     //now while running the second app attempt, 
     //recover the task output from first attempt
@@ -141,6 +156,29 @@ public class TestFileOutputCommitter ext
     assertEquals(output, expectedOutput.toString());
   }
 
+  private void validateMapFileOutputContent(
+      FileSystem fs, Path dir) throws IOException {
+    // map output is a directory with index and data files
+    Path expectedMapDir = new Path(dir, partFile);
+    assert(fs.getFileStatus(expectedMapDir).isDirectory());    
+    FileStatus[] files = fs.listStatus(expectedMapDir);
+    int fileCount = 0;
+    boolean dataFileFound = false; 
+    boolean indexFileFound = false; 
+    for (FileStatus f : files) {
+      if (f.isFile()) {
+        ++fileCount;
+        if (f.getPath().getName().equals(MapFile.INDEX_FILE_NAME)) {
+          indexFileFound = true;
+        }
+        else if (f.getPath().getName().equals(MapFile.DATA_FILE_NAME)) {
+          dataFileFound = true;
+        }
+      }
+    }
+    assert(fileCount > 0);
+    assert(dataFileFound && indexFileFound);
+  }
   
   public void testCommitter() throws Exception {
     Job job = Job.getInstance();
@@ -169,6 +207,32 @@ public class TestFileOutputCommitter ext
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  public void testMapFileOutputCommitter() throws Exception {
+    Job job = Job.getInstance();
+    FileOutputFormat.setOutputPath(job, outDir);
+    Configuration conf = job.getConfiguration();
+    conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
+    JobContext jContext = new JobContextImpl(conf, taskID.getJobID());    
+    TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
+    FileOutputCommitter committer = new FileOutputCommitter(outDir, tContext);
+
+    // setup
+    committer.setupJob(jContext);
+    committer.setupTask(tContext);
+
+    // write output
+    MapFileOutputFormat theOutputFormat = new MapFileOutputFormat();
+    RecordWriter theRecordWriter = theOutputFormat.getRecordWriter(tContext);
+    writeMapFileOutput(theRecordWriter, tContext);
+
+    // do commit
+    committer.commitTask(tContext);
+    committer.commitJob(jContext);
+
+    // validate output
+    validateMapFileOutputContent(FileSystem.get(job.getConfiguration()), outDir);
+    FileUtil.fullyDelete(new File(outDir.toString()));
+  }
   
   public void testAbort() throws IOException, InterruptedException {
     Job job = Job.getInstance();

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml Wed Nov  2 05:34:31 2011
@@ -16,16 +16,17 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>${hadoop-mapreduce.version}</version>
+    <version>0.24.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-mapreduce-client-hs</artifactId>
+  <version>0.24.0-SNAPSHOT</version>
   <name>hadoop-mapreduce-client-hs</name>
 
   <properties>
-    <install.file>${project.artifact.file}</install.file>
-    <mr.basedir>${project.parent.parent.basedir}</mr.basedir>
+    <!-- Needed for generating FindBugs warnings using parent pom -->
+    <mr.basedir>${project.parent.basedir}/..</mr.basedir>
   </properties>
 
   <dependencies>
@@ -50,15 +51,4 @@
       <scope>test</scope>
     </dependency>
   </dependencies>
-  <build>
-    <plugins>
-      <plugin>
-        <artifactId>maven-surefire-plugin</artifactId>
-        <configuration>
-          <excludes>
-          </excludes>
-        </configuration>
-      </plugin>
-    </plugins>
-   </build>
 </project>

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java Wed Nov  2 05:34:31 2011
@@ -37,6 +37,7 @@ import org.apache.hadoop.mapreduce.TypeC
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
+import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
 import org.apache.hadoop.mapreduce.v2.api.records.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
@@ -49,6 +50,7 @@ import org.apache.hadoop.mapreduce.v2.ap
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
+import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.yarn.YarnException;
@@ -72,19 +74,20 @@ public class CompletedJob implements org
   private final Map<TaskId, Task> reduceTasks = new HashMap<TaskId, Task>();
   private final String user;
   private final Path confFile;
-  
+  private JobACLsManager aclsMgr;
   private List<TaskAttemptCompletionEvent> completionEvents = null;
   private JobInfo jobInfo;
 
   public CompletedJob(Configuration conf, JobId jobId, Path historyFile, 
-      boolean loadTasks, String userName, Path confFile) throws IOException {
+      boolean loadTasks, String userName, Path confFile, JobACLsManager aclsMgr) 
+          throws IOException {
     LOG.info("Loading job: " + jobId + " from file: " + historyFile);
     this.conf = conf;
     this.jobId = jobId;
     this.confFile = confFile;
+    this.aclsMgr = aclsMgr;
     
     loadFullHistoryData(loadTasks, historyFile);
-
     user = userName;
     counters = TypeConverter.toYarn(jobInfo.getTotalCounters());
     diagnostics.add(jobInfo.getErrorInfo());
@@ -93,6 +96,7 @@ public class CompletedJob implements org
             JobReport.class);
     report.setJobId(jobId);
     report.setJobState(JobState.valueOf(jobInfo.getJobStatus()));
+    report.setSubmitTime(jobInfo.getSubmitTime());
     report.setStartTime(jobInfo.getLaunchTime());
     report.setFinishTime(jobInfo.getFinishTime());
     report.setJobName(jobInfo.getJobname());
@@ -102,6 +106,7 @@ public class CompletedJob implements org
     report.setJobFile(confFile.toString());
     report.setTrackingUrl(JobHistoryUtils.getHistoryUrl(conf, TypeConverter
         .toYarn(TypeConverter.fromYarn(jobId)).getAppId()));
+    report.setAMInfos(getAMInfos());
   }
 
   @Override
@@ -310,7 +315,6 @@ public class CompletedJob implements org
     }
     Map<JobACL, AccessControlList> jobACLs = jobInfo.getJobACLs();
     AccessControlList jobACL = jobACLs.get(jobOperation);
-    JobACLsManager aclsMgr = new JobACLsManager(conf);
     return aclsMgr.checkAccess(callerUGI, jobOperation, 
         jobInfo.getUsername(), jobACL);
   }
@@ -337,4 +341,20 @@ public class CompletedJob implements org
   public Path getConfFile() {
     return confFile;
   }
+
+  @Override
+  public List<AMInfo> getAMInfos() {
+    List<AMInfo> amInfos = new LinkedList<AMInfo>();
+    for (org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.AMInfo jhAmInfo : jobInfo
+        .getAMInfos()) {
+      AMInfo amInfo =
+          MRBuilderUtils.newAMInfo(jhAmInfo.getAppAttemptId(),
+              jhAmInfo.getStartTime(), jhAmInfo.getContainerId(),
+              jhAmInfo.getNodeManagerHost(), jhAmInfo.getNodeManagerPort(),
+              jhAmInfo.getNodeManagerHttpPort());
+   
+      amInfos.add(amInfo);
+    }
+    return amInfos;
+  }
 }

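For readers following the CompletedJob change: the new getAMInfos() override translates the JobHistoryParser-level AMInfo records into v2 API records via MRBuilderUtils. A minimal sketch of a consumer, assuming the hadoop-mapreduce-client jars on the classpath (the class and helper method here are hypothetical, not part of this commit):

    import java.util.List;
    import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
    import org.apache.hadoop.mapreduce.v2.app.job.Job;

    public class AMInfoSketch {
      // Hypothetical helper: prints the AM attempts recorded for a job.
      static void printAMs(Job job) {
        List<AMInfo> amInfos = job.getAMInfos();
        for (AMInfo amInfo : amInfos) {
          System.out.println("attempt #"
              + amInfo.getAppAttemptId().getAttemptId()
              + " container " + amInfo.getContainerId()
              + " on " + amInfo.getNodeManagerHost()
              + ":" + amInfo.getNodeManagerHttpPort());
        }
      }
    }
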
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java Wed Nov  2 05:34:31 2011
@@ -29,8 +29,6 @@ import org.apache.hadoop.mapreduce.v2.ap
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 
@@ -81,39 +79,35 @@ public class CompletedTaskAttempt implem
 //    report.setPhase(attemptInfo.get); //TODO
     report.setStateString(attemptInfo.getState());
     report.setCounters(getCounters());
+    report.setContainerId(attemptInfo.getContainerId());
+    String[] hostSplits = attemptInfo.getHostname().split(":");
+    if (hostSplits.length != 2) {
+      report.setNodeManagerHost("UNKNOWN");
+    } else {
+      report.setNodeManagerHost(hostSplits[0]);
+      report.setNodeManagerPort(Integer.parseInt(hostSplits[1]));
+    }
+    report.setNodeManagerHttpPort(attemptInfo.getHttpPort());
   }
 
   @Override
   public ContainerId getAssignedContainerID() {
-    //TODO ContainerId needs to be part of some historyEvent to be able to 
-    //render the log directory.
-    ContainerId containerId = 
-        RecordFactoryProvider.getRecordFactory(null).newRecordInstance(
-            ContainerId.class);
-    containerId.setId(-1);
-    ApplicationAttemptId applicationAttemptId =
-        RecordFactoryProvider.getRecordFactory(null).newRecordInstance(
-            ApplicationAttemptId.class);
-    applicationAttemptId.setAttemptId(-1);
-    ApplicationId applicationId =
-        RecordFactoryProvider.getRecordFactory(null).newRecordInstance(
-            ApplicationId.class);
-    applicationId.setClusterTimestamp(-1);
-    applicationId.setId(-1);
-    applicationAttemptId.setApplicationId(applicationId);
-    containerId.setApplicationAttemptId(applicationAttemptId);
-    return containerId;
+    return attemptInfo.getContainerId();
   }
 
   @Override
   public String getAssignedContainerMgrAddress() {
-    // TODO Verify this is correct.
-    return attemptInfo.getTrackerName();
+    return attemptInfo.getHostname();
   }
 
   @Override
   public String getNodeHttpAddress() {
-    return attemptInfo.getHostname() + ":" + attemptInfo.getHttpPort();
+    return attemptInfo.getTrackerName() + ":" + attemptInfo.getHttpPort();
+  }
+  
+  @Override
+  public String getNodeRackName() {
+    return attemptInfo.getRackname();
   }
 
   @Override

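The CompletedTaskAttempt hunk above now stores the NM address as a "host:port" string and falls back to "UNKNOWN" when no port component is present. A self-contained sketch of that parsing, with an assumed example value (the class name is hypothetical):

    public class HostPortSketch {
      public static void main(String[] args) {
        String hostname = "node042:45454";  // assumed example value
        String[] hostSplits = hostname.split(":");
        if (hostSplits.length != 2) {
          // Mirrors the fallback used by the commit.
          System.out.println("host=UNKNOWN");
        } else {
          System.out.println("host=" + hostSplits[0]
              + " port=" + Integer.parseInt(hostSplits[1]));
        }
      }
    }
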
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java Wed Nov  2 05:34:31 2011
@@ -27,11 +27,12 @@ import java.security.PrivilegedException
 import java.util.Arrays;
 import java.util.Collection;
 
-import org.apache.avro.ipc.Server;
+import org.apache.hadoop.ipc.Server;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.mapreduce.JobACL;
 import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
@@ -62,14 +63,12 @@ import org.apache.hadoop.mapreduce.v2.ap
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.security.authorize.MRAMPolicyProvider;
 import org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebApp;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
-import org.apache.hadoop.mapreduce.v2.security.client.ClientHSSecurityInfo;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.SecurityInfo;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.YarnException;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
@@ -106,7 +105,9 @@ public class HistoryClientService extend
     initializeWebApp(conf);
     String serviceAddr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS,
         JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS);
-    InetSocketAddress address = NetUtils.createSocketAddr(serviceAddr);
+    InetSocketAddress address = NetUtils.createSocketAddr(serviceAddr,
+      JHAdminConfig.DEFAULT_MR_HISTORY_PORT,
+      JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS);
     InetAddress hostNameResolved = null;
     try {
       hostNameResolved = InetAddress.getLocalHost(); //address.getAddress().getLocalHost();
@@ -119,6 +120,14 @@ public class HistoryClientService extend
             conf, null,
             conf.getInt(JHAdminConfig.MR_HISTORY_CLIENT_THREAD_COUNT, 
                 JHAdminConfig.DEFAULT_MR_HISTORY_CLIENT_THREAD_COUNT));
+    
+    // Enable service authorization?
+    if (conf.getBoolean(
+        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, 
+        false)) {
+      server.refreshServiceAcl(conf, new MRAMPolicyProvider());
+    }
+    
     server.start();
     this.bindAddress =
         NetUtils.createSocketAddr(hostNameResolved.getHostAddress()
@@ -132,13 +141,13 @@ public class HistoryClientService extend
     webApp = new HsWebApp(history);
     String bindAddress = conf.get(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,
         JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS);
-    WebApps.$for("jobhistory", this).at(bindAddress).start(webApp); 
+    WebApps.$for("jobhistory", this).with(conf).at(bindAddress).start(webApp); 
   }
 
   @Override
   public void stop() {
     if (server != null) {
-      server.close();
+      server.stop();
     }
     if (webApp != null) {
       webApp.stop();

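The HistoryClientService change gates service-level ACLs on the standard hadoop.security.authorization key (default false). A minimal sketch of that gate, assuming hadoop-common on the classpath; the class name is hypothetical, and the refreshServiceAcl call is left as a comment since it needs a live RPC server:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

    public class AuthzGateSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Same check the commit adds before starting the server.
        if (conf.getBoolean(
            CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
            false)) {
          // server.refreshServiceAcl(conf, new MRAMPolicyProvider());
          System.out.println("service authorization enabled");
        }
      }
    }
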
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java Wed Nov  2 05:34:31 2011
@@ -32,6 +32,7 @@ import java.util.TreeMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
@@ -47,6 +48,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.mapred.JobACLsManager;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.jobhistory.JobSummary;
@@ -64,6 +66,8 @@ import org.apache.hadoop.yarn.event.Even
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.service.AbstractService;
 
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
 
 /*
  * Loads and manages the Job history cache.
@@ -71,7 +75,7 @@ import org.apache.hadoop.yarn.service.Ab
 public class JobHistory extends AbstractService implements HistoryContext   {
 
   private static final int DEFAULT_JOBLIST_CACHE_SIZE = 20000;
-  private static final int DEFAULT_LOADEDJOB_CACHE_SIZE = 2000;
+  private static final int DEFAULT_LOADEDJOB_CACHE_SIZE = 5;
   private static final int DEFAULT_DATESTRING_CACHE_SIZE = 200000;
   private static final long DEFAULT_MOVE_THREAD_INTERVAL = 3 * 60 * 1000l; //3 minutes
   private static final int DEFAULT_MOVE_THREAD_COUNT = 3;
@@ -122,6 +126,8 @@ public class JobHistory extends Abstract
   //The number of jobs to maintain in the job list cache.
   private int jobListCacheSize;
   
+  private JobACLsManager aclsMgr;
+  
   //The number of loaded jobs.
   private int loadedJobCacheSize;
   
@@ -200,7 +206,7 @@ public class JobHistory extends Abstract
           + intermediateDoneDirPath + "]", e);
     }
     
-    
+    this.aclsMgr = new JobACLsManager(conf);
     
     jobListCacheSize = conf.getInt(JHAdminConfig.MR_HISTORY_JOBLIST_CACHE_SIZE,
         DEFAULT_JOBLIST_CACHE_SIZE);
@@ -256,7 +262,9 @@ public class JobHistory extends Abstract
     if (startCleanerService) {
       long maxAgeOfHistoryFiles = conf.getLong(
           JHAdminConfig.MR_HISTORY_MAX_AGE_MS, DEFAULT_HISTORY_MAX_AGE);
-    cleanerScheduledExecutor = new ScheduledThreadPoolExecutor(1);
+      cleanerScheduledExecutor = new ScheduledThreadPoolExecutor(1,
+          new ThreadFactoryBuilder().setNameFormat("LogCleaner").build()
+      );
       long runInterval = conf.getLong(
           JHAdminConfig.MR_HISTORY_CLEANER_INTERVAL_MS, DEFAULT_RUN_INTERVAL);
       cleanerScheduledExecutor
@@ -594,8 +602,11 @@ public class JobHistory extends Abstract
     
     MoveIntermediateToDoneRunnable(long sleepTime, int numMoveThreads) {
       this.sleepTime = sleepTime;
+      ThreadFactory tf = new ThreadFactoryBuilder()
+        .setNameFormat("MoveIntermediateToDone Thread #%d")
+        .build();
       moveToDoneExecutor = new ThreadPoolExecutor(1, numMoveThreads, 1, 
-          TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>());
+          TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf);
       running = true;
     }
   
@@ -640,7 +651,7 @@ public class JobHistory extends Abstract
       try {
         Job job = new CompletedJob(conf, metaInfo.getJobIndexInfo().getJobId(), 
             metaInfo.getHistoryFile(), true, metaInfo.getJobIndexInfo().getUser(),
-            metaInfo.getConfFile());
+            metaInfo.getConfFile(), this.aclsMgr);
         addToLoadedJobCache(job);
         return job;
       } catch (IOException e) {

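Both JobHistory executors now get named threads via Guava's ThreadFactoryBuilder, which makes thread dumps from the history server easier to read. A runnable sketch of the same pattern, assuming Guava on the classpath (pool sizes follow the diff's defaults; the class name is hypothetical):

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadFactory;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import com.google.common.util.concurrent.ThreadFactoryBuilder;

    public class NamedPoolSketch {
      public static void main(String[] args) {
        ThreadFactory tf = new ThreadFactoryBuilder()
            .setNameFormat("MoveIntermediateToDone Thread #%d")
            .build();
        ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 3, 1,
            TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf);
        pool.execute(new Runnable() {
          public void run() {
            // Prints e.g. "MoveIntermediateToDone Thread #0".
            System.out.println(Thread.currentThread().getName());
          }
        });
        pool.shutdown();
      }
    }
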
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java Wed Nov  2 05:34:31 2011
@@ -42,11 +42,6 @@ public class JobHistoryServer extends Co
   private HistoryClientService clientService;
   private JobHistory jobHistoryService;
 
-  static{
-    Configuration.addDefaultResource("mapred-default.xml");
-    Configuration.addDefaultResource("mapred-site.xml");
-  }
-
   public JobHistoryServer() {
     super(JobHistoryServer.class.getName());
   }

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java Wed Nov  2 05:34:31 2011
@@ -23,6 +23,7 @@ import java.util.Map;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.JobACL;
+import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
 import org.apache.hadoop.mapreduce.v2.api.records.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
@@ -159,4 +160,9 @@ public class PartialJob implements org.a
     throw new IllegalStateException("Not implemented yet");
   }
 
+  @Override
+  public List<AMInfo> getAMInfos() {
+    return null;
+  }
+
 }

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsController.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsController.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsController.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsController.java Wed Nov  2 05:34:31 2011
@@ -23,6 +23,7 @@ import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.v2.app.webapp.App;
 import org.apache.hadoop.mapreduce.v2.app.webapp.AppController;
+import org.apache.hadoop.yarn.server.nodemanager.webapp.AggregatedLogsPage;
 import org.apache.hadoop.yarn.webapp.View;
 
 import com.google.inject.Inject;
@@ -32,6 +33,7 @@ import com.google.inject.Inject;
  */
 public class HsController extends AppController {
   
+  
   @Inject HsController(App app, Configuration conf, RequestContext ctx) {
     super(app, conf, ctx, "History");
   }
@@ -169,6 +171,20 @@ public class HsController extends AppCon
     render(aboutPage());
   }
   
+  /**
+   * Render the logs page.
+   */
+  public void logs() {
+    render(HsLogsPage.class);
+  }
+
+  /**
+   * Render the nm logs page.
+   */
+  public void nmlogs() {
+    render(AggregatedLogsPage.class);
+  }
+  
   /*
    * (non-Javadoc)
    * @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#singleCounterPage()

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java Wed Nov  2 05:34:31 2011
@@ -24,6 +24,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.mapreduce.JobACL;
+import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
@@ -37,8 +38,13 @@ import org.apache.hadoop.mapreduce.v2.ut
 import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.util.BuilderUtils;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.ResponseInfo;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 import org.apache.hadoop.yarn.webapp.view.InfoBlock;
 import static org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp.*;
@@ -86,7 +92,7 @@ public class HsJobBlock extends HtmlBloc
       return;
     }
     Map<JobACL, AccessControlList> acls = job.getJobACLs();
-    
+    List<AMInfo> amInfos = job.getAMInfos();
     JobReport jobReport = job.getReport();
     int mapTasks = job.getTotalMaps();
     int mapTasksComplete = job.getCompletedMaps();
@@ -105,6 +111,9 @@ public class HsJobBlock extends HtmlBloc
         _("Elapsed:", StringUtils.formatTime(
             Times.elapsed(startTime, finishTime, false)));
     
+    String amString =
+        amInfos.size() == 1 ? "ApplicationMaster" : "ApplicationMasters"; 
+    
     List<String> diagnostics = job.getDiagnostics();
     if(diagnostics != null && !diagnostics.isEmpty()) {
       StringBuffer b = new StringBuffer();
@@ -127,10 +136,44 @@ public class HsJobBlock extends HtmlBloc
       infoBlock._("ACL "+entry.getKey().getAclName()+":",
           entry.getValue().getAclString());
     }
-    html.
+    DIV<Hamlet> div = html.
       _(InfoBlock.class).
-      div(_INFO_WRAP).
-
+      div(_INFO_WRAP);
+    
+      // MRAppMasters Table
+        TABLE<DIV<Hamlet>> table = div.table("#job");
+        table.
+          tr().
+            th(amString).
+          _().
+          tr().
+            th(_TH, "Attempt Number").
+            th(_TH, "Start Time").
+            th(_TH, "Node").
+            th(_TH, "Logs").
+            _();
+          for (AMInfo amInfo : amInfos) {
+            String nodeHttpAddress = amInfo.getNodeManagerHost() + 
+                ":" + amInfo.getNodeManagerHttpPort();
+            NodeId nodeId = BuilderUtils.newNodeId(
+                amInfo.getNodeManagerHost(), amInfo.getNodeManagerPort());
+            
+            table.tr().
+              td(String.valueOf(amInfo.getAppAttemptId().getAttemptId())).
+              td(new Date(amInfo.getStartTime()).toString()).
+              td().a(".nodelink", url("http://", nodeHttpAddress), 
+                  nodeHttpAddress)._().
+              td().a(".logslink", url("logs", nodeId.toString(), 
+                  amInfo.getContainerId().toString(), jid, job.getUserName()), 
+                      "logs")._().
+            _();
+          }
+          table._();
+          div._();
+
+
+        html.div(_INFO_WRAP).
+
       // Tasks table
         table("#job").
           tr().

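In the HsJobBlock table above, each AM row links to the NM web UI via host:httpPort, while the log link is keyed by a NodeId built from the NM's RPC port. A small sketch of that distinction, using the BuilderUtils call from the diff (host and ports are assumed example values; the class name is hypothetical):

    import org.apache.hadoop.yarn.api.records.NodeId;
    import org.apache.hadoop.yarn.util.BuilderUtils;

    public class NodeLinkSketch {
      public static void main(String[] args) {
        String host = "node042";            // assumed example values
        int nmPort = 45454, nmHttpPort = 8042;
        // Web link uses the HTTP port; NodeId pairs host with the RPC port.
        String nodeHttpAddress = host + ":" + nmHttpPort;
        NodeId nodeId = BuilderUtils.newNodeId(host, nmPort);
        System.out.println(nodeHttpAddress + " -> " + nodeId);
      }
    }
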
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java Wed Nov  2 05:34:31 2011
@@ -40,6 +40,7 @@ import org.apache.hadoop.yarn.webapp.Sub
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TD;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TFOOT;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.THEAD;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR;
@@ -111,7 +112,10 @@ public class HsTaskPage extends HsView {
         String taid = MRApps.toString(ta.getID());
 
         String nodeHttpAddr = ta.getNodeHttpAddress();
-        
+        String containerIdString = ta.getAssignedContainerID().toString();
+        String nodeIdString = ta.getAssignedContainerMgrAddress();
+        String nodeRackName = ta.getNodeRackName();        
+
         long attemptStartTime = ta.getLaunchTime();
         long shuffleFinishTime = -1;
         long sortFinishTime = -1;
@@ -134,12 +138,16 @@ public class HsTaskPage extends HsView {
         int sortId = ta.getID().getId() + (ta.getID().getTaskId().getId() * 10000);
         
         TR<TBODY<TABLE<Hamlet>>> row = tbody.tr();
-        row.
-            td().
-              br().$title(String.valueOf(sortId))._(). // sorting
-              _(taid)._().
-            td(ta.getState().toString()).
-            td().a(".nodelink", url("http://", nodeHttpAddr), nodeHttpAddr)._();
+        TD<TR<TBODY<TABLE<Hamlet>>>> td = row.td();
+
+        td.br().$title(String.valueOf(sortId))._(). // sorting
+            _(taid)._().td(ta.getState().toString()).td().a(".nodelink",
+                url("http://", nodeHttpAddr),
+                nodeRackName + "/" + nodeHttpAddr);
+        td._(" ").a(".logslink",
+            url("logs", nodeIdString, containerIdString, taid, app.getJob()
+                .getUserName()), "logs");
+        td._();
         
         row.td().
           br().$title(String.valueOf(attemptStartTime))._().

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebApp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebApp.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebApp.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebApp.java Wed Nov  2 05:34:31 2011
@@ -18,6 +18,10 @@
 
 package org.apache.hadoop.mapreduce.v2.hs.webapp;
 
+import static org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebParams.CONTAINER_ID;
+import static org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebParams.NM_NODENAME;
+import static org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebParams.ENTITY_STRING;
+import static org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebParams.APP_OWNER;
 import static org.apache.hadoop.yarn.util.StringHelper.pajoin;
 
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
@@ -51,6 +55,10 @@ public class HsWebApp extends WebApp imp
     route(pajoin("/singletaskcounter",TASK_ID, COUNTER_GROUP, COUNTER_NAME),
         HsController.class, "singleTaskCounter");
     route("/about", HsController.class, "about");
+    route(pajoin("/logs", NM_NODENAME, CONTAINER_ID, ENTITY_STRING, APP_OWNER),
+        HsController.class, "logs");
+    route(pajoin("/nmlogs", NM_NODENAME, CONTAINER_ID, ENTITY_STRING, APP_OWNER),
+        HsController.class, "nmlogs");
   }
 }
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEvents.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEvents.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEvents.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEvents.java Wed Nov  2 05:34:31 2011
@@ -41,8 +41,10 @@ import org.apache.hadoop.mapreduce.v2.ap
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.service.Service;
+import org.apache.hadoop.yarn.util.BuilderUtils;
 import org.junit.Test;
 
 public class TestJobHistoryEvents {
@@ -159,6 +161,10 @@ public class TestJobHistoryEvents {
   private void verifyAttempt(TaskAttempt attempt) {
     Assert.assertEquals("TaskAttempt state not currect", 
         TaskAttemptState.SUCCEEDED, attempt.getState());
+    Assert.assertNotNull(attempt.getAssignedContainerID());
+    // Verify the wrong ctor is not being used. Remove after mrv1 is removed.
+    ContainerId fakeCid = BuilderUtils.newContainerId(-1, -1, -1, -1);
+    Assert.assertFalse(attempt.getAssignedContainerID().equals(fakeCid));
   }
 
   static class MRAppWithHistory extends MRApp {

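The new assertions in TestJobHistoryEvents compare the recovered container id against a sentinel built from all -1 fields, so a regression to the old placeholder ctor would fail the test. A sketch of the sentinel construction, using the BuilderUtils call from the diff (the class name is hypothetical):

    import org.apache.hadoop.yarn.api.records.ContainerId;
    import org.apache.hadoop.yarn.util.BuilderUtils;

    public class FakeCidSketch {
      public static void main(String[] args) {
        // Matches the sentinel the test builds and asserts against.
        ContainerId fakeCid = BuilderUtils.newContainerId(-1, -1, -1, -1);
        System.out.println(fakeCid);
      }
    }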

