hadoop-mapreduce-commits mailing list archives

From: t...@apache.org
Subject: svn commit: r1380990 [1/2] - in /hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project: ./ conf/ hadoop-mapreduce-client/ hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/ hadoop-mapreduce-client/hadoop-map...
Date: Wed, 05 Sep 2012 04:58:29 GMT
Author: todd
Date: Wed Sep  5 04:57:47 2012
New Revision: 1380990

URL: http://svn.apache.org/viewvc?rev=1380990&view=rev
Log:
Merge trunk into QJM branch

Added:
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttemptContainerRequest.java
      - copied unchanged from r1380986, hadoop/common/trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttemptContainerRequest.java
Removed:
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCache.java
Modified:
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/CHANGES.txt   (contents, props changed)
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/conf/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml   (contents, props changed)
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJob.java
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobsQuery.java
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/conf/TestJobConf.java
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/IOMapperBase.java
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestResourceMgrDelegate.java
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestYarnClientProtocolProvider.java
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestYARNRunner.java
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/pom.xml
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/src/c++/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/src/contrib/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/src/contrib/block_forensics/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/src/contrib/build-contrib.xml   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/src/contrib/build.xml   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/src/contrib/data_join/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/src/contrib/eclipse-plugin/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/src/contrib/index/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/src/contrib/vaidya/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/src/examples/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/src/java/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/src/java/mapred-default.xml
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/src/test/mapred/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/hdfs/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/ipc/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/src/webapps/job/   (props changed)

Propchange: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-mapreduce-project:r1377086-1380986

Modified: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/CHANGES.txt?rev=1380990&r1=1380989&r2=1380990&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/CHANGES.txt Wed Sep  5 04:57:47 2012
@@ -144,6 +144,12 @@ Branch-2 ( Unreleased changes )
 
     MAPREDUCE-4511. Add IFile readahead (ahmed via tucu)
 
+    MAPREDUCE-4408. allow jobs to set a JAR that is in the distributed cached 
+    (rkanter via tucu)
+
+    MAPREDUCE-2786. Add compression option for TestDFSIO.
+    (Plamen Jeliazkov via shv)
+
   BUG FIXES
 
     MAPREDUCE-4422. YARN_APPLICATION_CLASSPATH needs a documented default value in 
@@ -181,6 +187,12 @@ Branch-2 ( Unreleased changes )
 
     MAPREDUCE-4470. Fix TestCombineFileInputFormat.testForEmptyFile (ikatsov via tucu)
 
+    MAPREDUCE-4608. hadoop-mapreduce-client is missing some dependencies.
+    (tucu via tomwhite)
+
+    MAPREDUCE-4610. Support deprecated mapreduce.job.counters.limit property in
+    MR2. (tomwhite)
+
 Release 2.1.0-alpha - Unreleased 
 
   INCOMPATIBLE CHANGES
@@ -231,6 +243,12 @@ Release 2.1.0-alpha - Unreleased 
     MAPREDUCE-3289. Make use of fadvise in the NM's shuffle handler.
     (Todd Lipcon and Siddharth Seth via sseth)
 
+    MAPREDUCE-4580. Change MapReduce to use the yarn-client module.
+    (Vinod Kumar Vavilapalli via sseth)
+
+    MAPREDUCE-4579. Split TestTaskAttempt into two so as to pass tests on
+    jdk7. (Thomas Graves via vinodkv)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -491,6 +509,18 @@ Release 2.0.0-alpha - 05-23-2012
     MAPREDUCE-4444. nodemanager fails to start when one of the local-dirs is
     bad (Jason Lowe via bobby)
 
+Release 0.23.4 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 0.23.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -538,6 +568,9 @@ Release 0.23.3 - UNRELEASED
     MAPREDUCE-4375. Show Configuration Tracability in MR UI (bobby 
     via tgraves)
 
+    MAPREDUCE-4569. Fixed TestHsWebServicesJobsQuery to pass on JDK7 by not
+    depending on test order. (Thomas Graves via vinodkv)
+
   OPTIMIZATIONS
 
     MAPREDUCE-3850. Avoid redundant calls for tokens in TokenCache (Daryn
@@ -832,6 +865,21 @@ Release 0.23.3 - UNRELEASED
     MAPREDUCE-4570. ProcfsBasedProcessTree#constructProcessInfo() prints a
     warning if procfsDir/<pid>/stat is not found. (Ahmed Radwan via bobby)
 
+    MAPREDUCE-4600. TestTokenCache.java from MRV1 no longer compiles  (daryn 
+    via bobby)
+
+    MAPREDUCE-4612. job summary file permissions not set when its created
+    (tgraves via bobby)
+
+    MAPREDUCE-4614. Simplify debugging a job's tokens (daryn via bobby)
+
+    MAPREDUCE-4611. MR AM dies badly when Node is decommissioned (Robert
+    Evans via tgraves)
+
+    MAPREDUCE-4604. In mapred-default, mapreduce.map.maxattempts &
+    mapreduce.reduce.maxattempts defaults are set to 4 as well as
+    mapreduce.job.maxtaskfailures.per.tracker. (Ravi Prakash via jeagles)
+
 Release 0.23.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

Propchange: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/CHANGES.txt
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-mapreduce-project/CHANGES.txt:r1377086-1380986

Propchange: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/conf/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-mapreduce-project/conf:r1377086-1380986

Modified: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java?rev=1380990&r1=1380989&r2=1380990&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java Wed Sep  5 04:57:47 2012
@@ -93,7 +93,11 @@ class YarnChild {
     // Security framework already loaded the tokens into current ugi
     Credentials credentials =
         UserGroupInformation.getCurrentUser().getCredentials();
-    
+    LOG.info("Executing with tokens:");
+    for (Token<?> token: credentials.getAllTokens()) {
+      LOG.info(token);
+    }
+
     // Create TaskUmbilicalProtocol as actual task owner.
     UserGroupInformation taskOwner =
       UserGroupInformation.createRemoteUser(firstTaskid.getJobID().toString());
@@ -141,7 +145,7 @@ class YarnChild {
       childUGI = UserGroupInformation.createRemoteUser(System
           .getenv(ApplicationConstants.Environment.USER.toString()));
       // Add tokens to new user so that it may execute its task correctly.
-      job.getCredentials().addTokensToUGI(childUGI);
+      childUGI.addCredentials(credentials);
 
       // Create a final reference to the task for the doAs block
       final Task taskFinal = task;
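
An illustrative sketch of the pattern the YarnChild change above relies on (not part of the committed diff): log the tokens held by the current UGI, copy them onto a freshly created remote-user UGI with addCredentials(), and run the work inside doAs(). The user name, printed output, and class name are hypothetical stand-ins; only the UserGroupInformation/Credentials calls mirror the patch.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

public class ChildUgiSketch {
  public static void main(String[] args) throws Exception {
    // Tokens the security framework loaded into the current user.
    Credentials credentials =
        UserGroupInformation.getCurrentUser().getCredentials();
    for (Token<?> token : credentials.getAllTokens()) {
      System.out.println("Executing with token: " + token);
    }

    // Hypothetical task user; YarnChild derives it from its environment.
    final UserGroupInformation childUGI =
        UserGroupInformation.createRemoteUser("task-user");
    childUGI.addCredentials(credentials);   // the call the patch switches to

    childUGI.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() {
        // YarnChild runs the actual task here; this sketch only prints.
        System.out.println("running as " + childUGI.getUserName());
        return null;
      }
    });
  }
}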

Modified: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java?rev=1380990&r1=1380989&r2=1380990&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java Wed Sep  5 04:57:47 2012
@@ -99,8 +99,8 @@ public class JobHistoryEventHandler exte
   protected static final Map<JobId, MetaInfo> fileMap =
     Collections.<JobId,MetaInfo>synchronizedMap(new HashMap<JobId,MetaInfo>());
 
-  // Has a signal (SIGTERM etc) been issued?
-  protected volatile boolean isSignalled = false;
+  // should job completion be force when the AM shuts down?
+  protected volatile boolean forceJobCompletion = false;
 
   public JobHistoryEventHandler(AppContext context, int startCount) {
     super("JobHistoryEventHandler");
@@ -322,7 +322,7 @@ public class JobHistoryEventHandler exte
     // Process JobUnsuccessfulCompletionEvent for jobIds which still haven't
     // closed their event writers
     Iterator<JobId> jobIt = fileMap.keySet().iterator();
-    if(isSignalled) {
+    if(forceJobCompletion) {
       while (jobIt.hasNext()) {
         JobId toClose = jobIt.next();
         MetaInfo mi = fileMap.get(toClose);
@@ -661,6 +661,8 @@ public class JobHistoryEventHandler exte
       summaryFileOut = doneDirFS.create(qualifiedSummaryDoneFile, true);
       summaryFileOut.writeUTF(mi.getJobSummary().getJobSummaryString());
       summaryFileOut.close();
+      doneDirFS.setPermission(qualifiedSummaryDoneFile, new FsPermission(
+          JobHistoryUtils.HISTORY_INTERMEDIATE_FILE_PERMISSIONS));
     } catch (IOException e) {
       LOG.info("Unable to write out JobSummaryInfo to ["
           + qualifiedSummaryDoneFile + "]", e);
@@ -894,7 +896,7 @@ public class JobHistoryEventHandler exte
       
       stagingDirFS.delete(fromPath, false);
     }
-    }
+  }
 
   boolean pathExists(FileSystem fileSys, Path path) throws IOException {
     return fileSys.exists(path);
@@ -909,9 +911,9 @@ public class JobHistoryEventHandler exte
     return tmpFileName.substring(0, tmpFileName.length()-4);
   }
 
-  public void setSignalled(boolean isSignalled) {
-    this.isSignalled = isSignalled;
-    LOG.info("JobHistoryEventHandler notified that isSignalled was "
-      + isSignalled);
+  public void setForcejobCompletion(boolean forceJobCompletion) {
+    this.forceJobCompletion = forceJobCompletion;
+    LOG.info("JobHistoryEventHandler notified that forceJobCompletion is "
+      + forceJobCompletion);
   }
 }
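
A small illustrative sketch (not part of the committed diff) of the MAPREDUCE-4612 fix above: a file created through FileSystem.create() typically ends up with the default permission filtered by the umask, so the summary file is explicitly re-permissioned after it is written and closed. The local filesystem, path, contents, and 0770 mode below are assumptions standing in for the history-file constants.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class SummaryPermissionSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path summaryFile = new Path("/tmp/job-summary.info");   // hypothetical path

    FSDataOutputStream out = fs.create(summaryFile, true);  // overwrite if present
    out.writeUTF("jobId=job_0001,status=SUCCEEDED");        // hypothetical summary
    out.close();

    // Explicitly set the intended permissions once the file exists,
    // mirroring the setPermission() call added in the patch.
    fs.setPermission(summaryFile, new FsPermission((short) 0770));
  }
}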

Modified: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java?rev=1380990&r1=1380989&r2=1380990&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java Wed Sep  5 04:57:47 2012
@@ -170,6 +170,8 @@ public class MRAppMaster extends Composi
   private Credentials fsTokens = new Credentials(); // Filled during init
   private UserGroupInformation currentUser; // Will be setup during init
 
+  private volatile boolean isLastAMRetry = false;
+
   public MRAppMaster(ApplicationAttemptId applicationAttemptId,
       ContainerId containerId, String nmHost, int nmPort, int nmHttpPort,
       long appSubmitTime) {
@@ -195,11 +197,21 @@ public class MRAppMaster extends Composi
 
   @Override
   public void init(final Configuration conf) {
-
     conf.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, true);
 
     downloadTokensAndSetupUGI(conf);
-
+    
+    //TODO this is a hack, we really need the RM to inform us when we
+    // are the last one.  This would allow us to configure retries on
+    // a per application basis.
+    int numAMRetries = conf.getInt(YarnConfiguration.RM_AM_MAX_RETRIES, 
+        YarnConfiguration.DEFAULT_RM_AM_MAX_RETRIES);
+    isLastAMRetry = appAttemptID.getAttemptId() >= numAMRetries;
+    LOG.info("AM Retries: " + numAMRetries + 
+        " attempt num: " + appAttemptID.getAttemptId() +
+        " is last retry: " + isLastAMRetry);
+    
+    
     context = new RunningAppContext(conf);
 
     // Job name is the same as the app name util we support DAG of jobs
@@ -417,6 +429,8 @@ public class MRAppMaster extends Composi
       }
 
       try {
+        //We are finishing cleanly so this is the last retry
+        isLastAMRetry = true;
         // Stop all services
         // This will also send the final report to the ResourceManager
         LOG.info("Calling stop for all the services");
@@ -487,7 +501,7 @@ public class MRAppMaster extends Composi
         fsTokens.addAll(Credentials.readTokenStorageFile(jobTokenFile, conf));
         LOG.info("jobSubmitDir=" + jobSubmitDir + " jobTokenFile="
             + jobTokenFile);
-        fsTokens.addTokensToUGI(currentUser); // For use by AppMaster itself.
+        currentUser.addCredentials(fsTokens); // For use by AppMaster itself.
       }
     } catch (IOException e) {
       throw new YarnException(e);
@@ -666,7 +680,11 @@ public class MRAppMaster extends Composi
     }
 
     public void setSignalled(boolean isSignalled) {
-      ((RMCommunicator) containerAllocator).setSignalled(true);
+      ((RMCommunicator) containerAllocator).setSignalled(isSignalled);
+    }
+    
+    public void setShouldUnregister(boolean shouldUnregister) {
+      ((RMCommunicator) containerAllocator).setShouldUnregister(shouldUnregister);
     }
   }
 
@@ -717,7 +735,12 @@ public class MRAppMaster extends Composi
     @Override
     public synchronized void stop() {
       try {
-        cleanupStagingDir();
+        if(isLastAMRetry) {
+          cleanupStagingDir();
+        } else {
+          LOG.info("Skipping cleaning up the staging dir. "
+              + "assuming AM will be retried.");
+        }
       } catch (IOException io) {
         LOG.error("Failed to cleanup staging dir: ", io);
       }
@@ -1016,14 +1039,19 @@ public class MRAppMaster extends Composi
     public void run() {
       LOG.info("MRAppMaster received a signal. Signaling RMCommunicator and "
         + "JobHistoryEventHandler.");
+
       // Notify the JHEH and RMCommunicator that a SIGTERM has been received so
       // that they don't take too long in shutting down
       if(appMaster.containerAllocator instanceof ContainerAllocatorRouter) {
         ((ContainerAllocatorRouter) appMaster.containerAllocator)
         .setSignalled(true);
+        ((ContainerAllocatorRouter) appMaster.containerAllocator)
+        .setShouldUnregister(appMaster.isLastAMRetry);
       }
+      
       if(appMaster.jobHistoryEventHandler != null) {
-        appMaster.jobHistoryEventHandler.setSignalled(true);
+        appMaster.jobHistoryEventHandler
+          .setForcejobCompletion(appMaster.isLastAMRetry);
       }
       appMaster.stop();
     }
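
An isolated sketch (not part of the committed diff) of the last-retry test that MRAppMaster.init() now performs and that the shutdown hook above feeds into setShouldUnregister()/setForcejobCompletion(): the AM treats itself as the last attempt when its attempt id has reached the configured maximum. The attempt number below is a hypothetical value; the configuration keys are the YARN constants the patch reads.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class LastAmRetrySketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    int numAMRetries = conf.getInt(YarnConfiguration.RM_AM_MAX_RETRIES,
        YarnConfiguration.DEFAULT_RM_AM_MAX_RETRIES);

    int attemptId = 1;  // hypothetical current application attempt number
    boolean isLastAMRetry = attemptId >= numAMRetries;

    // Only the last attempt should clean the staging dir, unregister from the
    // RM, and force job completion in the history handler; earlier attempts
    // leave that state in place for the retry.
    System.out.println("AM Retries: " + numAMRetries
        + " attempt num: " + attemptId
        + " is last retry: " + isLastAMRetry);
  }
}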

Modified: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java?rev=1380990&r1=1380989&r2=1380990&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java Wed Sep  5 04:57:47 2012
@@ -84,6 +84,7 @@ public abstract class RMCommunicator ext
   private Job job;
   // Has a signal (SIGTERM etc) been issued?
   protected volatile boolean isSignalled = false;
+  private volatile boolean shouldUnregister = true;
 
   public RMCommunicator(ClientService clientService, AppContext context) {
     super("RMCommunicator");
@@ -213,7 +214,9 @@ public abstract class RMCommunicator ext
     } catch (InterruptedException ie) {
       LOG.warn("InterruptedException while stopping", ie);
     }
-    unregister();
+    if(shouldUnregister) {
+      unregister();
+    }
     super.stop();
   }
 
@@ -288,8 +291,15 @@ public abstract class RMCommunicator ext
 
   protected abstract void heartbeat() throws Exception;
 
+  public void setShouldUnregister(boolean shouldUnregister) {
+    this.shouldUnregister = shouldUnregister;
+    LOG.info("RMCommunicator notified that shouldUnregistered is: " 
+        + shouldUnregister);
+  }
+  
   public void setSignalled(boolean isSignalled) {
     this.isSignalled = isSignalled;
-    LOG.info("RMCommunicator notified that iSignalled was : " + isSignalled);
+    LOG.info("RMCommunicator notified that iSignalled is: " 
+        + isSignalled);
   }
 }

Modified: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java?rev=1380990&r1=1380989&r2=1380990&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java Wed Sep  5 04:57:47 2012
@@ -330,7 +330,7 @@ public class TestJobHistoryEventHandler 
     Mockito.when(jobId.getAppId()).thenReturn(mockAppId);
 
     jheh.addToFileMap(jobId);
-    jheh.setSignalled(true);
+    jheh.setForcejobCompletion(true);
     for(int i=0; i < numEvents; ++i) {
       events[i] = getEventToEnqueue(jobId);
       jheh.handle(events[i]);

Modified: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java?rev=1380990&r1=1380989&r2=1380990&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java Wed Sep  5 04:57:47 2012
@@ -93,7 +93,6 @@ import org.apache.hadoop.yarn.server.res
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
-import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
 import org.apache.hadoop.yarn.util.BuilderUtils;
 import org.junit.After;
 import org.junit.Test;
@@ -1099,8 +1098,7 @@ public class TestRMContainerAllocator {
       super();
       try {
         Configuration conf = new Configuration();
-        reinitialize(conf, new ContainerTokenSecretManager(conf),
-            rmContext);
+        reinitialize(conf, rmContext);
       } catch (IOException ie) {
         LOG.info("add application failed with ", ie);
         assert (false);

Modified: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java?rev=1380990&r1=1380989&r2=1380990&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java Wed Sep  5 04:57:47 2012
@@ -23,6 +23,7 @@ import static org.mockito.Matchers.anyBo
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.times;
 
 import java.io.IOException;
 
@@ -47,6 +48,7 @@ import org.apache.hadoop.security.UserGr
 import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
@@ -89,28 +91,94 @@ import org.junit.Test;
      handler.handle(new JobFinishEvent(jobid));
      verify(fs).delete(stagingJobPath, true);
    }
+   
+   @Test
+   public void testDeletionofStagingOnKill() throws IOException {
+     conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
+     conf.setInt(YarnConfiguration.RM_AM_MAX_RETRIES, 4);
+     fs = mock(FileSystem.class);
+     when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
+     ApplicationAttemptId attemptId = recordFactory.newRecordInstance(
+         ApplicationAttemptId.class);
+     attemptId.setAttemptId(0);
+     ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class);
+     appId.setClusterTimestamp(System.currentTimeMillis());
+     appId.setId(0);
+     attemptId.setApplicationId(appId);
+     JobId jobid = recordFactory.newRecordInstance(JobId.class);
+     jobid.setAppId(appId);
+     ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
+     MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc);
+     appMaster.init(conf);
+     //simulate the process being killed
+     MRAppMaster.MRAppMasterShutdownHook hook = 
+       new MRAppMaster.MRAppMasterShutdownHook(appMaster);
+     hook.run();
+     verify(fs, times(0)).delete(stagingJobPath, true);
+   }
+   
+   @Test
+   public void testDeletionofStagingOnKillLastTry() throws IOException {
+     conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
+     conf.setInt(YarnConfiguration.RM_AM_MAX_RETRIES, 1);
+     fs = mock(FileSystem.class);
+     when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
+     ApplicationAttemptId attemptId = recordFactory.newRecordInstance(
+         ApplicationAttemptId.class);
+     attemptId.setAttemptId(1);
+     ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class);
+     appId.setClusterTimestamp(System.currentTimeMillis());
+     appId.setId(0);
+     attemptId.setApplicationId(appId);
+     JobId jobid = recordFactory.newRecordInstance(JobId.class);
+     jobid.setAppId(appId);
+     ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
+     MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc);
+     appMaster.init(conf);
+     //simulate the process being killed
+     MRAppMaster.MRAppMasterShutdownHook hook = 
+       new MRAppMaster.MRAppMasterShutdownHook(appMaster);
+     hook.run();
+     verify(fs).delete(stagingJobPath, true);
+   }
 
    private class TestMRApp extends MRAppMaster {
+     ContainerAllocator allocator;
 
-    public TestMRApp(ApplicationAttemptId applicationAttemptId) {
-      super(applicationAttemptId, BuilderUtils.newContainerId(
-          applicationAttemptId, 1), "testhost", 2222, 3333, System
-          .currentTimeMillis());
-    }
-     
-    @Override
-    protected FileSystem getFileSystem(Configuration conf) {
-      return fs;
-    }
-    
-    @Override
-    protected void sysexit() {      
-    }
-    
-    @Override
-    public Configuration getConfig() {
-      return conf;
-    }
+     public TestMRApp(ApplicationAttemptId applicationAttemptId, 
+         ContainerAllocator allocator) {
+       super(applicationAttemptId, BuilderUtils.newContainerId(
+           applicationAttemptId, 1), "testhost", 2222, 3333, System
+           .currentTimeMillis());
+       this.allocator = allocator;
+     }
+
+     public TestMRApp(ApplicationAttemptId applicationAttemptId) {
+       this(applicationAttemptId, null);
+     }
+
+     @Override
+     protected FileSystem getFileSystem(Configuration conf) {
+       return fs;
+     }
+
+     @Override
+     protected ContainerAllocator createContainerAllocator(
+         final ClientService clientService, final AppContext context) {
+       if(allocator == null) {
+         return super.createContainerAllocator(clientService, context);
+       }
+       return allocator;
+     }
+
+     @Override
+     protected void sysexit() {      
+     }
+
+     @Override
+     public Configuration getConfig() {
+       return conf;
+     }
    }
 
   private final class MRAppTestCleanup extends MRApp {

Modified: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java?rev=1380990&r1=1380989&r2=1380990&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java Wed Sep  5 04:57:47 2012
@@ -28,7 +28,6 @@ import static org.mockito.Mockito.when;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
@@ -36,23 +35,17 @@ import java.util.Map;
 import junit.framework.Assert;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.apache.hadoop.io.DataInputByteBuffer;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.MapTaskAttemptImpl;
-import org.apache.hadoop.mapred.WrappedJvmID;
 import org.apache.hadoop.mapreduce.JobCounter;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.OutputCommitter;
-import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
 import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptUnsuccessfulCompletion;
-import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
 import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
@@ -79,18 +72,14 @@ import org.apache.hadoop.mapreduce.v2.ap
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent;
 import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
 import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.yarn.Clock;
 import org.apache.hadoop.yarn.ClusterInfo;
 import org.apache.hadoop.yarn.SystemClock;
-import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.event.Event;
@@ -101,82 +90,6 @@ import org.mockito.ArgumentCaptor;
 
 @SuppressWarnings({"unchecked", "rawtypes"})
 public class TestTaskAttempt{
-  @Test
-  public void testAttemptContainerRequest() throws Exception {
-    //WARNING: This test must run first.  This is because there is an 
-    // optimization where the credentials passed in are cached statically so 
-    // they do not need to be recomputed when creating a new 
-    // ContainerLaunchContext. if other tests run first this code will cache
-    // their credentials and this test will fail trying to look for the
-    // credentials it inserted in.
-    final Text SECRET_KEY_ALIAS = new Text("secretkeyalias");
-    final byte[] SECRET_KEY = ("secretkey").getBytes();
-    Map<ApplicationAccessType, String> acls =
-        new HashMap<ApplicationAccessType, String>(1);
-    acls.put(ApplicationAccessType.VIEW_APP, "otheruser");
-    ApplicationId appId = BuilderUtils.newApplicationId(1, 1);
-    JobId jobId = MRBuilderUtils.newJobId(appId, 1);
-    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
-    Path jobFile = mock(Path.class);
-
-    EventHandler eventHandler = mock(EventHandler.class);
-    TaskAttemptListener taListener = mock(TaskAttemptListener.class);
-    when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
-
-    JobConf jobConf = new JobConf();
-    jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
-    jobConf.setBoolean("fs.file.impl.disable.cache", true);
-    jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
-
-    // setup UGI for security so tokens and keys are preserved
-    jobConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
-    UserGroupInformation.setConfiguration(jobConf);
-
-    Credentials credentials = new Credentials();
-    credentials.addSecretKey(SECRET_KEY_ALIAS, SECRET_KEY);
-    Token<JobTokenIdentifier> jobToken = new Token<JobTokenIdentifier>(
-        ("tokenid").getBytes(), ("tokenpw").getBytes(),
-        new Text("tokenkind"), new Text("tokenservice"));
-    
-    TaskAttemptImpl taImpl =
-        new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
-            mock(TaskSplitMetaInfo.class), jobConf, taListener,
-            mock(OutputCommitter.class), jobToken, credentials,
-            new SystemClock(), null);
-
-    jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, taImpl.getID().toString());
-    ContainerId containerId = BuilderUtils.newContainerId(1, 1, 1, 1);
-    
-    ContainerLaunchContext launchCtx =
-        TaskAttemptImpl.createContainerLaunchContext(acls, containerId,
-            jobConf, jobToken, taImpl.createRemoteTask(),
-            TypeConverter.fromYarn(jobId), mock(Resource.class),
-            mock(WrappedJvmID.class), taListener,
-            credentials);
-
-    Assert.assertEquals("ACLs mismatch", acls, launchCtx.getApplicationACLs());
-    Credentials launchCredentials = new Credentials();
-
-    DataInputByteBuffer dibb = new DataInputByteBuffer();
-    dibb.reset(launchCtx.getContainerTokens());
-    launchCredentials.readTokenStorageStream(dibb);
-
-    // verify all tokens specified for the task attempt are in the launch context
-    for (Token<? extends TokenIdentifier> token : credentials.getAllTokens()) {
-      Token<? extends TokenIdentifier> launchToken =
-          launchCredentials.getToken(token.getService());
-      Assert.assertNotNull("Token " + token.getService() + " is missing",
-          launchToken);
-      Assert.assertEquals("Token " + token.getService() + " mismatch",
-          token, launchToken);
-    }
-
-    // verify the secret key is in the launch context
-    Assert.assertNotNull("Secret key missing",
-        launchCredentials.getSecretKey(SECRET_KEY_ALIAS));
-    Assert.assertTrue("Secret key mismatch", Arrays.equals(SECRET_KEY,
-        launchCredentials.getSecretKey(SECRET_KEY_ALIAS)));
-  }
 
   static public class StubbedFS extends RawLocalFileSystem {
     @Override
@@ -227,7 +140,7 @@ public class TestTaskAttempt{
     //Only a single occurrence of /DefaultRack
     assertEquals(1, requestedRacks.length);
   }
- 
+
   @Test
   public void testHostResolveAttempt() throws Exception {
     TaskAttemptImpl.RequestContainerTransition rct =
@@ -266,7 +179,7 @@ public class TestTaskAttempt{
     }
     assertEquals(0, expected.size());
   }
-  
+
   @Test
   public void testSlotMillisCounterUpdate() throws Exception {
     verifySlotMillis(2048, 2048, 1024);
@@ -325,13 +238,13 @@ public class TestTaskAttempt{
             .getAllCounters().findCounter(JobCounter.SLOTS_MILLIS_REDUCES)
             .getValue());
   }
-  
+
   private TaskAttemptImpl createMapTaskAttemptImplForTest(
       EventHandler eventHandler, TaskSplitMetaInfo taskSplitMetaInfo) {
     Clock clock = new SystemClock();
     return createMapTaskAttemptImplForTest(eventHandler, taskSplitMetaInfo, clock);
   }
-  
+
   private TaskAttemptImpl createMapTaskAttemptImplForTest(
       EventHandler eventHandler, TaskSplitMetaInfo taskSplitMetaInfo, Clock clock) {
     ApplicationId appId = BuilderUtils.newApplicationId(1, 1);
@@ -402,30 +315,30 @@ public class TestTaskAttempt{
       };
     }
   }
-  
+
   @Test
   public void testLaunchFailedWhileKilling() throws Exception {
     ApplicationId appId = BuilderUtils.newApplicationId(1, 2);
-    ApplicationAttemptId appAttemptId = 
+    ApplicationAttemptId appAttemptId =
       BuilderUtils.newApplicationAttemptId(appId, 0);
     JobId jobId = MRBuilderUtils.newJobId(appId, 1);
     TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
     TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
     Path jobFile = mock(Path.class);
-    
+
     MockEventHandler eventHandler = new MockEventHandler();
     TaskAttemptListener taListener = mock(TaskAttemptListener.class);
     when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
-    
+
     JobConf jobConf = new JobConf();
     jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
     jobConf.setBoolean("fs.file.impl.disable.cache", true);
     jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
     jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
-    
+
     TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
     when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});
-    
+
     TaskAttemptImpl taImpl =
       new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
           splits, jobConf, taListener,
@@ -437,7 +350,7 @@ public class TestTaskAttempt{
     Container container = mock(Container.class);
     when(container.getId()).thenReturn(contId);
     when(container.getNodeId()).thenReturn(nid);
-    
+
     taImpl.handle(new TaskAttemptEvent(attemptId,
         TaskAttemptEventType.TA_SCHEDULE));
     taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
@@ -450,7 +363,7 @@ public class TestTaskAttempt{
         TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));
     assertFalse(eventHandler.internalError);
   }
-  
+
   @Test
   public void testContainerCleanedWhileRunning() throws Exception {
     ApplicationId appId = BuilderUtils.newApplicationId(1, 2);
@@ -565,7 +478,7 @@ public class TestTaskAttempt{
     assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
         eventHandler.internalError);
   }
-  
+
   @Test
   public void testDoubleTooManyFetchFailure() throws Exception {
     ApplicationId appId = BuilderUtils.newApplicationId(1, 2);
@@ -618,7 +531,7 @@ public class TestTaskAttempt{
         TaskAttemptEventType.TA_DONE));
     taImpl.handle(new TaskAttemptEvent(attemptId,
         TaskAttemptEventType.TA_CONTAINER_CLEANED));
-    
+
     assertEquals("Task attempt is not in succeeded state", taImpl.getState(),
         TaskAttemptState.SUCCEEDED);
     taImpl.handle(new TaskAttemptEvent(attemptId,
@@ -635,7 +548,7 @@ public class TestTaskAttempt{
 
   public static class MockEventHandler implements EventHandler {
     public boolean internalError;
-    
+
     @Override
     public void handle(Event event) {
       if (event instanceof JobEvent) {
@@ -645,6 +558,6 @@ public class TestTaskAttempt{
         }
       }
     }
-    
+
   };
 }

Modified: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml?rev=1380990&r1=1380989&r2=1380990&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml Wed Sep  5 04:57:47 2012
@@ -39,6 +39,10 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-client</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-mapreduce-client-core</artifactId>
     </dependency>
     <dependency>

Modified: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java?rev=1380990&r1=1380989&r2=1380990&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java Wed Sep  5 04:57:47 2012
@@ -1357,7 +1357,7 @@ public class JobConf extends Configurati
    * @return the maximum no. of failures of a given job per tasktracker.
    */
   public int getMaxTaskFailuresPerTracker() {
-    return getInt(JobContext.MAX_TASK_FAILURES_PER_TRACKER, 4); 
+    return getInt(JobContext.MAX_TASK_FAILURES_PER_TRACKER, 3);
   }
 
   /**

Modified: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java?rev=1380990&r1=1380989&r2=1380990&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java Wed Sep  5 04:57:47 2012
@@ -232,9 +232,17 @@ class JobSubmitter {
       if ("".equals(job.getJobName())){
         job.setJobName(new Path(jobJar).getName());
       }
-      copyJar(new Path(jobJar), JobSubmissionFiles.getJobJar(submitJobDir), 
-          replication);
-      job.setJar(JobSubmissionFiles.getJobJar(submitJobDir).toString());
+      Path jobJarPath = new Path(jobJar);
+      URI jobJarURI = jobJarPath.toUri();
+      // If the job jar is already in fs, we don't need to copy it from local fs
+      if (jobJarURI.getScheme() == null || jobJarURI.getAuthority() == null
+              || !(jobJarURI.getScheme().equals(jtFs.getUri().getScheme()) 
+                  && jobJarURI.getAuthority().equals(
+                                            jtFs.getUri().getAuthority()))) {
+        copyJar(jobJarPath, JobSubmissionFiles.getJobJar(submitJobDir), 
+            replication);
+        job.setJar(JobSubmissionFiles.getJobJar(submitJobDir).toString());
+      }
     } else {
       LOG.warn("No job jar file set.  User classes may not be found. "+
       "See Job or Job#setJar(String).");
@@ -427,13 +435,9 @@ class JobSubmitter {
   
   private void printTokens(JobID jobId,
       Credentials credentials) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Printing tokens for job: " + jobId);
-      for(Token<?> token: credentials.getAllTokens()) {
-        if (token.getKind().toString().equals("HDFS_DELEGATION_TOKEN")) {
-          LOG.debug("Submitting with " + token);
-        }
-      }
+    LOG.info("Submitting tokens for job: " + jobId);
+    for (Token<?> token: credentials.getAllTokens()) {
+      LOG.info(token);
     }
   }
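
A standalone sketch (not part of the committed diff) of the scheme/authority comparison JobSubmitter now uses to decide whether the job jar must be copied into the staging directory: a jar URI with no scheme or authority is a local path, and a mismatch against the submission filesystem's URI means the jar lives on a different filesystem. Both URIs below are hypothetical examples.

import java.net.URI;

public class JobJarLocationSketch {
  public static void main(String[] args) {
    URI jobJarURI = URI.create("hdfs://nn1:8020/user/alice/app.jar"); // hypothetical
    URI jtFsURI   = URI.create("hdfs://nn1:8020");                    // hypothetical

    boolean needsCopy = jobJarURI.getScheme() == null
        || jobJarURI.getAuthority() == null
        || !(jobJarURI.getScheme().equals(jtFsURI.getScheme())
            && jobJarURI.getAuthority().equals(jtFsURI.getAuthority()));

    // false here: the jar already sits on the submission filesystem,
    // so the copyJar() step can be skipped entirely.
    System.out.println("copy job jar into staging dir? " + needsCopy);
  }
}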
 

Modified: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java?rev=1380990&r1=1380989&r2=1380990&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java Wed Sep  5 04:57:47 2012
@@ -20,12 +20,14 @@ package org.apache.hadoop.mapreduce.coun
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapred.JobConf;
+
 import static org.apache.hadoop.mapreduce.MRJobConfig.*;
 
 @InterfaceAudience.Private
 public class Limits {
 
-  static final Configuration conf = new Configuration();
+  static final Configuration conf = new JobConf();
   public static final int GROUP_NAME_MAX =
       conf.getInt(COUNTER_GROUP_NAME_MAX_KEY, COUNTER_GROUP_NAME_MAX_DEFAULT);
   public static final int COUNTER_NAME_MAX =

Modified: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java?rev=1380990&r1=1380989&r2=1380990&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java Wed Sep  5 04:57:47 2012
@@ -376,6 +376,8 @@ public class ConfigUtil {
       new String[] {MRJobConfig.REDUCE_SKIP_MAXGROUPS});
     Configuration.addDeprecation("mapred.reduce.child.log.level", 
       new String[] {MRJobConfig.REDUCE_LOG_LEVEL});
+    Configuration.addDeprecation("mapreduce.job.counters.limit", 
+      new String[] {MRJobConfig.COUNTERS_MAX_KEY});
     Configuration.addDeprecation("jobclient.completion.poll.interval", 
       new String[] {Job.COMPLETION_POLL_INTERVAL_KEY});
     Configuration.addDeprecation("jobclient.progress.monitor.poll.interval", 
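
A short sketch (not part of the committed diff) of how a Configuration.addDeprecation() mapping like the one added above behaves: once the old key is registered as deprecated, a value set under it becomes visible under the new key as well. The key names and value here are hypothetical stand-ins for mapreduce.job.counters.limit and MRJobConfig.COUNTERS_MAX_KEY.

import org.apache.hadoop.conf.Configuration;

public class DeprecationMappingSketch {
  public static void main(String[] args) {
    // Register the old property name as a deprecated alias of the new one.
    Configuration.addDeprecation("old.counters.limit",
        new String[] {"new.counters.max"});

    Configuration conf = new Configuration();
    conf.set("old.counters.limit", "240");

    // Both keys now resolve to the same value.
    System.out.println(conf.get("new.counters.max"));    // 240
    System.out.println(conf.get("old.counters.limit"));  // 240
  }
}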

Modified: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml?rev=1380990&r1=1380989&r2=1380990&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml Wed Sep  5 04:57:47 2012
@@ -797,9 +797,12 @@
 
 <property>
   <name>mapreduce.job.maxtaskfailures.per.tracker</name>
-  <value>4</value>
+  <value>3</value>
   <description>The number of task-failures on a tasktracker of a given job 
-               after which new tasks of that job aren't assigned to it.
+               after which new tasks of that job aren't assigned to it. It
+               MUST be less than mapreduce.map.maxattempts and
+               mapreduce.reduce.maxattempts otherwise the failed task will
+               never be tried on a different node.
   </description>
 </property>
 

Propchange: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml:r1377086-1380986
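
An illustrative configuration sketch (not part of the committed diff) consistent with the constraint the mapred-default.xml description above spells out: mapreduce.job.maxtaskfailures.per.tracker stays strictly below mapreduce.map.maxattempts and mapreduce.reduce.maxattempts, so a task that keeps failing on one node can still be retried elsewhere. The concrete values are examples only.

import org.apache.hadoop.mapred.JobConf;

public class TaskFailureLimitsSketch {
  public static void main(String[] args) {
    JobConf conf = new JobConf();
    conf.setInt("mapreduce.map.maxattempts", 4);
    conf.setInt("mapreduce.reduce.maxattempts", 4);
    // Must stay smaller than the max-attempt settings above.
    conf.setInt("mapreduce.job.maxtaskfailures.per.tracker", 3);

    System.out.println("per-tracker failure limit: "
        + conf.getInt("mapreduce.job.maxtaskfailures.per.tracker", 3)
        + ", map max attempts: " + conf.getInt("mapreduce.map.maxattempts", 4));
  }
}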

Modified: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJob.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJob.java?rev=1380990&r1=1380989&r2=1380990&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJob.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJob.java Wed Sep  5 04:57:47 2012
@@ -27,6 +27,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapreduce.JobStatus.State;
 import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
+import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.junit.Assert;
@@ -55,14 +56,20 @@ public class TestJob {
 
   @Test
   public void testUGICredentialsPropogation() throws Exception {
+    Credentials creds = new Credentials();
     Token<?> token = mock(Token.class);
-    Text service = new Text("service");
-    
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-    ugi.addToken(service, token);
+    Text tokenService = new Text("service");
+    Text secretName = new Text("secret");
+    byte secret[] = new byte[]{};
+        
+    creds.addToken(tokenService,  token);
+    creds.addSecretKey(secretName, secret);
+    UserGroupInformation.getLoginUser().addCredentials(creds);
     
     JobConf jobConf = new JobConf();
     Job job = new Job(jobConf);
-    assertSame(token, job.getCredentials().getToken(service));
+
+    assertSame(token, job.getCredentials().getToken(tokenService));
+    assertSame(secret, job.getCredentials().getSecretKey(secretName));
   }
 }

Modified: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobsQuery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobsQuery.java?rev=1380990&r1=1380989&r2=1380990&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobsQuery.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobsQuery.java Wed Sep  5 04:57:47 2012
@@ -25,6 +25,7 @@ import static org.mockito.Mockito.when;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.Map;
 
@@ -217,9 +218,23 @@ public class TestHsWebServicesJobsQuery 
   @Test
   public void testJobsQueryStateNone() throws JSONException, Exception {
     WebResource r = resource();
+
+    ArrayList<JobState> JOB_STATES =
+        new ArrayList<JobState>(Arrays.asList(JobState.values()));
+
+    // find a state that isn't in use
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
+      JOB_STATES.remove(entry.getValue().getState());
+    }
+
+    assertTrue("No unused job states", JOB_STATES.size() > 0);
+    JobState notInUse = JOB_STATES.get(0);
+
     ClientResponse response = r.path("ws").path("v1").path("history")
-        .path("mapreduce").path("jobs").queryParam("state", JobState.KILL_WAIT.toString())
+        .path("mapreduce").path("jobs").queryParam("state", notInUse.toString())
         .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+
     assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
     JSONObject json = response.getEntity(JSONObject.class);
     assertEquals("incorrect number of elements", 1, json.length());

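The test no longer hard-codes JobState.KILL_WAIT; it scans the known jobs and queries with whatever state is unused. A self-contained sketch of that technique using EnumSet, with an illustrative enum standing in for org.apache.hadoop.mapreduce.v2.api.records.JobState:

import java.util.EnumSet;

public class UnusedStateExample {
  // illustrative stand-in for the real JobState enum
  enum JobState { NEW, RUNNING, SUCCEEDED, FAILED, KILL_WAIT, KILLED }

  public static void main(String[] args) {
    EnumSet<JobState> candidates = EnumSet.allOf(JobState.class);
    // remove every state some job currently reports (here, just one example)
    candidates.remove(JobState.SUCCEEDED);
    // any remaining value is guaranteed to match zero jobs in a query
    JobState notInUse = candidates.iterator().next();
    System.out.println("querying with unused state: " + notInUse);
  }
}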
Modified: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java?rev=1380990&r1=1380989&r2=1380990&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java Wed Sep  5 04:57:47 2012
@@ -19,9 +19,6 @@
 package org.apache.hadoop.mapred;
 
 import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -41,75 +38,29 @@ import org.apache.hadoop.mapreduce.v2.ut
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.api.ClientRMProtocol;
-import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationReport;
-import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
-import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
-import org.apache.hadoop.yarn.api.records.DelegationToken;
 import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
-import org.apache.hadoop.yarn.factories.RecordFactory;
-import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
-import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.util.ProtoUtils;
+import org.apache.hadoop.yarn.client.YarnClientImpl;
 
-
-// TODO: This should be part of something like yarn-client.
-public class ResourceMgrDelegate {
+public class ResourceMgrDelegate extends YarnClientImpl {
   private static final Log LOG = LogFactory.getLog(ResourceMgrDelegate.class);
       
-  private final InetSocketAddress rmAddress;
   private YarnConfiguration conf;
-  ClientRMProtocol applicationsManager;
+  private GetNewApplicationResponse application;
   private ApplicationId applicationId;
-  private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
 
   /**
    * Delegate responsible for communicating with the Resource Manager's {@link ClientRMProtocol}.
    * @param conf the configuration object.
    */
   public ResourceMgrDelegate(YarnConfiguration conf) {
+    super();
     this.conf = conf;
-    YarnRPC rpc = YarnRPC.create(this.conf);
-    this.rmAddress = getRmAddress(conf);
-    LOG.debug("Connecting to ResourceManager at " + rmAddress);
-    applicationsManager =
-        (ClientRMProtocol) rpc.getProxy(ClientRMProtocol.class,
-            rmAddress, this.conf);
-    LOG.debug("Connected to ResourceManager at " + rmAddress);
-  }
-  
-  /**
-   * Used for injecting applicationsManager, mostly for testing.
-   * @param conf the configuration object
-   * @param applicationsManager the handle to talk the resource managers 
-   *                            {@link ClientRMProtocol}.
-   */
-  public ResourceMgrDelegate(YarnConfiguration conf, 
-      ClientRMProtocol applicationsManager) {
-    this.conf = conf;
-    this.applicationsManager = applicationsManager;
-    this.rmAddress = getRmAddress(conf);
-  }
-  
-  private static InetSocketAddress getRmAddress(YarnConfiguration conf) {
-    return conf.getSocketAddr(YarnConfiguration.RM_ADDRESS,
-                              YarnConfiguration.DEFAULT_RM_ADDRESS,
-                              YarnConfiguration.DEFAULT_RM_PORT);
+    init(conf);
+    start();
   }
   
   public void cancelDelegationToken(Token<DelegationTokenIdentifier> arg0)
@@ -117,26 +68,15 @@ public class ResourceMgrDelegate {
     return;
   }
 
-
   public TaskTrackerInfo[] getActiveTrackers() throws IOException,
       InterruptedException {
-    GetClusterNodesRequest request = 
-      recordFactory.newRecordInstance(GetClusterNodesRequest.class);
-    GetClusterNodesResponse response = 
-      applicationsManager.getClusterNodes(request);
-    return TypeConverter.fromYarnNodes(response.getNodeReports());
+    return TypeConverter.fromYarnNodes(super.getNodeReports());
   }
 
-
   public JobStatus[] getAllJobs() throws IOException, InterruptedException {
-    GetAllApplicationsRequest request =
-      recordFactory.newRecordInstance(GetAllApplicationsRequest.class);
-    GetAllApplicationsResponse response = 
-      applicationsManager.getAllApplications(request);
-    return TypeConverter.fromYarnApps(response.getApplicationList(), this.conf);
+    return TypeConverter.fromYarnApps(super.getApplicationList(), this.conf);
   }
 
-
   public TaskTrackerInfo[] getBlacklistedTrackers() throws IOException,
       InterruptedException {
     // TODO: Implement getBlacklistedTrackers
@@ -144,128 +84,56 @@ public class ResourceMgrDelegate {
     return new TaskTrackerInfo[0];
   }
 
-
   public ClusterMetrics getClusterMetrics() throws IOException,
       InterruptedException {
-    GetClusterMetricsRequest request = recordFactory.newRecordInstance(GetClusterMetricsRequest.class);
-    GetClusterMetricsResponse response = applicationsManager.getClusterMetrics(request);
-    YarnClusterMetrics metrics = response.getClusterMetrics();
+    YarnClusterMetrics metrics = super.getYarnClusterMetrics();
     ClusterMetrics oldMetrics = new ClusterMetrics(1, 1, 1, 1, 1, 1, 
         metrics.getNumNodeManagers() * 10, metrics.getNumNodeManagers() * 2, 1,
         metrics.getNumNodeManagers(), 0, 0);
     return oldMetrics;
   }
 
-
   @SuppressWarnings("rawtypes")
-  public Token getDelegationToken(Text renewer)
-      throws IOException, InterruptedException {
-    /* get the token from RM */
-    org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest 
-    rmDTRequest = recordFactory.newRecordInstance(
-        org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest.class);
-    rmDTRequest.setRenewer(renewer.toString());
-    org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse 
-      response = applicationsManager.getDelegationToken(rmDTRequest);
-    DelegationToken yarnToken = response.getRMDelegationToken();
-    return ProtoUtils.convertFromProtoFormat(yarnToken, rmAddress);
+  public Token getDelegationToken(Text renewer) throws IOException,
+      InterruptedException {
+    return ProtoUtils.convertFromProtoFormat(
+      super.getRMDelegationToken(renewer), rmAddress);
   }
 
-
   public String getFilesystemName() throws IOException, InterruptedException {
     return FileSystem.get(conf).getUri().toString();
   }
 
   public JobID getNewJobID() throws IOException, InterruptedException {
-    GetNewApplicationRequest request = recordFactory.newRecordInstance(GetNewApplicationRequest.class);
-    applicationId = applicationsManager.getNewApplication(request).getApplicationId();
+    this.application = super.getNewApplication();
+    this.applicationId = this.application.getApplicationId();
     return TypeConverter.fromYarn(applicationId);
   }
 
-  private static final String ROOT = "root";
-
-  private GetQueueInfoRequest getQueueInfoRequest(String queueName, 
-      boolean includeApplications, boolean includeChildQueues, boolean recursive) {
-    GetQueueInfoRequest request = 
-      recordFactory.newRecordInstance(GetQueueInfoRequest.class);
-    request.setQueueName(queueName);
-    request.setIncludeApplications(includeApplications);
-    request.setIncludeChildQueues(includeChildQueues);
-    request.setRecursive(recursive);
-    return request;
-    
-  }
-  
   public QueueInfo getQueue(String queueName) throws IOException,
   InterruptedException {
-    GetQueueInfoRequest request = 
-      getQueueInfoRequest(queueName, true, false, false); 
-      recordFactory.newRecordInstance(GetQueueInfoRequest.class);
     return TypeConverter.fromYarn(
-        applicationsManager.getQueueInfo(request).getQueueInfo(), this.conf);
+        super.getQueueInfo(queueName), this.conf);
   }
-  
-  private void getChildQueues(org.apache.hadoop.yarn.api.records.QueueInfo parent, 
-      List<org.apache.hadoop.yarn.api.records.QueueInfo> queues,
-      boolean recursive) {
-    List<org.apache.hadoop.yarn.api.records.QueueInfo> childQueues = 
-      parent.getChildQueues();
-
-    for (org.apache.hadoop.yarn.api.records.QueueInfo child : childQueues) {
-      queues.add(child);
-      if(recursive) {
-        getChildQueues(child, queues, recursive);
-      }
-    }
-  }
-
 
   public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException,
       InterruptedException {
-    GetQueueUserAclsInfoRequest request = 
-      recordFactory.newRecordInstance(GetQueueUserAclsInfoRequest.class);
-    List<QueueUserACLInfo> userAcls = 
-      applicationsManager.getQueueUserAcls(request).getUserAclsInfoList();
-    return TypeConverter.fromYarnQueueUserAclsInfo(userAcls);
+    return TypeConverter.fromYarnQueueUserAclsInfo(super
+      .getQueueAclsInfo());
   }
 
-
   public QueueInfo[] getQueues() throws IOException, InterruptedException {
-    List<org.apache.hadoop.yarn.api.records.QueueInfo> queues = 
-      new ArrayList<org.apache.hadoop.yarn.api.records.QueueInfo>();
-
-    org.apache.hadoop.yarn.api.records.QueueInfo rootQueue = 
-      applicationsManager.getQueueInfo(
-          getQueueInfoRequest(ROOT, false, true, true)).getQueueInfo();
-    getChildQueues(rootQueue, queues, true);
-
-    return TypeConverter.fromYarnQueueInfo(queues, this.conf);
+    return TypeConverter.fromYarnQueueInfo(super.getAllQueues(), this.conf);
   }
 
-
   public QueueInfo[] getRootQueues() throws IOException, InterruptedException {
-    List<org.apache.hadoop.yarn.api.records.QueueInfo> queues = 
-      new ArrayList<org.apache.hadoop.yarn.api.records.QueueInfo>();
-
-    org.apache.hadoop.yarn.api.records.QueueInfo rootQueue = 
-      applicationsManager.getQueueInfo(
-          getQueueInfoRequest(ROOT, false, true, true)).getQueueInfo();
-    getChildQueues(rootQueue, queues, false);
-
-    return TypeConverter.fromYarnQueueInfo(queues, this.conf);
+    return TypeConverter.fromYarnQueueInfo(super.getRootQueueInfos(), this.conf);
   }
 
   public QueueInfo[] getChildQueues(String parent) throws IOException,
       InterruptedException {
-      List<org.apache.hadoop.yarn.api.records.QueueInfo> queues = 
-          new ArrayList<org.apache.hadoop.yarn.api.records.QueueInfo>();
-        
-        org.apache.hadoop.yarn.api.records.QueueInfo parentQueue = 
-          applicationsManager.getQueueInfo(
-              getQueueInfoRequest(parent, false, true, false)).getQueueInfo();
-        getChildQueues(parentQueue, queues, true);
-        
-        return TypeConverter.fromYarnQueueInfo(queues, this.conf);
+    return TypeConverter.fromYarnQueueInfo(super.getChildQueueInfos(parent),
+      this.conf);
   }
 
   public String getStagingAreaDir() throws IOException, InterruptedException {
@@ -307,40 +175,6 @@ public class ResourceMgrDelegate {
     return 0;
   }
   
-  
-  public ApplicationId submitApplication(
-      ApplicationSubmissionContext appContext) 
-  throws IOException {
-    appContext.setApplicationId(applicationId);
-    SubmitApplicationRequest request = 
-        recordFactory.newRecordInstance(SubmitApplicationRequest.class);
-    request.setApplicationSubmissionContext(appContext);
-    applicationsManager.submitApplication(request);
-    LOG.info("Submitted application " + applicationId + " to ResourceManager" +
-    		" at " + rmAddress);
-    return applicationId;
-  }
-  
-  public void killApplication(ApplicationId applicationId) throws IOException {
-    KillApplicationRequest request = 
-        recordFactory.newRecordInstance(KillApplicationRequest.class);
-    request.setApplicationId(applicationId);
-    applicationsManager.forceKillApplication(request);
-    LOG.info("Killing application " + applicationId);
-  }
-
-
-  public ApplicationReport getApplicationReport(ApplicationId appId)
-      throws YarnRemoteException {
-    GetApplicationReportRequest request = recordFactory
-        .newRecordInstance(GetApplicationReportRequest.class);
-    request.setApplicationId(appId);
-    GetApplicationReportResponse response = applicationsManager
-        .getApplicationReport(request);
-    ApplicationReport applicationReport = response.getApplicationReport();
-    return applicationReport;
-  }
-
   public ApplicationId getApplicationId() {
     return applicationId;
   }

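The net effect of this refactoring is that ResourceMgrDelegate no longer builds its own ClientRMProtocol proxy; it inherits the connection and helper calls from YarnClientImpl and drives the service lifecycle from its constructor. A hedged sketch of the pattern (the class name is illustrative):

import org.apache.hadoop.yarn.client.YarnClientImpl;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class MyRmClient extends YarnClientImpl {
  public MyRmClient(YarnConfiguration conf) {
    super();
    init(conf);  // AbstractService lifecycle: configure the client
    start();     // opens the ClientRMProtocol proxy to the ResourceManager
  }
  // inherited helpers such as getYarnClusterMetrics(), getNodeReports(),
  // getQueueInfo(...) can now be called directly, as ResourceMgrDelegate does;
  // callers should stop() the service when finished with it.
}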
Modified: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java?rev=1380990&r1=1380989&r2=1380990&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java Wed Sep  5 04:57:47 2012
@@ -89,7 +89,7 @@ import org.apache.hadoop.yarn.util.Proto
 /**
  * This class enables the current JobClient (0.22 hadoop) to run on YARN.
  */
-@SuppressWarnings({ "rawtypes", "unchecked", "deprecation" })
+@SuppressWarnings({ "rawtypes", "unchecked" })
 public class YARNRunner implements ClientProtocol {
 
   private static final Log LOG = LogFactory.getLog(YARNRunner.class);
@@ -345,10 +345,10 @@ public class YARNRunner implements Clien
         createApplicationResource(defaultFileContext,
             jobConfPath, LocalResourceType.FILE));
     if (jobConf.get(MRJobConfig.JAR) != null) {
+      Path jobJarPath = new Path(jobConf.get(MRJobConfig.JAR));
       localResources.put(MRJobConfig.JOB_JAR,
           createApplicationResource(defaultFileContext,
-              new Path(jobSubmitDir, MRJobConfig.JOB_JAR), 
-              LocalResourceType.ARCHIVE));
+              jobJarPath, LocalResourceType.ARCHIVE));
     } else {
       // Job jar may be null. For e.g, for pipes, the job jar is the hadoop
       // mapreduce jar itself which is already on the classpath.

Modified: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/conf/TestJobConf.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/conf/TestJobConf.java?rev=1380990&r1=1380989&r2=1380990&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/conf/TestJobConf.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/conf/TestJobConf.java Wed Sep  5 04:57:47 2012
@@ -21,6 +21,7 @@ import org.junit.Assert;
 import org.junit.Test;
 
 import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobContext;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 
 public class TestJobConf {
@@ -185,4 +186,19 @@ public class TestJobConf {
     
     
   }
+
+  /**
+   * Ensure that by default JobContext.MAX_TASK_FAILURES_PER_TRACKER is less
+   * than JobContext.MAP_MAX_ATTEMPTS and JobContext.REDUCE_MAX_ATTEMPTS, so
+   * that failed tasks will be retried on other nodes.
+   */
+  @Test
+  public void testMaxTaskFailuresPerTracker() {
+    JobConf jobConf = new JobConf(true);
+    Assert.assertTrue("By default JobContext.MAX_TASK_FAILURES_PER_TRACKER was "
+        + "not less than JobContext.MAP_MAX_ATTEMPTS and REDUCE_MAX_ATTEMPTS",
+        jobConf.getMaxTaskFailuresPerTracker() < jobConf.getMaxMapAttempts() &&
+        jobConf.getMaxTaskFailuresPerTracker() < jobConf.getMaxReduceAttempts()
+    );
+  }
 }

Modified: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/IOMapperBase.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/IOMapperBase.java?rev=1380990&r1=1380989&r2=1380990&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/IOMapperBase.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/IOMapperBase.java Wed Sep  5 04:57:47 2012
@@ -22,7 +22,9 @@ import java.net.InetAddress;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.mapred.*;
+import org.apache.hadoop.util.ReflectionUtils;
 
 /**
  * Base mapper class for IO operations.
@@ -41,6 +43,7 @@ public abstract class IOMapperBase<T> ex
   protected int bufferSize;
   protected FileSystem fs;
   protected String hostName;
+  protected CompressionCodec compressionCodec;
 
   public IOMapperBase() { 
   }
@@ -59,6 +62,22 @@ public abstract class IOMapperBase<T> ex
     } catch(Exception e) {
       hostName = "localhost";
     }
+    
+    // grab the configured compression codec class name, if any
+    String compression = getConf().get("test.io.compression.class", null);
+    Class<? extends CompressionCodec> codec;
+
+    // try to resolve and load the codec class
+    try {
+      codec = (compression == null) ? null :
+          Class.forName(compression).asSubclass(CompressionCodec.class);
+    } catch(Exception e) {
+      throw new RuntimeException("Compression codec not found: ", e);
+    }
+
+    if(codec != null) {
+      compressionCodec = (CompressionCodec) ReflectionUtils.newInstance(codec, getConf());
+    }
   }
 
   public void close() throws IOException {

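The codec lookup added above follows the standard Hadoop idiom: resolve the class by name, then instantiate it through ReflectionUtils so the codec is wired up with the job's Configuration. A standalone sketch (the helper class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class CodecLoader {
  public static CompressionCodec load(Configuration conf) throws Exception {
    String name = conf.get("test.io.compression.class", null);
    if (name == null) {
      return null;  // no codec configured; streams stay uncompressed
    }
    // resolve by name, then instantiate with the Configuration applied
    Class<? extends CompressionCodec> clazz =
        Class.forName(name).asSubclass(CompressionCodec.class);
    return ReflectionUtils.newInstance(clazz, conf);
  }
}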
Modified: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java?rev=1380990&r1=1380989&r2=1380990&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java Wed Sep  5 04:57:47 2012
@@ -23,6 +23,7 @@ import java.io.DataInputStream;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.OutputStream;
 import java.io.PrintStream;
@@ -295,6 +296,8 @@ public class TestDFSIO extends TestCase 
       // create file
       OutputStream out;
       out = fs.create(new Path(getDataDir(getConf()), name), true, bufferSize);
+    
+      if(compressionCodec != null) out = compressionCodec.createOutputStream(out);
       
       try {
         // write to the file
@@ -358,6 +361,8 @@ public class TestDFSIO extends TestCase 
       OutputStream out;
       out = fs.append(new Path(getDataDir(getConf()), name), bufferSize);
       
+      if(compressionCodec != null) out = compressionCodec.createOutputStream(out);
+      
       try {
         // write to the file
         long nrRemaining;
@@ -394,7 +399,10 @@ public class TestDFSIO extends TestCase 
                        long totalSize // in bytes
                      ) throws IOException {
       // open file
-      DataInputStream in = fs.open(new Path(getDataDir(getConf()), name));
+      InputStream in = fs.open(new Path(getDataDir(getConf()), name));
+      
+      if(compressionCodec != null) in = compressionCodec.createInputStream(in);
+      
       long actualSize = 0;
       try {
         while (actualSize < totalSize) {
@@ -459,6 +467,7 @@ public class TestDFSIO extends TestCase 
     long fileSize = 1*MEGA;
     int nrFiles = 1;
     String resFileName = DEFAULT_RES_FILE_NAME;
+    String compressionClass = null;
     boolean isSequential = false;
     String version = TestDFSIO.class.getSimpleName() + ".0.0.6";
 
@@ -479,6 +488,8 @@ public class TestDFSIO extends TestCase 
         testType = TEST_TYPE_CLEANUP;
       } else if (args[i].startsWith("-seq")) {
         isSequential = true;
+      } else if (args[i].startsWith("-compression")) {
+        compressionClass = args[++i];
       } else if (args[i].equals("-nrFiles")) {
         nrFiles = Integer.parseInt(args[++i]);
       } else if (args[i].equals("-fileSize")) {
@@ -497,6 +508,11 @@ public class TestDFSIO extends TestCase 
     LOG.info("fileSize (MB) = " + toMB(fileSize));
     LOG.info("bufferSize = " + bufferSize);
     LOG.info("baseDir = " + getBaseDir(config));
+    
+    if(compressionClass != null) {
+      config.set("test.io.compression.class", compressionClass);
+      LOG.info("compressionClass = " + compressionClass);
+    }
 
     config.setInt("test.io.file.buffer.size", bufferSize);
     config.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);

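On the data path, the change is symmetric: wrap the output stream with the codec when writing and the input stream when reading, with no behavior change when no codec is configured. A minimal sketch of that wrapping pattern (the path and payload are illustrative):

import java.io.InputStream;
import java.io.OutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;

public class CompressedIo {
  static void roundTrip(FileSystem fs, Path p, CompressionCodec codec)
      throws Exception {
    OutputStream out = fs.create(p, true);
    if (codec != null) out = codec.createOutputStream(out); // compress writes
    out.write("hello".getBytes());
    out.close();

    InputStream in = fs.open(p);
    if (codec != null) in = codec.createInputStream(in);    // decompress reads
    byte[] buf = new byte[5];
    in.read(buf);
    in.close();
  }
}

With this patch, the codec is selected from the command line via the new -compression flag, passing a codec class such as org.apache.hadoop.io.compress.GzipCodec.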
Modified: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestResourceMgrDelegate.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestResourceMgrDelegate.java?rev=1380990&r1=1380989&r2=1380990&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestResourceMgrDelegate.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestResourceMgrDelegate.java Wed Sep  5 04:57:47 2012
@@ -50,7 +50,7 @@ public class TestResourceMgrDelegate {
    */
   @Test
   public void testGetRootQueues() throws IOException, InterruptedException {
-    ClientRMProtocol applicationsManager = Mockito.mock(ClientRMProtocol.class);
+    final ClientRMProtocol applicationsManager = Mockito.mock(ClientRMProtocol.class);
     GetQueueInfoResponse response = Mockito.mock(GetQueueInfoResponse.class);
     org.apache.hadoop.yarn.api.records.QueueInfo queueInfo =
       Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
@@ -59,12 +59,17 @@ public class TestResourceMgrDelegate {
       GetQueueInfoRequest.class))).thenReturn(response);
 
     ResourceMgrDelegate delegate = new ResourceMgrDelegate(
-      new YarnConfiguration(), applicationsManager);
+      new YarnConfiguration()) {
+      @Override
+      public synchronized void start() {
+        this.rmClient = applicationsManager;
+      }
+    };
     delegate.getRootQueues();
 
     ArgumentCaptor<GetQueueInfoRequest> argument =
       ArgumentCaptor.forClass(GetQueueInfoRequest.class);
-    Mockito.verify(delegate.applicationsManager).getQueueInfo(
+    Mockito.verify(applicationsManager).getQueueInfo(
       argument.capture());
 
     Assert.assertTrue("Children of root queue not requested",
@@ -75,7 +80,7 @@ public class TestResourceMgrDelegate {
 
   @Test
   public void tesAllJobs() throws Exception {
-    ClientRMProtocol applicationsManager = Mockito.mock(ClientRMProtocol.class);
+    final ClientRMProtocol applicationsManager = Mockito.mock(ClientRMProtocol.class);
     GetAllApplicationsResponse allApplicationsResponse = Records
         .newRecord(GetAllApplicationsResponse.class);
     List<ApplicationReport> applications = new ArrayList<ApplicationReport>();
@@ -93,7 +98,12 @@ public class TestResourceMgrDelegate {
             .any(GetAllApplicationsRequest.class))).thenReturn(
         allApplicationsResponse);
     ResourceMgrDelegate resourceMgrDelegate = new ResourceMgrDelegate(
-        new YarnConfiguration(), applicationsManager);
+      new YarnConfiguration()) {
+      @Override
+      public synchronized void start() {
+        this.rmClient = applicationsManager;
+      }
+    };
     JobStatus[] allJobs = resourceMgrDelegate.getAllJobs();
 
     Assert.assertEquals(State.FAILED, allJobs[0].getState());

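Since the (conf, applicationsManager) constructor is gone, both tests now inject their mock by overriding start(), so construction never opens a real RPC connection. The pattern, extracted as a sketch that mirrors the test code (the factory class name is illustrative):

import org.apache.hadoop.mapred.ResourceMgrDelegate;
import org.apache.hadoop.yarn.api.ClientRMProtocol;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.mockito.Mockito;

public class MockedDelegate {
  public static ResourceMgrDelegate create() {
    final ClientRMProtocol mockRm = Mockito.mock(ClientRMProtocol.class);
    // the constructor calls init(conf) and then start(); the override below
    // runs instead of the real start(), planting the mock as the RM client
    return new ResourceMgrDelegate(new YarnConfiguration()) {
      @Override
      public synchronized void start() {
        this.rmClient = mockRm;  // skip the real connection entirely
      }
    };
  }
}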
Modified: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestYarnClientProtocolProvider.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestYarnClientProtocolProvider.java?rev=1380990&r1=1380989&r2=1380990&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestYarnClientProtocolProvider.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestYarnClientProtocolProvider.java Wed Sep  5 04:57:47 2012
@@ -18,9 +18,10 @@
 
 package org.apache.hadoop.mapreduce;
 
+import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.when;
+
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
@@ -104,14 +105,20 @@ public class TestYarnClientProtocolProvi
       rmDTToken.setPassword(ByteBuffer.wrap("testcluster".getBytes()));
       rmDTToken.setService("0.0.0.0:8032");
       getDTResponse.setRMDelegationToken(rmDTToken);
-      ClientRMProtocol cRMProtocol = mock(ClientRMProtocol.class);
+      final ClientRMProtocol cRMProtocol = mock(ClientRMProtocol.class);
       when(cRMProtocol.getDelegationToken(any(
           GetDelegationTokenRequest.class))).thenReturn(getDTResponse);
       ResourceMgrDelegate rmgrDelegate = new ResourceMgrDelegate(
-          new YarnConfiguration(conf), cRMProtocol);
+          new YarnConfiguration(conf)) {
+        @Override
+        public synchronized void start() {
+          this.rmClient = cRMProtocol;
+        }
+      };
       yrunner.setResourceMgrDelegate(rmgrDelegate);
       Token t = cluster.getDelegationToken(new Text(" "));
-      assertTrue("Testclusterkind".equals(t.getKind().toString()));
+      assertTrue("Token kind is instead " + t.getKind().toString(),
+        "Testclusterkind".equals(t.getKind().toString()));
     } finally {
       if (cluster != null) {
         cluster.close();

Modified: hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java?rev=1380990&r1=1380989&r2=1380990&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java Wed Sep  5 04:57:47 2012
@@ -88,8 +88,10 @@ public class TestUmbilicalProtocolWithJo
       .when(mockTT).getProtocolSignature(anyString(), anyLong(), anyInt());
 
     JobTokenSecretManager sm = new JobTokenSecretManager();
-    final Server server = RPC.getServer(TaskUmbilicalProtocol.class, mockTT,
-        ADDRESS, 0, 5, true, conf, sm);
+    final Server server = new RPC.Builder(conf)
+        .setProtocol(TaskUmbilicalProtocol.class).setInstance(mockTT)
+        .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
+        .setSecretManager(sm).build();
 
     server.start();
 

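The replaced RPC.getServer overload took its eight arguments positionally; RPC.Builder names each one. A hedged sketch of the migration (the old call appears in a comment; parameter values are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;

public class RpcBuilderExample {
  // 'protocolClass' and 'impl' stand in for any protocol interface/instance
  static Server build(Configuration conf, Class<?> protocolClass, Object impl)
      throws Exception {
    // old style: RPC.getServer(protocolClass, impl, "0.0.0.0", 0, 5, true, conf, null);
    return new RPC.Builder(conf)
        .setProtocol(protocolClass)
        .setInstance(impl)
        .setBindAddress("0.0.0.0")
        .setPort(0)          // pick any free port
        .setNumHandlers(5)
        .setVerbose(true)
        .build();
  }
}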

