hadoop-mapreduce-commits mailing list archives

From: vino...@apache.org
Subject: svn commit: r1513258 [1/4] - in /hadoop/common/branches/YARN-321/hadoop-mapreduce-project: ./ bin/ conf/ dev-support/ hadoop-mapreduce-client/ hadoop-mapreduce-client/hadoop-mapreduce-client-app/ hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/...
Date: Mon, 12 Aug 2013 21:26:15 GMT
Author: vinodkv
Date: Mon Aug 12 21:25:49 2013
New Revision: 1513258

URL: http://svn.apache.org/r1513258
Log:
Forwarding YARN-321 branch to latest branch-2.

Added:
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java
      - copied unchanged from r1513205, hadoop/common/branches/branch-2/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileInputFormat.java
      - copied unchanged from r1513205, hadoop/common/branches/branch-2/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileInputFormat.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HSAuditLogger.java
      - copied unchanged from r1513205, hadoop/common/branches/branch-2/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HSAuditLogger.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HSProxies.java
      - copied unchanged from r1513205, hadoop/common/branches/branch-2/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HSProxies.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/client/
      - copied from r1513205, hadoop/common/branches/branch-2/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/client/
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/protocol/
      - copied from r1513205, hadoop/common/branches/branch-2/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/protocol/
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/protocolPB/
      - copied from r1513205, hadoop/common/branches/branch-2/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/protocolPB/
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/server/
      - copied from r1513205, hadoop/common/branches/branch-2/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/server/
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/proto/
      - copied from r1513205, hadoop/common/branches/branch-2/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/proto/
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistory.java
      - copied unchanged from r1513205, hadoop/common/branches/branch-2/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistory.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/server/
      - copied from r1513205, hadoop/common/branches/branch-2/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/server/
Modified:
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/   (props changed)
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/CHANGES.txt   (contents, props changed)
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/bin/   (props changed)
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/bin/mapred
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/conf/   (props changed)
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/dev-support/findbugs-exclude.xml
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/JobStateInternal.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobEventType.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestMRWithDistributedCache.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ReduceTask.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ShuffleConsumerPlugin.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/WrappedMapper.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/InMemoryReader.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Shuffle.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml   (contents, props changed)
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestShufflePlugin.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestFetcher.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/pom.xml
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CachedHistoryStorage.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEvents.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryServer.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/TestSlive.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestAuditLogger.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJobCounters.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/TestKeyFieldBasedComparator.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestLocalRunner.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/partition/TestMRKeyFieldBasedComparator.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestNonExistentJob.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedChunkedFile.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedFileRegion.java
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-examples/   (props changed)
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/package.html   (props changed)
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/package.html   (props changed)
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/package.html   (props changed)
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/package.html   (props changed)
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/package.html   (props changed)
    hadoop/common/branches/YARN-321/hadoop-mapreduce-project/pom.xml

Propchange: hadoop/common/branches/YARN-321/hadoop-mapreduce-project/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/branch-2/hadoop-mapreduce-project:r1503799-1513205
  Merged /hadoop/common/trunk/hadoop-mapreduce-project:r1380921,1507259

Modified: hadoop/common/branches/YARN-321/hadoop-mapreduce-project/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-mapreduce-project/CHANGES.txt?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-mapreduce-project/CHANGES.txt (original)
+++ hadoop/common/branches/YARN-321/hadoop-mapreduce-project/CHANGES.txt Mon Aug 12 21:25:49 2013
@@ -6,22 +6,40 @@ Release 2.3.0 - UNRELEASED
 
   NEW FEATURES
 
+    MAPREDUCE-5265. History server admin service to refresh user and superuser
+    group mappings (Ashwin Shankar via jlowe)
+
+    MAPREDUCE-5356. Ability to refresh aggregated log retention period and
+    check interval (Ashwin Shankar via jlowe)
+
+    MAPREDUCE-5386. Ability to refresh history server job retention and job
+    cleaner settings (Ashwin Shankar via jlowe)
+
+    MAPREDUCE-5411. Refresh size of loaded job cache on history server (Ashwin
+    Shankar via jlowe)
+
   IMPROVEMENTS
 
+    MAPREDUCE-434. LocalJobRunner limited to single reducer (Sandy Ryza and
+    Aaron Kimball via Sandy Ryza)
+
   OPTIMIZATIONS
 
+    MAPREDUCE-1981. Improve getSplits performance by using listLocatedStatus
+    (Hairong Kuang and Jason Lowe via jlowe)
+
   BUG FIXES
 
     MAPREDUCE-5316. job -list-attempt-ids command does not handle illegal
     task-state (Ashwin Shankar via jlowe)
 
-    MAPREDUCE-3193. FileInputFormat doesn't read files recursively in the
-    input path dir (Devaraj K via jlowe)
+    MAPREDUCE-5380. Invalid mapred command should return non-zero exit code
+    (Stephen Chu via jlowe)
 
-    MAPREDUCE-5358. MRAppMaster throws invalid transitions for JobImpl
-    (Devaraj K via jlowe)
+    MAPREDUCE-5404. HSAdminServer does not use ephemeral ports in minicluster
+    mode (Ted Yu via jlowe)
 
-Release 2.2.0 - UNRELEASED
+Release 2.1.1-beta - UNRELEASED
 
   INCOMPATIBLE CHANGES
 
@@ -31,21 +49,41 @@ Release 2.2.0 - UNRELEASED
 
   OPTIMIZATIONS
 
+    MAPREDUCE-5352. Optimize node local splits generated by
+    CombineFileInputFormat. (sseth)
+
+    MAPREDUCE-5446. TestJobHistoryEvents and TestJobHistoryParsing have race
+    conditions (jlowe via kihwal)
+
   BUG FIXES
 
-Release 2.1.1-beta - UNRELEASED
+    MAPREDUCE-5385. Fixed a bug with JobContext getCacheFiles API. (Omkar Vinit
+    Joshi via vinodkv)
 
-  INCOMPATIBLE CHANGES
+    MAPREDUCE-5428.  HistoryFileManager doesn't stop threads when service is
+    stopped (Karthik Kambatla via jlowe)
 
-  NEW FEATURES
+    MAPREDUCE-5251. Reducer should not implicate map attempt if it has
+    insufficient space to fetch map output (Ashwin Shankar via jlowe)
 
-  IMPROVEMENTS
+    MAPREDUCE-5317. Stale files left behind for failed jobs (Ravi Prakash via
+    jlowe)
 
-  OPTIMIZATIONS
+    MAPREDUCE-5358. MRAppMaster throws invalid transitions for JobImpl
+    (Devaraj K via jlowe)
 
-  BUG FIXES
+    MAPREDUCE-3193. FileInputFormat doesn't read files recursively in the
+    input path dir (Devaraj K via jlowe)
+
+    MAPREDUCE-5440. TestCopyCommitter Fails on JDK7 (Robert Parker via jlowe)
 
-Release 2.1.0-beta - 2013-07-02
+    MAPREDUCE-5367. Local jobs all use same local working directory
+    (Sandy Ryza)
+
+    MAPREDUCE-5425. Junit in TestJobHistoryServer failing in jdk 7 (Robert
+    Parker via jlowe)
+
+Release 2.1.0-beta - 2013-08-06
 
   INCOMPATIBLE CHANGES
 
@@ -207,6 +245,8 @@ Release 2.1.0-beta - 2013-07-02
     MAPREDUCE-5333. Add test that verifies MRAM works correctly when sending
     requests with non-normalized capabilities. (ywskycn via tucu)
 
+    MAPREDUCE-5398. MR changes for YARN-513 (Jian He via bikas)
+
   OPTIMIZATIONS
 
     MAPREDUCE-4974. Optimising the LineRecordReader initialize() method 
@@ -445,6 +485,40 @@ Release 2.1.0-beta - 2013-07-02
     MAPREDUCE-5325. MR changes related to YARN-727. ClientRMProtocol.getAllApplications
     should accept ApplicationType as a parameter. (Xuan Gong via hitesh)
 
+    MAPREDUCE-5291. Change MR App to use updated property names in
+    container-log4j.properties. (Zhijie Shen via sseth)
+
+    MAPREDUCE-5303. Changed MR app after moving ProtoBase to package impl.pb via
+    YARN-724. (Jian He via vinodkv)
+
+    MAPREDUCE-5312. TestRMNMInfo is failing. (sandyr via tucu)
+
+    MAPREDUCE-5304. mapreduce.Job killTask/failTask/getTaskCompletionEvents 
+    methods have incompatible signature changes. (kkambatl via tucu)
+
+    MAPREDUCE-5298. Moved MapReduce services to YARN-530 stricter lifecycle.
+    (Steve Loughran via vinodkv)
+
+    MAPREDUCE-5319. Set user.name in job.xml. (Xuan Gong via acmurthy)
+
+    MAPREDUCE-5310. MRAM should not normalize allocation request capabilities.
+    (tucu)
+
+    MAPREDUCE-5213. Re-assess TokenCache methods marked @Private. 
+    (kkambatl via tucu)
+
+    MAPREDUCE-5412. Update MR app to use multiple containers API of
+    ContainerManager after YARN-926. (Jian He via vinodkv)
+
+    MAPREDUCE-5421. Fixed TestNonExistentJob failure after YARN-873. (Junping Du
+    via vinodkv)
+
+    MAPREDUCE-5419. TestSlive is getting FileNotFound Exception (Robert Parker
+    via jlowe)
+
+    MAPREDUCE-5399. Unnecessary Configuration instantiation in IFileInputStream
+    slows down merge. (Stanislav Barton via Sandy Ryza)
+
   BREAKDOWN OF HADOOP-8562 SUBTASKS
 
     MAPREDUCE-4739. Some MapReduce tests fail to find winutils.
@@ -496,25 +570,6 @@ Release 2.1.0-beta - 2013-07-02
     MAPREDUCE-4374. Fix child task environment variable config and add support
     for Windows. (Chuan Liu via cnauroth)
 
-    MAPREDUCE-5291. Change MR App to use updated property names in
-    container-log4j.properties. (Zhijie Shen via sseth)
-
-    MAPREDUCE-5303. Changed MR app after moving ProtoBase to package impl.pb via
-    YARN-724. (Jian He via vinodkv)
-
-    MAPREDUCE-5312. TestRMNMInfo is failing. (sandyr via tucu)
-
-    MAPREDUCE-5304. mapreduce.Job killTask/failTask/getTaskCompletionEvents 
-    methods have incompatible signature changes. (kkambatl via tucu)
-
-    MAPREDUCE-5298. Moved MapReduce services to YARN-530 stricter lifecycle.
-    (Steve Loughran via vinodkv)
-
-    MAPREDUCE-5319. Set user.name in job.xml. (Xuan Gong via acmurthy)
-
-    MAPREDUCE-5310. MRAM should not normalize allocation request capabilities.
-    (tucu)
-
 Release 2.0.5-alpha - 06/06/2013
 
   INCOMPATIBLE CHANGES
@@ -1090,11 +1145,28 @@ Release 0.23.10 - UNRELEASED
 
   OPTIMIZATIONS
 
+    MAPREDUCE-1981. Improve getSplits performance by using listLocatedStatus
+    (Hairong Kuang and Jason Lowe via jlowe)
+
   BUG FIXES
 
     MAPREDUCE-3193. FileInputFormat doesn't read files recursively in the
     input path dir (Devaraj K via jlowe)
 
+    MAPREDUCE-5380. Invalid mapred command should return non-zero exit code
+    (Stephen Chu via jlowe)
+
+    MAPREDUCE-5317. Stale files left behind for failed jobs (Ravi Prakash via
+    jlowe)
+
+    MAPREDUCE-5251. Reducer should not implicate map attempt if it has
+    insufficient space to fetch map output (Ashwin Shankar via jlowe)
+
+    MAPREDUCE-5419. TestSlive is getting FileNotFound Exception (Robert Parker
+    via jlowe)
+
+    MAPREDUCE-5440. TestCopyCommitter Fails on JDK7 (Robert Parker via jlowe)
+
 Release 0.23.9 - 2013-07-08
 
   INCOMPATIBLE CHANGES

Propchange: hadoop/common/branches/YARN-321/hadoop-mapreduce-project/CHANGES.txt
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/branch-2/hadoop-mapreduce-project/CHANGES.txt:r1503799-1513205
  Merged /hadoop/common/trunk/hadoop-mapreduce-project/CHANGES.txt:r1380921,1507259

Propchange: hadoop/common/branches/YARN-321/hadoop-mapreduce-project/bin/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-mapreduce-project/bin:r1380921,1507259
  Merged /hadoop/common/branches/branch-2/hadoop-mapreduce-project/bin:r1503799-1513205

Modified: hadoop/common/branches/YARN-321/hadoop-mapreduce-project/bin/mapred
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-mapreduce-project/bin/mapred?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-mapreduce-project/bin/mapred (original)
+++ hadoop/common/branches/YARN-321/hadoop-mapreduce-project/bin/mapred Mon Aug 12 21:25:49 2013
@@ -38,6 +38,7 @@ function print_usage(){
   echo "  historyserver        run job history servers as a standalone daemon"
   echo "  distcp <srcurl> <desturl> copy file or directories recursively"
   echo "  archive -archiveName NAME -p <parent path> <src>* <dest> create a hadoop archive"
+  echo "  hsadmin              job history server admin interface"
   echo ""
   echo "Most commands print help when invoked w/o parameters."
 }
@@ -83,7 +84,7 @@ elif [ "$COMMAND" = "mradmin" ] \
   echo "Sorry, the $COMMAND command is no longer supported."
   echo "You may find similar functionality with the \"yarn\" shell command."
   print_usage
-  exit
+  exit 1
 elif [ "$COMMAND" = "distcp" ] ; then
   CLASS=org.apache.hadoop.tools.DistCp
   CLASSPATH=${CLASSPATH}:${TOOL_PATH}
@@ -92,10 +93,13 @@ elif [ "$COMMAND" = "archive" ] ; then
   CLASS=org.apache.hadoop.tools.HadoopArchives
   CLASSPATH=${CLASSPATH}:${TOOL_PATH}
   HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "hsadmin" ] ; then
+  CLASS=org.apache.hadoop.mapreduce.v2.hs.client.HSAdmin
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
 else
   echo $COMMAND - invalid command
   print_usage
-  exit
+  exit 1
 fi
 
 # for developers, add mapred classes to CLASSPATH
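
For illustration only: the new hsadmin subcommand above dispatches to org.apache.hadoop.mapreduce.v2.hs.client.HSAdmin. A minimal usage sketch follows; the flag names are assumptions inferred from the MAPREDUCE-5265 and MAPREDUCE-5411 CHANGES.txt entries above, not taken from this diff.

  # Refresh user-to-group and superuser group mappings on the history server
  # (per MAPREDUCE-5265); flag names assumed to mirror other Hadoop admin tools.
  $ mapred hsadmin -refreshUserToGroupsMappings
  $ mapred hsadmin -refreshSuperUserGroupsConfiguration

  # Refresh the size of the loaded job cache (per MAPREDUCE-5411); assumed flag.
  $ mapred hsadmin -refreshLoadedJobCache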

Propchange: hadoop/common/branches/YARN-321/hadoop-mapreduce-project/conf/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/branch-2/hadoop-mapreduce-project/conf:r1503799-1513205
  Merged /hadoop/common/trunk/hadoop-mapreduce-project/conf:r1380921,1507259

Modified: hadoop/common/branches/YARN-321/hadoop-mapreduce-project/dev-support/findbugs-exclude.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-mapreduce-project/dev-support/findbugs-exclude.xml?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-mapreduce-project/dev-support/findbugs-exclude.xml (original)
+++ hadoop/common/branches/YARN-321/hadoop-mapreduce-project/dev-support/findbugs-exclude.xml Mon Aug 12 21:25:49 2013
@@ -458,6 +458,9 @@
     <Package name="org.apache.hadoop.yarn.proto" />
   </Match>
   <Match>
+    <Package name="org.apache.hadoop.mapreduce.v2.hs.proto" />
+  </Match>
+  <Match>
     <Class name="~org\.apache\.hadoop\.mapreduce\.v2\.proto.*" />
   </Match>
   

Modified: hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml (original)
+++ hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml Mon Aug 12 21:25:49 2013
@@ -19,12 +19,12 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>2.2.0-SNAPSHOT</version>
+    <version>2.3.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-mapreduce-client-app</artifactId>
-  <version>2.2.0-SNAPSHOT</version>
+  <version>2.3.0-SNAPSHOT</version>
   <name>hadoop-mapreduce-client-app</name>
 
   <properties>

Modified: hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java Mon Aug 12 21:25:49 2013
@@ -63,7 +63,7 @@ import org.apache.hadoop.yarn.exceptions
  * This class HAS to be in this package to access package private 
  * methods/classes.
  */
-@SuppressWarnings({"unchecked" , "deprecation"})
+@SuppressWarnings({"unchecked"})
 public class TaskAttemptListenerImpl extends CompositeService 
     implements TaskUmbilicalProtocol, TaskAttemptListener {
 
@@ -118,11 +118,14 @@ public class TaskAttemptListenerImpl ext
   protected void startRpcServer() {
     Configuration conf = getConfig();
     try {
-      server =
-          RPC.getServer(TaskUmbilicalProtocol.class, this, "0.0.0.0", 0, 
-              conf.getInt(MRJobConfig.MR_AM_TASK_LISTENER_THREAD_COUNT, 
-                  MRJobConfig.DEFAULT_MR_AM_TASK_LISTENER_THREAD_COUNT),
-              false, conf, jobTokenSecretManager);
+      server = 
+          new RPC.Builder(conf).setProtocol(TaskUmbilicalProtocol.class)
+            .setInstance(this).setBindAddress("0.0.0.0")
+            .setPort(0).setNumHandlers(
+                conf.getInt(MRJobConfig.MR_AM_TASK_LISTENER_THREAD_COUNT, 
+                    MRJobConfig.DEFAULT_MR_AM_TASK_LISTENER_THREAD_COUNT))
+                    .setVerbose(false).setSecretManager(jobTokenSecretManager)
+                    .build();
       
       // Enable service authorization?
       if (conf.getBoolean(

Modified: hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/JobStateInternal.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/JobStateInternal.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/JobStateInternal.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/JobStateInternal.java Mon Aug 12 21:25:49 2013
@@ -25,6 +25,7 @@ public enum JobStateInternal {
   RUNNING,
   COMMITTING,
   SUCCEEDED,
+  FAIL_WAIT,
   FAIL_ABORT,
   FAILED,
   KILL_WAIT,

Modified: hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobEventType.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobEventType.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobEventType.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobEventType.java Mon Aug 12 21:25:49 2013
@@ -44,6 +44,7 @@ public enum JobEventType {
 
   //Producer:Job
   JOB_COMPLETED,
+  JOB_FAIL_WAIT_TIMEDOUT,
 
   //Producer:Any component
   JOB_DIAGNOSTIC_UPDATE,

Modified: hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java Mon Aug 12 21:25:49 2013
@@ -30,6 +30,9 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -313,7 +316,8 @@ public class JobImpl implements org.apac
           .addTransition
               (JobStateInternal.RUNNING,
               EnumSet.of(JobStateInternal.RUNNING,
-                  JobStateInternal.COMMITTING, JobStateInternal.FAIL_ABORT),
+                  JobStateInternal.COMMITTING, JobStateInternal.FAIL_WAIT,
+                  JobStateInternal.FAIL_ABORT),
               JobEventType.JOB_TASK_COMPLETED,
               new TaskCompletedTransition())
           .addTransition
@@ -424,7 +428,37 @@ public class JobImpl implements org.apac
                   JobEventType.JOB_TASK_ATTEMPT_COMPLETED,
                   JobEventType.JOB_MAP_TASK_RESCHEDULED))
 
-          // Transitions from FAIL_ABORT state
+          // Transitions from FAIL_WAIT state
+          .addTransition(JobStateInternal.FAIL_WAIT,
+              JobStateInternal.FAIL_WAIT,
+              JobEventType.JOB_DIAGNOSTIC_UPDATE,
+              DIAGNOSTIC_UPDATE_TRANSITION)
+          .addTransition(JobStateInternal.FAIL_WAIT,
+              JobStateInternal.FAIL_WAIT,
+              JobEventType.JOB_COUNTER_UPDATE, COUNTER_UPDATE_TRANSITION)
+          .addTransition(JobStateInternal.FAIL_WAIT,
+              EnumSet.of(JobStateInternal.FAIL_WAIT, JobStateInternal.FAIL_ABORT),
+              JobEventType.JOB_TASK_COMPLETED, 
+              new JobFailWaitTransition())
+          .addTransition(JobStateInternal.FAIL_WAIT,
+              JobStateInternal.FAIL_ABORT, JobEventType.JOB_FAIL_WAIT_TIMEDOUT, 
+              new JobFailWaitTimedOutTransition())
+          .addTransition(JobStateInternal.FAIL_WAIT, JobStateInternal.KILLED,
+              JobEventType.JOB_KILL,
+              new KilledDuringAbortTransition())
+          .addTransition(JobStateInternal.FAIL_WAIT,
+              JobStateInternal.ERROR, JobEventType.INTERNAL_ERROR,
+              INTERNAL_ERROR_TRANSITION)
+          // Ignore-able events
+          .addTransition(JobStateInternal.FAIL_WAIT,
+              JobStateInternal.FAIL_WAIT,
+              EnumSet.of(JobEventType.JOB_UPDATED_NODES,
+                  JobEventType.JOB_TASK_ATTEMPT_COMPLETED,
+                  JobEventType.JOB_MAP_TASK_RESCHEDULED,
+                  JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE,
+                  JobEventType.JOB_AM_REBOOT))
+
+          //Transitions from FAIL_ABORT state
           .addTransition(JobStateInternal.FAIL_ABORT,
               JobStateInternal.FAIL_ABORT,
               JobEventType.JOB_DIAGNOSTIC_UPDATE,
@@ -451,7 +485,8 @@ public class JobImpl implements org.apac
                   JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE,
                   JobEventType.JOB_COMMIT_COMPLETED,
                   JobEventType.JOB_COMMIT_FAILED,
-                  JobEventType.JOB_AM_REBOOT))
+                  JobEventType.JOB_AM_REBOOT,
+                  JobEventType.JOB_FAIL_WAIT_TIMEDOUT))
 
           // Transitions from KILL_ABORT state
           .addTransition(JobStateInternal.KILL_ABORT,
@@ -602,6 +637,10 @@ public class JobImpl implements org.apac
   
   private JobStateInternal forcedState = null;
 
+  //Executor used for running future tasks. Setting thread pool size to 1
+  private ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1);
+  private ScheduledFuture failWaitTriggerScheduledFuture;
+
   public JobImpl(JobId jobId, ApplicationAttemptId applicationAttemptId,
       Configuration conf, EventHandler eventHandler,
       TaskAttemptListener taskAttemptListener,
@@ -962,6 +1001,7 @@ public class JobImpl implements org.apac
     case SETUP:
     case COMMITTING:
       return JobState.RUNNING;
+    case FAIL_WAIT:
     case FAIL_ABORT:
       return JobState.FAILED;
     case REBOOT:
@@ -1565,7 +1605,43 @@ public class JobImpl implements org.apac
       job.unsuccessfulFinish(finalState);
     }
   }
-    
+
+  //This transition happens when a job is to be failed. It waits for all the
+  //tasks to finish / be killed.
+  private static class JobFailWaitTransition
+  implements MultipleArcTransition<JobImpl, JobEvent, JobStateInternal> {
+    @Override
+    public JobStateInternal transition(JobImpl job, JobEvent event) {
+      if(!job.failWaitTriggerScheduledFuture.isCancelled()) {
+        for(Task task: job.tasks.values()) {
+          if(!task.isFinished()) {
+            return JobStateInternal.FAIL_WAIT;
+          }
+        }
+      }
+      //Finished waiting. All tasks finished / were killed
+      job.failWaitTriggerScheduledFuture.cancel(false);
+      job.eventHandler.handle(new CommitterJobAbortEvent(job.jobId,
+        job.jobContext, org.apache.hadoop.mapreduce.JobStatus.State.FAILED));
+      return JobStateInternal.FAIL_ABORT;
+    }
+  }
+
+  //This transition happens when a job to be failed times out while waiting on
+  //tasks that had been sent the KILL signal. It is triggered by a
+  //ScheduledFuture task queued in the executor.
+  private static class JobFailWaitTimedOutTransition
+  implements SingleArcTransition<JobImpl, JobEvent> {
+    @Override
+    public void transition(JobImpl job, JobEvent event) {
+      LOG.info("Timeout expired in FAIL_WAIT waiting for tasks to get killed."
+        + " Going to fail job anyway");
+      job.failWaitTriggerScheduledFuture.cancel(false);
+      job.eventHandler.handle(new CommitterJobAbortEvent(job.jobId,
+        job.jobContext, org.apache.hadoop.mapreduce.JobStatus.State.FAILED));
+    }
+  }
+
   // JobFinishedEvent triggers the move of the history file out of the staging
   // area. May need to create a new event type for this if JobFinished should 
   // not be generated for KilledJobs, etc.
@@ -1798,6 +1874,23 @@ public class JobImpl implements org.apac
       return checkJobAfterTaskCompletion(job);
     }
 
+    //This class is used to queue a ScheduledFuture to send an event to a job
+    //after some delay. This can be used to wait for maximum amount of time
+    //before proceeding anyway. e.g. When a job is waiting in FAIL_WAIT for
+    //all tasks to be killed.
+    static class TriggerScheduledFuture implements Runnable {
+      JobEvent toSend;
+      JobImpl job;
+      TriggerScheduledFuture(JobImpl job, JobEvent toSend) {
+        this.toSend = toSend;
+        this.job = job;
+      }
+      public void run() {
+        LOG.info("Sending event " + toSend + " to " + job.getID());
+        job.getEventHandler().handle(toSend);
+      }
+    }
+
     protected JobStateInternal checkJobAfterTaskCompletion(JobImpl job) {
       //check for Job failure
       if (job.failedMapTaskCount*100 > 
@@ -1811,10 +1904,33 @@ public class JobImpl implements org.apac
             " failedReduces:" + job.failedReduceTaskCount;
         LOG.info(diagnosticMsg);
         job.addDiagnostic(diagnosticMsg);
-        job.eventHandler.handle(new CommitterJobAbortEvent(job.jobId,
-            job.jobContext,
-            org.apache.hadoop.mapreduce.JobStatus.State.FAILED));
-        return JobStateInternal.FAIL_ABORT;
+
+        //Send kill signal to all unfinished tasks here.
+        boolean allDone = true;
+        for (Task task : job.tasks.values()) {
+          if(!task.isFinished()) {
+            allDone = false;
+            job.eventHandler.handle(
+              new TaskEvent(task.getID(), TaskEventType.T_KILL));
+          }
+        }
+
+        //If all tasks are already done, we should go directly to FAIL_ABORT
+        if(allDone) {
+          job.eventHandler.handle(new CommitterJobAbortEvent(job.jobId,
+            job.jobContext, org.apache.hadoop.mapreduce.JobStatus.State.FAILED)
+          );
+          return JobStateInternal.FAIL_ABORT;
+        }
+
+        //Set max timeout to wait for the tasks to get killed
+        job.failWaitTriggerScheduledFuture = job.executor.schedule(
+          new TriggerScheduledFuture(job, new JobEvent(job.getID(),
+            JobEventType.JOB_FAIL_WAIT_TIMEDOUT)), job.conf.getInt(
+                MRJobConfig.MR_AM_COMMITTER_CANCEL_TIMEOUT_MS,
+                MRJobConfig.DEFAULT_MR_AM_COMMITTER_CANCEL_TIMEOUT_MS),
+                TimeUnit.MILLISECONDS);
+        return JobStateInternal.FAIL_WAIT;
       }
       
       return job.checkReadyForCommit();

Modified: hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java Mon Aug 12 21:25:49 2013
@@ -20,7 +20,9 @@ package org.apache.hadoop.mapreduce.v2.a
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.util.ArrayList;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Set;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ConcurrentHashMap;
@@ -44,14 +46,15 @@ import org.apache.hadoop.mapreduce.v2.ap
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy;
 import org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy.ContainerManagementProtocolProxyData;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
-import org.apache.hadoop.yarn.util.Records;
 
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
@@ -139,13 +142,18 @@ public class ContainerLauncherImpl exten
           event.getContainerLaunchContext();
 
         // Now launch the actual container
-        StartContainerRequest startRequest = Records
-          .newRecord(StartContainerRequest.class);
-        startRequest.setContainerLaunchContext(containerLaunchContext);
-        startRequest.setContainerToken(event.getContainerToken());
-        StartContainerResponse response =
-            proxy.getContainerManagementProtocol().startContainer(startRequest);
-
+        StartContainerRequest startRequest =
+            StartContainerRequest.newInstance(containerLaunchContext,
+              event.getContainerToken());
+        List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
+        list.add(startRequest);
+        StartContainersRequest requestList = StartContainersRequest.newInstance(list);
+        StartContainersResponse response =
+            proxy.getContainerManagementProtocol().startContainers(requestList);
+        if (response.getFailedRequests() != null
+            && response.getFailedRequests().containsKey(containerID)) {
+          throw response.getFailedRequests().get(containerID).deSerialize();
+        }
         ByteBuffer portInfo =
             response.getAllServicesMetaData().get(
                 ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID);
@@ -192,13 +200,17 @@ public class ContainerLauncherImpl exten
           proxy = getCMProxy(this.containerMgrAddress, this.containerID);
 
           // kill the remote container if already launched
-          StopContainerRequest stopRequest = Records
-              .newRecord(StopContainerRequest.class);
-          stopRequest.setContainerId(this.containerID);
-          proxy.getContainerManagementProtocol().stopContainer(stopRequest);
-
+          List<ContainerId> ids = new ArrayList<ContainerId>();
+          ids.add(this.containerID);
+          StopContainersRequest request = StopContainersRequest.newInstance(ids);
+          StopContainersResponse response =
+              proxy.getContainerManagementProtocol().stopContainers(request);
+          if (response.getFailedRequests() != null
+              && response.getFailedRequests().containsKey(this.containerID)) {
+            throw response.getFailedRequests().get(this.containerID)
+              .deSerialize();
+          }
         } catch (Throwable t) {
-
           // ignore the cleanup failure
           String message = "cleanup failed for container "
               + this.containerID + " : "

Modified: hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java Mon Aug 12 21:25:49 2013
@@ -93,9 +93,9 @@ public class LocalContainerAllocator ext
   @SuppressWarnings("unchecked")
   @Override
   protected synchronized void heartbeat() throws Exception {
-    AllocateRequest allocateRequest = AllocateRequest.newInstance(
-        this.applicationAttemptId, this.lastResponseID, super
-            .getApplicationProgress(), new ArrayList<ResourceRequest>(),
+    AllocateRequest allocateRequest =
+        AllocateRequest.newInstance(this.lastResponseID,
+          super.getApplicationProgress(), new ArrayList<ResourceRequest>(),
         new ArrayList<ContainerId>(), null);
     AllocateResponse allocateResponse;
     try {
@@ -143,7 +143,7 @@ public class LocalContainerAllocator ext
       LOG.info("Processing the event " + event.toString());
       // Assign the same container ID as the AM
       ContainerId cID =
-          ContainerId.newInstance(applicationAttemptId,
+          ContainerId.newInstance(getContext().getApplicationAttemptId(),
             this.containerId.getId());
       Container container = recordFactory.newRecordInstance(Container.class);
       container.setId(cID);

Modified: hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java Mon Aug 12 21:25:49 2013
@@ -21,7 +21,6 @@ package org.apache.hadoop.mapreduce.v2.a
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
-import java.security.PrivilegedAction;
 import java.util.Map;
 import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -46,16 +45,14 @@ import org.apache.hadoop.yarn.api.protoc
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.client.ClientRMProxy;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
-import org.apache.hadoop.yarn.ipc.YarnRPC;
 
 /**
  * Registers/unregisters to RM and sends heartbeats to RM.
@@ -65,7 +62,6 @@ public abstract class RMCommunicator ext
   private static final Log LOG = LogFactory.getLog(RMContainerAllocator.class);
   private int rmPollInterval;//millis
   protected ApplicationId applicationId;
-  protected ApplicationAttemptId applicationAttemptId;
   private final AtomicBoolean stopped;
   protected Thread allocatorThread;
   @SuppressWarnings("rawtypes")
@@ -93,7 +89,6 @@ public abstract class RMCommunicator ext
     this.context = context;
     this.eventHandler = context.getEventHandler();
     this.applicationId = context.getApplicationID();
-    this.applicationAttemptId = context.getApplicationAttemptId();
     this.stopped = new AtomicBoolean(false);
     this.heartbeatCallbacks = new ConcurrentLinkedQueue<Runnable>();
   }
@@ -144,7 +139,6 @@ public abstract class RMCommunicator ext
     try {
       RegisterApplicationMasterRequest request =
         recordFactory.newRecordInstance(RegisterApplicationMasterRequest.class);
-      request.setApplicationAttemptId(applicationAttemptId);
       if (serviceAddr != null) {
         request.setHost(serviceAddr.getHostName());
         request.setRpcPort(serviceAddr.getPort());
@@ -195,11 +189,8 @@ public abstract class RMCommunicator ext
       LOG.info("History url is " + historyUrl);
 
       FinishApplicationMasterRequest request =
-          recordFactory.newRecordInstance(FinishApplicationMasterRequest.class);
-      request.setAppAttemptId(this.applicationAttemptId);
-      request.setFinalApplicationStatus(finishState);
-      request.setDiagnostics(sb.toString());
-      request.setTrackingUrl(historyUrl);
+          FinishApplicationMasterRequest.newInstance(finishState,
+            sb.toString(), historyUrl);
       scheduler.finishApplicationMaster(request);
     } catch(Exception are) {
       LOG.error("Exception while unregistering ", are);
@@ -265,27 +256,12 @@ public abstract class RMCommunicator ext
 
   protected ApplicationMasterProtocol createSchedulerProxy() {
     final Configuration conf = getConfig();
-    final YarnRPC rpc = YarnRPC.create(conf);
-    final InetSocketAddress serviceAddr = conf.getSocketAddr(
-        YarnConfiguration.RM_SCHEDULER_ADDRESS,
-        YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS,
-        YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT);
 
-    UserGroupInformation currentUser;
     try {
-      currentUser = UserGroupInformation.getCurrentUser();
+      return ClientRMProxy.createRMProxy(conf, ApplicationMasterProtocol.class);
     } catch (IOException e) {
       throw new YarnRuntimeException(e);
     }
-
-    // CurrentUser should already have AMToken loaded.
-    return currentUser.doAs(new PrivilegedAction<ApplicationMasterProtocol>() {
-      @Override
-      public ApplicationMasterProtocol run() {
-        return (ApplicationMasterProtocol) rpc.getProxy(ApplicationMasterProtocol.class,
-            serviceAddr, conf);
-      }
-    });
   }
 
   protected abstract void heartbeat() throws Exception;

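Note on the RMCommunicator hunk above: the hand-rolled YarnRPC proxy (and the PrivilegedAction wrapper around it) is replaced by ClientRMProxy.createRMProxy, and the ApplicationAttemptId is no longer set on the register/finish requests; the RM is expected to identify the attempt from the AMRMToken carried by the caller's UGI (see the TestRMContainerAllocator change further down). A minimal sketch of the new unregister path, assuming the token is already on the current user and that finalStatus, diagnostics and historyUrl are supplied by the caller:

    // Sketch only, not part of the patch: proxy creation plus unregister
    // using the static factories introduced by this change.
    Configuration conf = getConfig();
    // createRMProxy may throw IOException; wrap as needed.
    ApplicationMasterProtocol scheduler =
        ClientRMProxy.createRMProxy(conf, ApplicationMasterProtocol.class);
    FinishApplicationMasterRequest request =
        FinishApplicationMasterRequest.newInstance(
            finalStatus,      // FinalApplicationStatus, e.g. SUCCEEDED
            diagnostics,      // free-form diagnostics string
            historyUrl);      // tracking URL shown in the RM UI
    scheduler.finishApplicationMaster(request);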
Modified: hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java Mon Aug 12 21:25:49 2013
@@ -1160,14 +1160,6 @@ public class RMContainerAllocator extend
     TaskAttemptId get(ContainerId cId) {
       return containerToAttemptMap.get(cId);
     }
-    
-    NodeId getNodeId(TaskAttemptId tId) {
-      if (tId.getTaskId().getTaskType().equals(TaskType.MAP)) {
-        return maps.get(tId).getNodeId();
-      } else {
-        return reduces.get(tId).getNodeId();
-      }
-    }
 
     ContainerId get(TaskAttemptId tId) {
       Container taskContainer;

Modified: hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java Mon Aug 12 21:25:49 2013
@@ -145,10 +145,10 @@ public abstract class RMContainerRequest
   }
 
   protected AllocateResponse makeRemoteRequest() throws IOException {
-    AllocateRequest allocateRequest = AllocateRequest.newInstance(
-        applicationAttemptId, lastResponseID, super.getApplicationProgress(),
-        new ArrayList<ResourceRequest>(ask), new ArrayList<ContainerId>(
-            release), null);
+    AllocateRequest allocateRequest =
+        AllocateRequest.newInstance(lastResponseID,
+          super.getApplicationProgress(), new ArrayList<ResourceRequest>(ask),
+          new ArrayList<ContainerId>(release), null);
     AllocateResponse allocateResponse;
     try {
       allocateResponse = scheduler.allocate(allocateRequest);

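The RMContainerRequestor hunk follows the same theme: AllocateRequest.newInstance loses its ApplicationAttemptId argument, so the heartbeat request is built from the last response id, progress, ask/release lists and an optional blacklist only. A minimal sketch under those assumptions (empty lists, no blacklist, illustrative progress value):

    // Sketch: allocate call with the reduced newInstance signature.
    AllocateRequest allocateRequest = AllocateRequest.newInstance(
        lastResponseID,                       // id from the previous AllocateResponse
        0.5f,                                 // application progress, 0.0f..1.0f
        new ArrayList<ResourceRequest>(),     // containers being asked for
        new ArrayList<ContainerId>(),         // containers being released
        null);                                // ResourceBlacklistRequest (none here)
    AllocateResponse allocateResponse = scheduler.allocate(allocateRequest);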
Modified: hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java Mon Aug 12 21:25:49 2013
@@ -357,15 +357,20 @@ public class MRApp extends MRAppMaster {
   }
 
   public void waitForState(Service.STATE finalState) throws Exception {
-    int timeoutSecs = 0;
-    while (!finalState.equals(getServiceState()) && timeoutSecs++ < 20) {
-      System.out.println("MRApp State is : " + getServiceState()
-          + " Waiting for state : " + finalState);
-      Thread.sleep(500);
-    }
-    System.out.println("MRApp State is : " + getServiceState());
-    Assert.assertEquals("MRApp state is not correct (timedout)", finalState,
-        getServiceState());
+    if (finalState == Service.STATE.STOPPED) {
+       Assert.assertTrue("Timeout while waiting for MRApp to stop",
+           waitForServiceToStop(20 * 1000));
+    } else {
+      int timeoutSecs = 0;
+      while (!finalState.equals(getServiceState()) && timeoutSecs++ < 20) {
+        System.out.println("MRApp State is : " + getServiceState()
+            + " Waiting for state : " + finalState);
+        Thread.sleep(500);
+      }
+      System.out.println("MRApp State is : " + getServiceState());
+      Assert.assertEquals("MRApp state is not correct (timedout)", finalState,
+          getServiceState());
+    }
   }
 
   public void verifyCompleted() {

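The MRApp change above swaps the hand-rolled polling loop for Service.waitForServiceToStop when the target state is STOPPED; that method blocks until the service reaches STOPPED or the timeout elapses and returns whether it got there. A minimal sketch of the call, assuming some Service instance named service:

    // Sketch: wait up to 20 seconds for the service to stop.
    boolean stopped = service.waitForServiceToStop(20 * 1000);
    Assert.assertTrue("Timeout while waiting for service to stop", stopped);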
Modified: hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java Mon Aug 12 21:25:49 2013
@@ -234,7 +234,7 @@ public class MRAppBenchmark {
                   for (int i = 0; i < numContainers; i++) {
                     ContainerId containerId =
                         ContainerId.newInstance(
-                          request.getApplicationAttemptId(),
+                          getContext().getApplicationAttemptId(),
                           request.getResponseId() + i);
                     containers.add(Container.newInstance(containerId,
                       NodeId.newInstance("host" + containerId.getId(), 2345),

Modified: hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java Mon Aug 12 21:25:49 2013
@@ -71,6 +71,8 @@ import org.apache.hadoop.mapreduce.v2.ut
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -87,6 +89,7 @@ import org.apache.hadoop.yarn.event.Even
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
@@ -1392,6 +1395,18 @@ public class TestRMContainerAllocator {
 
     @Override
     protected void register() {
+      ApplicationAttemptId attemptId = getContext().getApplicationAttemptId();
+      UserGroupInformation ugi =
+          UserGroupInformation.createRemoteUser(attemptId.toString());
+      Token<AMRMTokenIdentifier> token =
+          rm.getRMContext().getRMApps().get(attemptId.getApplicationId())
+            .getRMAppAttempt(attemptId).getAMRMToken();
+      try {
+        ugi.addTokenIdentifier(token.decodeIdentifier());
+      } catch (IOException e) {
+        throw new YarnRuntimeException(e);
+      }
+      UserGroupInformation.setLoginUser(ugi);
       super.register();
     }
 

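Context for the TestRMContainerAllocator hunk: with the attempt id gone from RegisterApplicationMasterRequest, the RM identifies the registering attempt from the AMRMTokenIdentifier on the connection, so the test builds a remote-user UGI named after the attempt, adds the decoded token identifier to it, and installs it as the login user before calling super.register(). A hedged sketch of the same setup with illustrative ids (the token line is only a placeholder, since a real AMRMToken comes from the RM):

    // Sketch: the UGI an AM would register under; ids are illustrative.
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
    UserGroupInformation ugi =
        UserGroupInformation.createRemoteUser(attemptId.toString());
    // ugi.addTokenIdentifier(amrmToken.decodeIdentifier()); // token from the RM
    UserGroupInformation.setLoginUser(ugi);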
Modified: hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java Mon Aug 12 21:25:49 2013
@@ -57,13 +57,17 @@ import org.apache.hadoop.mapreduce.v2.ap
 import org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler;
 import org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventType;
 import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobDiagnosticsUpdateEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobFinishEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobStartEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl.InitTransition;
 import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
 import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler;
@@ -74,7 +78,9 @@ import org.apache.hadoop.yarn.api.record
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.DrainDispatcher;
 import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.event.InlineDispatcher;
 import org.apache.hadoop.yarn.state.StateMachine;
 import org.apache.hadoop.yarn.state.StateMachineFactory;
 import org.apache.hadoop.yarn.util.ConverterUtils;
@@ -84,6 +90,7 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 
 /**
@@ -332,6 +339,78 @@ public class TestJobImpl {
     commitHandler.stop();
   }
 
+  @Test
+  public void testAbortJobCalledAfterKillingTasks() throws IOException {
+    Configuration conf = new Configuration();
+    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
+    conf.set(MRJobConfig.MR_AM_COMMITTER_CANCEL_TIMEOUT_MS, "1000");
+    InlineDispatcher dispatcher = new InlineDispatcher();
+    dispatcher.init(conf);
+    dispatcher.start();
+    OutputCommitter committer = Mockito.mock(OutputCommitter.class);
+    CommitterEventHandler commitHandler =
+        createCommitterEventHandler(dispatcher, committer);
+    commitHandler.init(conf);
+    commitHandler.start();
+    JobImpl job = createRunningStubbedJob(conf, dispatcher, 2);
+
+    //Fail one task. This should land the JobImpl in the FAIL_WAIT state
+    job.handle(new JobTaskEvent(
+      MRBuilderUtils.newTaskId(job.getID(), 1, TaskType.MAP),
+      TaskState.FAILED));
+    //Verify abort job hasn't been called
+    Mockito.verify(committer, Mockito.never())
+      .abortJob((JobContext) Mockito.any(), (State) Mockito.any());
+    assertJobState(job, JobStateInternal.FAIL_WAIT);
+
+    //Verify abortJob is called once and the job failed
+    Mockito.verify(committer, Mockito.timeout(2000).times(1))
+      .abortJob((JobContext) Mockito.any(), (State) Mockito.any());
+    assertJobState(job, JobStateInternal.FAILED);
+
+    dispatcher.stop();
+  }
+
+  @Test (timeout=10000)
+  public void testFailAbortDoesntHang() throws IOException {
+    Configuration conf = new Configuration();
+    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
+    conf.set(MRJobConfig.MR_AM_COMMITTER_CANCEL_TIMEOUT_MS, "1000");
+    
+    DrainDispatcher dispatcher = new DrainDispatcher();
+    dispatcher.init(conf);
+    dispatcher.start();
+    OutputCommitter committer = Mockito.mock(OutputCommitter.class);
+    CommitterEventHandler commitHandler =
+        createCommitterEventHandler(dispatcher, committer);
+    commitHandler.init(conf);
+    commitHandler.start();
+    //Job has only 1 mapper task. No reducers
+    conf.setInt(MRJobConfig.NUM_REDUCES, 0);
+    conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 1);
+    JobImpl job = createRunningStubbedJob(conf, dispatcher, 1);
+
+    //Fail / finish all the tasks. This should land the JobImpl directly in the
+    //FAIL_ABORT state
+    for(Task t: job.tasks.values()) {
+      TaskImpl task = (TaskImpl) t;
+      task.handle(new TaskEvent(task.getID(), TaskEventType.T_SCHEDULE));
+      for(TaskAttempt ta: task.getAttempts().values()) {
+        task.handle(new TaskTAttemptEvent(ta.getID(),
+          TaskEventType.T_ATTEMPT_FAILED));
+      }
+    }
+    assertJobState(job, JobStateInternal.FAIL_ABORT);
+
+    dispatcher.await();
+    //Verify abortJob is called once and the job failed
+    Mockito.verify(committer, Mockito.timeout(2000).times(1))
+      .abortJob((JobContext) Mockito.any(), (State) Mockito.any());
+    assertJobState(job, JobStateInternal.FAILED);
+
+    dispatcher.stop();
+  }
+
   @Test(timeout=20000)
   public void testKilledDuringFailAbort() throws Exception {
     Configuration conf = new Configuration();

Modified: hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java Mon Aug 12 21:25:49 2013
@@ -24,6 +24,8 @@ import java.io.IOException;
 import java.lang.reflect.UndeclaredThrowableException;
 import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -52,12 +54,13 @@ import org.apache.hadoop.mapreduce.v2.ap
 import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
-import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.StopContainerResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -393,18 +396,18 @@ public class TestContainerLauncher {
     private ContainerStatus status = null;
 
     @Override
-    public GetContainerStatusResponse getContainerStatus(
-        GetContainerStatusRequest request) throws IOException {
-      GetContainerStatusResponse response = recordFactory
-          .newRecordInstance(GetContainerStatusResponse.class);
-      response.setStatus(status);
-      return response;
+    public GetContainerStatusesResponse getContainerStatuses(
+        GetContainerStatusesRequest request) throws IOException {
+      List<ContainerStatus> statuses = new ArrayList<ContainerStatus>();
+      statuses.add(status);
+      return GetContainerStatusesResponse.newInstance(statuses, null);
     }
 
     @Override
-    public StartContainerResponse startContainer(StartContainerRequest request)
+    public StartContainersResponse startContainers(StartContainersRequest requests)
         throws IOException {
 
+      StartContainerRequest request = requests.getStartContainerRequests().get(0);
       ContainerTokenIdentifier containerTokenIdentifier =
           MRApp.newContainerTokenIdentifier(request.getContainerToken());
 
@@ -412,8 +415,8 @@ public class TestContainerLauncher {
       Assert.assertEquals(MRApp.NM_HOST + ":" + MRApp.NM_PORT,
         containerTokenIdentifier.getNmHostAddress());
 
-      StartContainerResponse response = recordFactory
-          .newRecordInstance(StartContainerResponse.class);
+      StartContainersResponse response = recordFactory
+          .newRecordInstance(StartContainersResponse.class);
       status = recordFactory.newRecordInstance(ContainerStatus.class);
       try {
         // make the thread sleep to look like its not going to respond
@@ -429,7 +432,7 @@ public class TestContainerLauncher {
     }
 
     @Override
-    public StopContainerResponse stopContainer(StopContainerRequest request)
+    public StopContainersResponse stopContainers(StopContainersRequest request)
         throws IOException {
       Exception e = new Exception("Dummy function", new Exception(
           "Dummy function cause"));

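The TestContainerLauncher changes track the container-management protocol's move from per-container calls to batched ones: StartContainer/StopContainer/GetContainerStatus become StartContainers/StopContainers/GetContainerStatuses, each taking and returning lists. A minimal sketch of wrapping a single launch in the batched form, assuming launchContext, nmToken and containerManager already exist (factory methods as in the 2.x protocol records):

    // Sketch: one StartContainerRequest wrapped in the batched request type.
    StartContainerRequest start =
        StartContainerRequest.newInstance(launchContext, nmToken);
    StartContainersRequest batch =
        StartContainersRequest.newInstance(Collections.singletonList(start));
    StartContainersResponse response = containerManager.startContainers(batch);
    // Per-container failures are reported in response.getFailedRequests().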
Modified: hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java Mon Aug 12 21:25:49 2013
@@ -45,12 +45,12 @@ import org.apache.hadoop.mapreduce.v2.ap
 import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher.EventType;
 import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
 import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
-import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.StopContainerResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -162,8 +162,8 @@ public class TestContainerLauncherImpl {
     try {
       ContainerId contId = makeContainerId(0l, 0, 0, 1);
       TaskAttemptId taskAttemptId = makeTaskAttemptId(0l, 0, 0, TaskType.MAP, 0);
-      StartContainerResponse startResp = 
-        recordFactory.newRecordInstance(StartContainerResponse.class);
+      StartContainersResponse startResp =
+        recordFactory.newRecordInstance(StartContainersResponse.class);
       startResp.setAllServicesMetaData(serviceResponse);
       
 
@@ -176,14 +176,14 @@ public class TestContainerLauncherImpl {
         .thenReturn(contId);
       when(mockLaunchEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
       when(mockLaunchEvent.getContainerMgrAddress()).thenReturn(cmAddress);
-      when(mockCM.startContainer(any(StartContainerRequest.class))).thenReturn(startResp);
+      when(mockCM.startContainers(any(StartContainersRequest.class))).thenReturn(startResp);
       when(mockLaunchEvent.getContainerToken()).thenReturn(
           createNewContainerToken(contId, cmAddress));
       ut.handle(mockLaunchEvent);
       
       ut.waitForPoolToIdle();
       
-      verify(mockCM).startContainer(any(StartContainerRequest.class));
+      verify(mockCM).startContainers(any(StartContainersRequest.class));
       
       LOG.info("inserting cleanup event");
       ContainerLauncherEvent mockCleanupEvent = 
@@ -198,7 +198,7 @@ public class TestContainerLauncherImpl {
       
       ut.waitForPoolToIdle();
       
-      verify(mockCM).stopContainer(any(StopContainerRequest.class));
+      verify(mockCM).stopContainers(any(StopContainersRequest.class));
     } finally {
       ut.stop();
     }
@@ -224,8 +224,8 @@ public class TestContainerLauncherImpl {
       ContainerId contId = makeContainerId(0l, 0, 0, 1);
       TaskAttemptId taskAttemptId = makeTaskAttemptId(0l, 0, 0, TaskType.MAP, 0);
       String cmAddress = "127.0.0.1:8000";
-      StartContainerResponse startResp = 
-        recordFactory.newRecordInstance(StartContainerResponse.class);
+      StartContainersResponse startResp =
+        recordFactory.newRecordInstance(StartContainersResponse.class);
       startResp.setAllServicesMetaData(serviceResponse);
 
       LOG.info("inserting cleanup event");
@@ -241,7 +241,7 @@ public class TestContainerLauncherImpl {
       
       ut.waitForPoolToIdle();
       
-      verify(mockCM, never()).stopContainer(any(StopContainerRequest.class));
+      verify(mockCM, never()).stopContainers(any(StopContainersRequest.class));
 
       LOG.info("inserting launch event");
       ContainerRemoteLaunchEvent mockLaunchEvent = 
@@ -252,14 +252,14 @@ public class TestContainerLauncherImpl {
         .thenReturn(contId);
       when(mockLaunchEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
       when(mockLaunchEvent.getContainerMgrAddress()).thenReturn(cmAddress);
-      when(mockCM.startContainer(any(StartContainerRequest.class))).thenReturn(startResp);
+      when(mockCM.startContainers(any(StartContainersRequest.class))).thenReturn(startResp);
       when(mockLaunchEvent.getContainerToken()).thenReturn(
           createNewContainerToken(contId, cmAddress));
       ut.handle(mockLaunchEvent);
       
       ut.waitForPoolToIdle();
       
-      verify(mockCM, never()).startContainer(any(StartContainerRequest.class));
+      verify(mockCM, never()).startContainers(any(StartContainersRequest.class));
     } finally {
       ut.stop();
     }
@@ -286,8 +286,8 @@ public class TestContainerLauncherImpl {
       ContainerId contId = makeContainerId(0l, 0, 0, 1);
       TaskAttemptId taskAttemptId = makeTaskAttemptId(0l, 0, 0, TaskType.MAP, 0);
       String cmAddress = "127.0.0.1:8000";
-      StartContainerResponse startResp =
-        recordFactory.newRecordInstance(StartContainerResponse.class);
+      StartContainersResponse startResp =
+        recordFactory.newRecordInstance(StartContainersResponse.class);
       startResp.setAllServicesMetaData(serviceResponse);
 
       LOG.info("inserting launch event");
@@ -299,20 +299,20 @@ public class TestContainerLauncherImpl {
         .thenReturn(contId);
       when(mockLaunchEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
       when(mockLaunchEvent.getContainerMgrAddress()).thenReturn(cmAddress);
-      when(mockCM.startContainer(any(StartContainerRequest.class))).thenReturn(startResp);
+      when(mockCM.startContainers(any(StartContainersRequest.class))).thenReturn(startResp);
       when(mockLaunchEvent.getContainerToken()).thenReturn(
           createNewContainerToken(contId, cmAddress));
       ut.handle(mockLaunchEvent);
 
       ut.waitForPoolToIdle();
 
-      verify(mockCM).startContainer(any(StartContainerRequest.class));
+      verify(mockCM).startContainers(any(StartContainersRequest.class));
 
       // skip cleanup and make sure stop kills the container
 
     } finally {
       ut.stop();
-      verify(mockCM).stopContainer(any(StopContainerRequest.class));
+      verify(mockCM).stopContainers(any(StopContainersRequest.class));
     }
   }
   
@@ -341,8 +341,8 @@ public class TestContainerLauncherImpl {
       ContainerId contId = makeContainerId(0l, 0, 0, 1);
       TaskAttemptId taskAttemptId = makeTaskAttemptId(0l, 0, 0, TaskType.MAP, 0);
       String cmAddress = "127.0.0.1:8000";
-      StartContainerResponse startResp = 
-        recordFactory.newRecordInstance(StartContainerResponse.class);
+      StartContainersResponse startResp =
+        recordFactory.newRecordInstance(StartContainersResponse.class);
       startResp.setAllServicesMetaData(serviceResponse);
       
      
@@ -415,7 +415,7 @@ public class TestContainerLauncherImpl {
       this.completeLaunchBarrier = completeLaunchBarrier;
     }
     @Override
-    public StartContainerResponse startContainer(StartContainerRequest request)
+    public StartContainersResponse startContainers(StartContainersRequest request)
         throws IOException {
       try {
         startLaunchBarrier.await();
@@ -433,16 +433,14 @@ public class TestContainerLauncherImpl {
     }
 
     @Override
-    public StopContainerResponse stopContainer(StopContainerRequest request)
+    public StopContainersResponse stopContainers(StopContainersRequest request)
         throws IOException {
-    
       return null;
     }
 
     @Override
-    public GetContainerStatusResponse getContainerStatus(
-        GetContainerStatusRequest request) throws IOException {
-    
+    public GetContainerStatusesResponse getContainerStatuses(
+        GetContainerStatusesRequest request) throws IOException {
       return null;
     }
   }

Modified: hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml (original)
+++ hadoop/common/branches/YARN-321/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml Mon Aug 12 21:25:49 2013
@@ -19,12 +19,12 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>2.2.0-SNAPSHOT</version>
+    <version>2.3.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-mapreduce-client-common</artifactId>
-  <version>2.2.0-SNAPSHOT</version>
+  <version>2.3.0-SNAPSHOT</version>
   <name>hadoop-mapreduce-client-common</name>
 
   <properties>


