hadoop-mapreduce-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r1354832 [2/3] - in /hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project: ./ conf/ hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/ hadoop-mapreduce-client/hadoop-mapreduce-clie...
Date: Thu, 28 Jun 2012 07:00:39 GMT
Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java Thu Jun 28 06:59:38 2012
@@ -18,15 +18,24 @@
 
 package org.apache.hadoop.mapreduce.v2.app;
 
+import static org.mockito.Matchers.anyFloat;
+import static org.mockito.Matchers.anyInt;
 import static org.mockito.Matchers.isA;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.doCallRealMethod;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import junit.framework.Assert;
@@ -46,9 +55,11 @@ import org.apache.hadoop.mapreduce.v2.ap
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobUpdatedNodesEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent;
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerFailedEvent;
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent;
@@ -63,9 +74,10 @@ import org.apache.hadoop.yarn.api.AMRMPr
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.DrainDispatcher;
 import org.apache.hadoop.yarn.event.Event;
@@ -74,13 +86,11 @@ import org.apache.hadoop.yarn.factories.
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
-import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
-import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
 import org.apache.hadoop.yarn.util.BuilderUtils;
 import org.junit.After;
 import org.junit.Test;
@@ -426,29 +436,21 @@ public class TestRMContainerAllocator {
 
     // Finish off 1 map.
     Iterator<Task> it = job.getTasks().values().iterator();
-    finishNextNTasks(mrApp, it, 1);
+    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 1);
     allocator.schedule();
     rmDispatcher.await();
     Assert.assertEquals(0.095f, job.getProgress(), 0.001f);
     Assert.assertEquals(0.095f, rmApp.getProgress(), 0.001f);
 
     // Finish off 7 more so that map-progress is 80%
-    finishNextNTasks(mrApp, it, 7);
+    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 7);
     allocator.schedule();
     rmDispatcher.await();
     Assert.assertEquals(0.41f, job.getProgress(), 0.001f);
     Assert.assertEquals(0.41f, rmApp.getProgress(), 0.001f);
 
     // Finish off the 2 remaining maps
-    finishNextNTasks(mrApp, it, 2);
-
-    // Wait till all reduce-attempts request for containers
-    for (Task t : job.getTasks().values()) {
-      if (t.getType() == TaskType.REDUCE) {
-        mrApp.waitForState(t.getAttempts().values().iterator().next(),
-          TaskAttemptState.UNASSIGNED);
-      }
-    }
+    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 2);
 
     allocator.schedule();
     rmDispatcher.await();
@@ -465,7 +467,7 @@ public class TestRMContainerAllocator {
     }
 
     // Finish off 2 reduces
-    finishNextNTasks(mrApp, it, 2);
+    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 2);
 
     allocator.schedule();
     rmDispatcher.await();
@@ -473,7 +475,7 @@ public class TestRMContainerAllocator {
     Assert.assertEquals(0.59f, rmApp.getProgress(), 0.001f);
 
     // Finish off the remaining 8 reduces.
-    finishNextNTasks(mrApp, it, 8);
+    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 8);
     allocator.schedule();
     rmDispatcher.await();
     // Remaining is JobCleanup
@@ -481,19 +483,28 @@ public class TestRMContainerAllocator {
     Assert.assertEquals(0.95f, rmApp.getProgress(), 0.001f);
   }
 
-  private void finishNextNTasks(MRApp mrApp, Iterator<Task> it, int nextN)
-      throws Exception {
+  private void finishNextNTasks(DrainDispatcher rmDispatcher, MockNM node,
+      MRApp mrApp, Iterator<Task> it, int nextN) throws Exception {
     Task task;
     for (int i=0; i<nextN; i++) {
       task = it.next();
-      finishTask(mrApp, task);
+      finishTask(rmDispatcher, node, mrApp, task);
     }
   }
 
-  private void finishTask(MRApp mrApp, Task task) throws Exception {
+  private void finishTask(DrainDispatcher rmDispatcher, MockNM node,
+      MRApp mrApp, Task task) throws Exception {
     TaskAttempt attempt = task.getAttempts().values().iterator().next();
+    List<ContainerStatus> contStatus = new ArrayList<ContainerStatus>(1);
+    contStatus.add(BuilderUtils.newContainerStatus(attempt.getAssignedContainerID(),
+        ContainerState.COMPLETE, "", 0));
+    Map<ApplicationId,List<ContainerStatus>> statusUpdate =
+        new HashMap<ApplicationId,List<ContainerStatus>>(1);
+    statusUpdate.put(mrApp.getAppID(), contStatus);
+    node.nodeHeartbeat(statusUpdate, true);
+    rmDispatcher.await();
     mrApp.getContext().getEventHandler().handle(
-        new TaskAttemptEvent(attempt.getID(), TaskAttemptEventType.TA_DONE));
+          new TaskAttemptEvent(attempt.getID(), TaskAttemptEventType.TA_DONE));
     mrApp.waitForState(task, TaskState.SUCCEEDED);
   }
 
@@ -574,26 +585,108 @@ public class TestRMContainerAllocator {
     Iterator<Task> it = job.getTasks().values().iterator();
 
     // Finish off 1 map so that map-progress is 10%
-    finishNextNTasks(mrApp, it, 1);
+    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 1);
     allocator.schedule();
     rmDispatcher.await();
     Assert.assertEquals(0.14f, job.getProgress(), 0.001f);
     Assert.assertEquals(0.14f, rmApp.getProgress(), 0.001f);
 
     // Finish off 5 more map so that map-progress is 60%
-    finishNextNTasks(mrApp, it, 5);
+    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 5);
     allocator.schedule();
     rmDispatcher.await();
     Assert.assertEquals(0.59f, job.getProgress(), 0.001f);
     Assert.assertEquals(0.59f, rmApp.getProgress(), 0.001f);
 
     // Finish off remaining map so that map-progress is 100%
-    finishNextNTasks(mrApp, it, 4);
+    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 4);
     allocator.schedule();
     rmDispatcher.await();
     Assert.assertEquals(0.95f, job.getProgress(), 0.001f);
     Assert.assertEquals(0.95f, rmApp.getProgress(), 0.001f);
   }
+  
+  @Test
+  public void testUpdatedNodes() throws Exception {
+    Configuration conf = new Configuration();
+    MyResourceManager rm = new MyResourceManager(conf);
+    rm.start();
+    DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
+        .getDispatcher();
+
+    // Submit the application
+    RMApp app = rm.submitApp(1024);
+    dispatcher.await();
+    MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
+    amNodeManager.nodeHeartbeat(true);
+    dispatcher.await();
+
+    ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
+        .getAppAttemptId();
+    rm.sendAMLaunched(appAttemptId);
+    dispatcher.await();
+    
+    JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
+    Job mockJob = mock(Job.class);
+    MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
+        appAttemptId, mockJob);
+
+    // add resources to scheduler
+    MockNM nm1 = rm.registerNode("h1:1234", 10240);
+    MockNM nm2 = rm.registerNode("h2:1234", 10240);
+    dispatcher.await();
+
+    // create the map container request
+    ContainerRequestEvent event = createReq(jobId, 1, 1024,
+        new String[] { "h1" });
+    allocator.sendRequest(event);
+    TaskAttemptId attemptId = event.getAttemptID();
+    
+    TaskAttempt mockTaskAttempt = mock(TaskAttempt.class);
+    when(mockTaskAttempt.getNodeId()).thenReturn(nm1.getNodeId());
+    Task mockTask = mock(Task.class);
+    when(mockTask.getAttempt(attemptId)).thenReturn(mockTaskAttempt);
+    when(mockJob.getTask(attemptId.getTaskId())).thenReturn(mockTask);
+
+    // this tells the scheduler about the requests
+    List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
+    dispatcher.await();
+
+    nm1.nodeHeartbeat(true);
+    dispatcher.await();
+    // get the assignment
+    assigned = allocator.schedule();
+    dispatcher.await();
+    Assert.assertEquals(1, assigned.size());
+    Assert.assertEquals(nm1.getNodeId(), assigned.get(0).getContainer().getNodeId());
+    // no updated nodes reported
+    Assert.assertTrue(allocator.getJobUpdatedNodeEvents().isEmpty());
+    Assert.assertTrue(allocator.getTaskAttemptKillEvents().isEmpty());
+    
+    // mark nodes bad
+    nm1.nodeHeartbeat(false);
+    nm2.nodeHeartbeat(false);
+    dispatcher.await();
+    
+    // schedule response returns updated nodes
+    assigned = allocator.schedule();
+    dispatcher.await();
+    Assert.assertEquals(0, assigned.size());
+    // updated nodes are reported
+    Assert.assertEquals(1, allocator.getJobUpdatedNodeEvents().size());
+    Assert.assertEquals(1, allocator.getTaskAttemptKillEvents().size());
+    Assert.assertEquals(2, allocator.getJobUpdatedNodeEvents().get(0).getUpdatedNodes().size());
+    Assert.assertEquals(attemptId, allocator.getTaskAttemptKillEvents().get(0).getTaskAttemptID());
+    allocator.getJobUpdatedNodeEvents().clear();
+    allocator.getTaskAttemptKillEvents().clear();
+    
+    assigned = allocator.schedule();
+    dispatcher.await();
+    Assert.assertEquals(0, assigned.size());
+    // no updated nodes reported
+    Assert.assertTrue(allocator.getJobUpdatedNodeEvents().isEmpty());
+    Assert.assertTrue(allocator.getTaskAttemptKillEvents().isEmpty());
+  }
 
   @Test
   public void testBlackListedNodes() throws Exception {
@@ -1100,7 +1193,10 @@ public class TestRMContainerAllocator {
   private static class MyContainerAllocator extends RMContainerAllocator {
     static final List<TaskAttemptContainerAssignedEvent> events
       = new ArrayList<TaskAttemptContainerAssignedEvent>();
-
+    static final List<TaskAttemptKillEvent> taskAttemptKillEvents
+      = new ArrayList<TaskAttemptKillEvent>();
+    static final List<JobUpdatedNodesEvent> jobUpdatedNodeEvents
+      = new ArrayList<JobUpdatedNodesEvent>();
     private MyResourceManager rm;
 
     private static AppContext createAppContext(
@@ -1119,6 +1215,10 @@ public class TestRMContainerAllocator {
           // Only capture interesting events.
           if (event instanceof TaskAttemptContainerAssignedEvent) {
             events.add((TaskAttemptContainerAssignedEvent) event);
+          } else if (event instanceof TaskAttemptKillEvent) {
+            taskAttemptKillEvents.add((TaskAttemptKillEvent)event);
+          } else if (event instanceof JobUpdatedNodesEvent) {
+            jobUpdatedNodeEvents.add((JobUpdatedNodesEvent)event);
           }
         }
       });
@@ -1202,6 +1302,14 @@ public class TestRMContainerAllocator {
       events.clear();
       return result;
     }
+    
+    List<TaskAttemptKillEvent> getTaskAttemptKillEvents() {
+      return taskAttemptKillEvents;
+    }
+    
+    List<JobUpdatedNodesEvent> getJobUpdatedNodeEvents() {
+      return jobUpdatedNodeEvents;
+    }
 
     @Override
     protected void startAllocatorThread() {
@@ -1239,6 +1347,18 @@ public class TestRMContainerAllocator {
         maxReduceRampupLimit, reduceSlowStart);
     verify(allocator, never()).setIsReduceStarted(true);
     
+    // verify slow-start still in effect when no more maps need to
+    // be scheduled but some have yet to complete
+    allocator.scheduleReduces(
+        totalMaps, succeededMaps,
+        0, scheduledReduces,
+        totalMaps - succeededMaps, assignedReduces,
+        mapResourceReqt, reduceResourceReqt,
+        numPendingReduces,
+        maxReduceRampupLimit, reduceSlowStart);
+    verify(allocator, never()).setIsReduceStarted(true);
+    verify(allocator, never()).scheduleAllReduces();
+
     succeededMaps = 3;
     allocator.scheduleReduces(
         totalMaps, succeededMaps, 

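The last hunk above verifies that slow-start stays in effect when every remaining map is merely incomplete rather than unscheduled. A minimal sketch of the gate being exercised (illustrative only; the real logic lives in RMContainerAllocator.scheduleReduces, and the names mirror the test's parameters):

    // Reduces stay pending until enough maps have succeeded, even when
    // no further maps are waiting to be scheduled.
    float minSucceededMaps = reduceSlowStart * totalMaps;
    if (succeededMaps < minSucceededMaps) {
      return; // neither setIsReduceStarted(true) nor scheduleAllReduces() runs
    }
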
Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java Thu Jun 28 06:59:38 2012
@@ -66,6 +66,7 @@ import org.apache.hadoop.yarn.SystemCloc
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -529,6 +530,11 @@ public class TestRuntimeEstimators {
     }
 
     @Override
+    public NodeId getNodeId() throws UnsupportedOperationException {
+      throw new UnsupportedOperationException();
+    }
+    
+    @Override
     public TaskAttemptId getID() {
       return myAttemptID;
     }

Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java Thu Jun 28 06:59:38 2012
@@ -23,6 +23,7 @@ import com.google.common.collect.Maps;
 import java.io.File;
 import java.io.IOException;
 import java.net.MalformedURLException;
+import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
 import java.net.URLClassLoader;
@@ -45,6 +46,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.LocalDirAllocator;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.MRConfig;
@@ -72,6 +74,8 @@ class LocalDistributedCacheManager {
   private List<String> localFiles = new ArrayList<String>();
   private List<String> localClasspaths = new ArrayList<String>();
   
+  private List<File> symlinksCreated = new ArrayList<File>();
+  
   private boolean setupCalled = false;
   
   /**
@@ -172,18 +176,51 @@ class LocalDistributedCacheManager {
               .size()])));
     }
     if (DistributedCache.getSymlink(conf)) {
-      // This is not supported largely because, 
-      // for a Child subprocess, the cwd in LocalJobRunner
-      // is not a fresh slate, but rather the user's working directory.
-      // This is further complicated because the logic in
-      // setupWorkDir only creates symlinks if there's a jarfile
-      // in the configuration.
-      LOG.warn("LocalJobRunner does not support " +
-          "symlinking into current working dir.");
+      File workDir = new File(System.getProperty("user.dir"));
+      URI[] archives = DistributedCache.getCacheArchives(conf);
+      URI[] files = DistributedCache.getCacheFiles(conf);
+      Path[] localArchives = DistributedCache.getLocalCacheArchives(conf);
+      Path[] localFiles = DistributedCache.getLocalCacheFiles(conf);
+      if (archives != null) {
+        for (int i = 0; i < archives.length; i++) {
+          String link = archives[i].getFragment();
+          String target = new File(localArchives[i].toUri()).getPath();
+          symlink(workDir, target, link);
+        }
+      }
+      if (files != null) {
+        for (int i = 0; i < files.length; i++) {
+          String link = files[i].getFragment();
+          String target = new File(localFiles[i].toUri()).getPath();
+          symlink(workDir, target, link);
+        }
+      }
     }
     setupCalled = true;
   }
   
+  /**
+   * Utility method for creating a symlink and warning on errors.
+   *
+   * If link is null, does nothing.
+   */
+  private void symlink(File workDir, String target, String link)
+      throws IOException {
+    if (link != null) {
+      link = workDir.toString() + Path.SEPARATOR + link;
+      File flink = new File(link);
+      if (!flink.exists()) {
+        LOG.info(String.format("Creating symlink: %s <- %s", target, link));
+        if (0 != FileUtil.symLink(target, link)) {
+          LOG.warn(String.format("Failed to create symlink: %s <- %s", target,
+              link));
+        } else {
+          symlinksCreated.add(new File(link));
+        }
+      }
+    }
+  }
+  
   /** 
   * Are there resources that should be added to the classpath?
    * Should be called after setup().
@@ -217,6 +254,12 @@ class LocalDistributedCacheManager {
   }
 
   public void close() throws IOException {
+    for (File symlink : symlinksCreated) {
+      if (!symlink.delete()) {
+        LOG.warn("Failed to delete symlink created by the local job runner: " +
+            symlink);
+      }
+    }
     FileContext localFSFileContext = FileContext.getLocalFSFileContext();
     for (String archive : localArchives) {
       localFSFileContext.delete(new Path(archive), true);

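For context on the symlink hunks above: the link names come from '#fragment' suffixes on distributed-cache URIs. A minimal job-side sketch (the path and link name are hypothetical):

    // A '#linkname' fragment asks the framework to symlink the localized
    // copy into the task's working directory.
    Configuration conf = new Configuration();
    DistributedCache.addCacheFile(new URI("file:///tmp/data.txt#data.link"), conf);
    DistributedCache.createSymlink(conf); // makes DistributedCache.getSymlink(conf) true
    // With this change, LocalJobRunner creates $PWD/data.link in setup()
    // and close() deletes it again.
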
Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java Thu Jun 28 06:59:38 2012
@@ -44,6 +44,7 @@ import org.apache.hadoop.mapreduce.v2.ut
 import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueState;
@@ -376,22 +377,27 @@ public class TypeConverter {
     }
     return reports;
   }
-
-  public static JobStatus.State fromYarn(YarnApplicationState state) {
-    switch (state) {
+  
+  public static State fromYarn(YarnApplicationState yarnApplicationState,
+      FinalApplicationStatus finalApplicationStatus) {
+    switch (yarnApplicationState) {
     case NEW:
     case SUBMITTED:
       return State.PREP;
     case RUNNING:
       return State.RUNNING;
     case FINISHED:
-      return State.SUCCEEDED;
+      if (finalApplicationStatus == FinalApplicationStatus.SUCCEEDED) {
+        return State.SUCCEEDED;
+      } else if (finalApplicationStatus == FinalApplicationStatus.KILLED) {
+        return State.KILLED;
+      }
     case FAILED:
       return State.FAILED;
     case KILLED:
       return State.KILLED;
     }
-    throw new YarnException("Unrecognized application state: " + state);
+    throw new YarnException("Unrecognized application state: " + yarnApplicationState);
   }
 
   private static final String TT_NAME_PREFIX = "tracker_";
@@ -417,7 +423,7 @@ public class TypeConverter {
       new JobStatus(
           TypeConverter.fromYarn(application.getApplicationId()),
           0.0f, 0.0f, 0.0f, 0.0f,
-          TypeConverter.fromYarn(application.getYarnApplicationState()),
+          TypeConverter.fromYarn(application.getYarnApplicationState(), application.getFinalApplicationStatus()),
           org.apache.hadoop.mapreduce.JobPriority.NORMAL,
           application.getUser(), application.getName(),
           application.getQueue(), jobFile, trackingUrl, false

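The effect of the new overload on FINISHED applications, shown as a few illustrative calls (these mirror the switch above and are not part of the commit):

    TypeConverter.fromYarn(YarnApplicationState.FINISHED,
        FinalApplicationStatus.SUCCEEDED); // -> State.SUCCEEDED
    TypeConverter.fromYarn(YarnApplicationState.FINISHED,
        FinalApplicationStatus.KILLED);    // -> State.KILLED
    TypeConverter.fromYarn(YarnApplicationState.FINISHED,
        FinalApplicationStatus.FAILED);    // falls through -> State.FAILED
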
Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestMRWithDistributedCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestMRWithDistributedCache.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestMRWithDistributedCache.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestMRWithDistributedCache.java Thu Jun 28 06:59:38 2012
@@ -23,6 +23,7 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.Arrays;
 import java.util.jar.JarOutputStream;
 import java.util.zip.ZipEntry;
 
@@ -61,6 +62,9 @@ import org.apache.hadoop.mapreduce.serve
 public class TestMRWithDistributedCache extends TestCase {
   private static Path TEST_ROOT_DIR =
     new Path(System.getProperty("test.build.data","/tmp"));
+  private static File symlinkFile = new File("distributed.first.symlink");
+  private static File expectedAbsentSymlinkFile =
+    new File("distributed.second.jar");
   private static Configuration conf = new Configuration();
   private static FileSystem localFs;
   static {
@@ -107,20 +111,17 @@ public class TestMRWithDistributedCache 
       TestCase.assertNotNull(cl.getResource("distributed.jar.inside3"));
       TestCase.assertNull(cl.getResource("distributed.jar.inside4"));
 
-
       // Check that the symlink for the renaming was created in the cwd;
-      // This only happens for real for non-local jobtrackers.
-      // (The symlinks exist in "localRunner/" for local Jobtrackers,
-      // but the user has no way to get at them.
-      if (!"local".equals(
-          context.getConfiguration().get(JTConfig.JT_IPC_ADDRESS))) {
-        File symlinkFile = new File("distributed.first.symlink");
-        TestCase.assertTrue("symlink distributed.first.symlink doesn't exist", symlinkFile.exists());
-        TestCase.assertEquals("symlink distributed.first.symlink length not 1", 1, symlinkFile.length());
-      }
+      TestCase.assertTrue("symlink distributed.first.symlink doesn't exist",
+          symlinkFile.exists());
+      TestCase.assertEquals("symlink distributed.first.symlink length not 1", 1,
+          symlinkFile.length());
+      
+      TestCase.assertFalse("second file should not be symlinked",
+          expectedAbsentSymlinkFile.exists());
     }
   }
-
+  
   private void testWithConf(Configuration conf) throws IOException,
       InterruptedException, ClassNotFoundException, URISyntaxException {
     // Create a temporary file of length 1.
@@ -144,11 +145,7 @@ public class TestMRWithDistributedCache 
     job.addFileToClassPath(second);
     job.addArchiveToClassPath(third);
     job.addCacheArchive(fourth.toUri());
-    
-    // don't create symlink for LocalJobRunner
-    if (!"local".equals(conf.get(JTConfig.JT_IPC_ADDRESS))) {
-      job.createSymlink();
-    }
+    job.createSymlink();
     job.setMaxMapAttempts(1); // speed up failures
 
     job.submit();
@@ -157,10 +154,17 @@ public class TestMRWithDistributedCache 
 
   /** Tests using the local job runner. */
   public void testLocalJobRunner() throws Exception {
+    symlinkFile.delete(); // ensure symlink is not present (e.g. if test is
+                          // killed part way through)
+    
     Configuration c = new Configuration();
     c.set(JTConfig.JT_IPC_ADDRESS, "local");
     c.set("fs.defaultFS", "file:///");
     testWithConf(c);
+    
+    assertFalse("Symlink not removed by local job runner",
+            // Symlink target will have gone so can't use File.exists()
+            Arrays.asList(new File(".").list()).contains(symlinkFile.getName()));
   }
 
   private Path createTempFile(String filename, String contents)

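The final assertion above lists the working directory rather than calling File.exists(), because exists() follows the link and its target has already been deleted. A standalone sketch of the pitfall (the link name is hypothetical):

    // File.exists() reports false for a dangling symlink, so a directory
    // listing is the reliable way to detect a leftover link.
    File link = new File("data.link");  // symlink whose target is gone
    link.exists();                      // false, even though the link remains
    Arrays.asList(new File(".").list())
        .contains(link.getName());      // true
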
Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java Thu Jun 28 06:59:38 2012
@@ -27,6 +27,7 @@ import org.apache.hadoop.mapreduce.v2.ap
 import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
@@ -45,7 +46,7 @@ public class TestTypeConverter {
   @Test
   public void testEnums() throws Exception {
     for (YarnApplicationState applicationState : YarnApplicationState.values()) {
-      TypeConverter.fromYarn(applicationState);
+      TypeConverter.fromYarn(applicationState, FinalApplicationStatus.FAILED);
     }
     
     for (TaskType taskType : TaskType.values()) {
@@ -63,8 +64,6 @@ public class TestTypeConverter {
     for (TaskState taskState : TaskState.values()) {
       TypeConverter.fromYarn(taskState);
     }
-    
-    
   }
   
   @Test

Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java Thu Jun 28 06:59:38 2012
@@ -18,30 +18,19 @@
 
 package org.apache.hadoop.mapreduce;
 
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.FileOutputStream;
 import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.io.OutputStreamWriter;
-import java.net.URL;
-import java.net.URLConnection;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
-import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration.IntegerRanges;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.RawComparator;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapreduce.filecache.DistributedCache;
@@ -1367,14 +1356,6 @@ public class Job extends JobContextImpl 
       Job.TaskStatusFilter filter, boolean profiling, IntegerRanges mapRanges,
       IntegerRanges reduceRanges) throws IOException, InterruptedException {
     for (TaskCompletionEvent event : events) {
-      TaskCompletionEvent.Status status = event.getStatus();
-      if (profiling && shouldDownloadProfile() &&
-         (status == TaskCompletionEvent.Status.SUCCEEDED ||
-            status == TaskCompletionEvent.Status.FAILED) &&
-            (event.isMapTask() ? mapRanges : reduceRanges).
-              isIncluded(event.idWithinJob())) {
-        downloadProfile(event);
-      }
       switch (filter) {
       case NONE:
         break;
@@ -1382,7 +1363,6 @@ public class Job extends JobContextImpl 
         if (event.getStatus() == 
           TaskCompletionEvent.Status.SUCCEEDED) {
           LOG.info(event.toString());
-          displayTaskLogs(event.getTaskAttemptId(), event.getTaskTrackerHttp());
         }
         break; 
       case FAILED:
@@ -1397,8 +1377,6 @@ public class Job extends JobContextImpl 
               System.err.println(diagnostics);
             }
           }
-          // Displaying the task logs
-          displayTaskLogs(event.getTaskAttemptId(), event.getTaskTrackerHttp());
         }
         break; 
       case KILLED:
@@ -1408,67 +1386,10 @@ public class Job extends JobContextImpl 
         break; 
       case ALL:
         LOG.info(event.toString());
-        displayTaskLogs(event.getTaskAttemptId(), event.getTaskTrackerHttp());
         break;
       }
     }
   }
-  
-  private void downloadProfile(TaskCompletionEvent e) throws IOException  {
-    URLConnection connection = new URL(
-      getTaskLogURL(e.getTaskAttemptId(), e.getTaskTrackerHttp()) + 
-      "&filter=profile").openConnection();
-    InputStream in = connection.getInputStream();
-    OutputStream out = new FileOutputStream(e.getTaskAttemptId() + ".profile");
-    IOUtils.copyBytes(in, out, 64 * 1024, true);
-  }
-  
-  private void displayTaskLogs(TaskAttemptID taskId, String baseUrl)
-      throws IOException {
-    // The tasktracker for a 'failed/killed' job might not be around...
-    if (baseUrl != null) {
-      // Construct the url for the tasklogs
-      String taskLogUrl = getTaskLogURL(taskId, baseUrl);
-      
-      // Copy tasks's stdout of the JobClient
-      getTaskLogs(taskId, new URL(taskLogUrl+"&filter=stdout"), System.out);
-        
-      // Copy task's stderr to stderr of the JobClient 
-      getTaskLogs(taskId, new URL(taskLogUrl+"&filter=stderr"), System.err);
-    }
-  }
-    
-  private void getTaskLogs(TaskAttemptID taskId, URL taskLogUrl, 
-                           OutputStream out) {
-    try {
-      int tasklogtimeout = cluster.getConf().getInt(
-        TASKLOG_PULL_TIMEOUT_KEY, DEFAULT_TASKLOG_TIMEOUT);
-      URLConnection connection = taskLogUrl.openConnection();
-      connection.setReadTimeout(tasklogtimeout);
-      connection.setConnectTimeout(tasklogtimeout);
-      BufferedReader input = 
-        new BufferedReader(new InputStreamReader(connection.getInputStream()));
-      BufferedWriter output = 
-        new BufferedWriter(new OutputStreamWriter(out));
-      try {
-        String logData = null;
-        while ((logData = input.readLine()) != null) {
-          if (logData.length() > 0) {
-            output.write(taskId + ": " + logData + "\n");
-            output.flush();
-          }
-        }
-      } finally {
-        input.close();
-      }
-    } catch(IOException ioe) {
-      LOG.warn("Error reading task output " + ioe.getMessage()); 
-    }
-  }
-  
-  private String getTaskLogURL(TaskAttemptID taskId, String baseUrl) {
-    return (baseUrl + "/tasklog?plaintext=true&attemptid=" + taskId); 
-  }
 
   /** The interval at which monitorAndPrintJob() prints status */
   public static int getProgressPollInterval(Configuration conf) {

Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java Thu Jun 28 06:59:38 2012
@@ -27,12 +27,18 @@ import org.apache.hadoop.fs.permission.F
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
 /**
  * A utility to manage job submission files.
  */
 @InterfaceAudience.Private
 public class JobSubmissionFiles {
 
+  private final static Log LOG = LogFactory.getLog(JobSubmissionFiles.class);
+
   // job submission directory is private!
   final public static FsPermission JOB_DIR_PERMISSION =
     FsPermission.createImmutable((short) 0700); // rwx--------
@@ -102,14 +108,18 @@ public class JobSubmissionFiles {
     if (fs.exists(stagingArea)) {
       FileStatus fsStatus = fs.getFileStatus(stagingArea);
       String owner = fsStatus.getOwner();
-      if (!(owner.equals(currentUser) || owner.equals(realUser)) || 
-          !fsStatus.getPermission().equals(JOB_DIR_PERMISSION)) {
-         throw new IOException("The ownership/permissions on the staging " +
-                      "directory " + stagingArea + " is not as expected. " + 
-                      "It is owned by " + owner + " and permissions are "+ 
-                      fsStatus.getPermission() + ". The directory must " +
+      if (!(owner.equals(currentUser) || owner.equals(realUser))) {
+         throw new IOException("The ownership on the staging directory " +
+                      stagingArea + " is not as expected. " +
+                      "It is owned by " + owner + ". The directory must " +
                       "be owned by the submitter " + currentUser + " or " +
-                      "by " + realUser + " and permissions must be rwx------");
+                      "by " + realUser);
+      }
+      if (!fsStatus.getPermission().equals(JOB_DIR_PERMISSION)) {
+        LOG.info("Permissions on staging directory " + stagingArea + " are " +
+          "incorrect: " + fsStatus.getPermission() + ". Fixing permissions " +
+          "to correct value " + JOB_DIR_PERMISSION);
+        fs.setPermission(stagingArea, JOB_DIR_PERMISSION);
       }
     } else {
       fs.mkdirs(stagingArea, 
@@ -118,4 +128,4 @@ public class JobSubmissionFiles {
     return stagingArea;
   }
   
-}
\ No newline at end of file
+}

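In caller-visible terms, the hunk above splits the old combined check in two. A sketch of what a submitter now sees (getStagingDir is the real entry point; the Cluster setup is elided):

    Path stagingArea = JobSubmissionFiles.getStagingDir(cluster, conf);
    // Wrong owner:      still fails with an IOException, as before.
    // Wrong permission: now logged and reset to JOB_DIR_PERMISSION (0700)
    //                   instead of failing the submission.
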
Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java Thu Jun 28 06:59:38 2012
@@ -282,9 +282,12 @@ public class JobHistoryParser {
       if(attemptInfo.getAttemptId().equals(taskInfo.getSuccessfulAttemptId()))
       {
         // the failed attempt is the one that made this task successful
-        // so its no longer successful
+        // so its no longer successful. Reset fields set in
+        // handleTaskFinishedEvent()
+        taskInfo.counters = null;
+        taskInfo.finishTime = -1;
         taskInfo.status = null;
-        // not resetting the other fields set in handleTaskFinishedEvent()
+        taskInfo.successfulAttemptId = null;
       }
     }
   }

Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleScheduler.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleScheduler.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleScheduler.java Thu Jun 28 06:59:38 2012
@@ -137,24 +137,26 @@ class ShuffleScheduler<K,V> {
 
       // update the status
       totalBytesShuffledTillNow += bytes;
-      float mbs = (float) totalBytesShuffledTillNow / (1024 * 1024);
-      int mapsDone = totalMaps - remainingMaps;
-      long secsSinceStart = 
-        (System.currentTimeMillis()-startTime)/1000+1;
-
-      float transferRate = mbs/secsSinceStart;
-      progress.set((float) mapsDone / totalMaps);
-      String statusString = mapsDone + " / " + totalMaps + " copied.";
-      status.setStateString(statusString);
-      progress.setStatus("copy(" + mapsDone + " of " + totalMaps 
-          + " at " +
-          mbpsFormat.format(transferRate) +  " MB/s)");
-      
+      updateStatus();
       reduceShuffleBytes.increment(bytes);
       lastProgressTime = System.currentTimeMillis();
-      LOG.debug("map " + mapId + " done " + statusString);
+      LOG.debug("map " + mapId + " done " + status.getStateString());
     }
   }
+  
+  private void updateStatus() {
+    float mbs = (float) totalBytesShuffledTillNow / (1024 * 1024);
+    int mapsDone = totalMaps - remainingMaps;
+    long secsSinceStart = (System.currentTimeMillis() - startTime) / 1000 + 1;
+
+    float transferRate = mbs / secsSinceStart;
+    progress.set((float) mapsDone / totalMaps);
+    String statusString = mapsDone + " / " + totalMaps + " copied.";
+    status.setStateString(statusString);
+
+    progress.setStatus("copy(" + mapsDone + " of " + totalMaps + " at "
+        + mbpsFormat.format(transferRate) + " MB/s)");
+  }
 
   public synchronized void copyFailed(TaskAttemptID mapId, MapHost host,
                                       boolean readError) {
@@ -256,7 +258,13 @@ class ShuffleScheduler<K,V> {
   }
   
   public synchronized void tipFailed(TaskID taskId) {
-    finishedMaps[taskId.getId()] = true;
+    if (!finishedMaps[taskId.getId()]) {
+      finishedMaps[taskId.getId()] = true;
+      if (--remainingMaps == 0) {
+        notifyAll();
+      }
+      updateStatus();
+    }
   }
   
   public synchronized void addKnownMapOutput(String hostName, 

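The notifyAll() added to tipFailed() matters because the reduce side blocks until every map is accounted for. A sketch of the waiting side (assuming a waitUntilDone-style method, as in the real scheduler; illustrative only):

    public synchronized boolean waitUntilDone(int millis) throws InterruptedException {
      if (remainingMaps > 0) {
        wait(millis); // woken by copySucceeded() or, with this fix, tipFailed()
        return remainingMaps == 0;
      }
      return true;
    }
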
Propchange: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml:r1346682-1354801

Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java Thu Jun 28 06:59:38 2012
@@ -30,6 +30,7 @@ import org.apache.hadoop.mapreduce.v2.ap
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.util.Records;
 
 public class CompletedTaskAttempt implements TaskAttempt {
@@ -58,6 +59,11 @@ public class CompletedTaskAttempt implem
   }
 
   @Override
+  public NodeId getNodeId() throws UnsupportedOperationException {
+    throw new UnsupportedOperationException();
+  }
+  
+  @Override
   public ContainerId getAssignedContainerID() {
     return attemptInfo.getContainerId();
   }

Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestResourceMgrDelegate.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestResourceMgrDelegate.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestResourceMgrDelegate.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestResourceMgrDelegate.java Thu Jun 28 06:59:38 2012
@@ -19,13 +19,26 @@
 package org.apache.hadoop.mapred;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 
 import junit.framework.Assert;
 
+import org.apache.hadoop.mapreduce.JobStatus;
+import org.apache.hadoop.mapreduce.JobStatus.State;
 import org.apache.hadoop.yarn.api.ClientRMProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.util.Records;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
 import org.mockito.Mockito;
@@ -35,7 +48,7 @@ public class TestResourceMgrDelegate {
   /**
    * Tests that getRootQueues makes a request for the (recursive) child queues
    */
-@Test
+  @Test
   public void testGetRootQueues() throws IOException, InterruptedException {
     ClientRMProtocol applicationsManager = Mockito.mock(ClientRMProtocol.class);
     GetQueueInfoResponse response = Mockito.mock(GetQueueInfoResponse.class);
@@ -60,4 +73,56 @@ public class TestResourceMgrDelegate {
       argument.getValue().getRecursive());
   }
 
+  @Test
+  public void testAllJobs() throws Exception {
+    ClientRMProtocol applicationsManager = Mockito.mock(ClientRMProtocol.class);
+    GetAllApplicationsResponse allApplicationsResponse = Records
+        .newRecord(GetAllApplicationsResponse.class);
+    List<ApplicationReport> applications = new ArrayList<ApplicationReport>();
+    applications.add(getApplicationReport(YarnApplicationState.FINISHED,
+        FinalApplicationStatus.FAILED));
+    applications.add(getApplicationReport(YarnApplicationState.FINISHED,
+        FinalApplicationStatus.SUCCEEDED));
+    applications.add(getApplicationReport(YarnApplicationState.FINISHED,
+        FinalApplicationStatus.KILLED));
+    applications.add(getApplicationReport(YarnApplicationState.FAILED,
+        FinalApplicationStatus.FAILED));
+    allApplicationsResponse.setApplicationList(applications);
+    Mockito.when(
+        applicationsManager.getAllApplications(Mockito
+            .any(GetAllApplicationsRequest.class))).thenReturn(
+        allApplicationsResponse);
+    ResourceMgrDelegate resourceMgrDelegate = new ResourceMgrDelegate(
+        new YarnConfiguration(), applicationsManager);
+    JobStatus[] allJobs = resourceMgrDelegate.getAllJobs();
+
+    Assert.assertEquals(State.FAILED, allJobs[0].getState());
+    Assert.assertEquals(State.SUCCEEDED, allJobs[1].getState());
+    Assert.assertEquals(State.KILLED, allJobs[2].getState());
+    Assert.assertEquals(State.FAILED, allJobs[3].getState());
+  }
+
+  private ApplicationReport getApplicationReport(
+      YarnApplicationState yarnApplicationState,
+      FinalApplicationStatus finalApplicationStatus) {
+    ApplicationReport appReport = Mockito.mock(ApplicationReport.class);
+    ApplicationResourceUsageReport appResources = Mockito
+        .mock(ApplicationResourceUsageReport.class);
+    Mockito.when(appReport.getApplicationId()).thenReturn(
+        Records.newRecord(ApplicationId.class));
+    Mockito.when(appResources.getNeededResources()).thenReturn(
+        Records.newRecord(Resource.class));
+    Mockito.when(appResources.getReservedResources()).thenReturn(
+        Records.newRecord(Resource.class));
+    Mockito.when(appResources.getUsedResources()).thenReturn(
+        Records.newRecord(Resource.class));
+    Mockito.when(appReport.getApplicationResourceUsageReport()).thenReturn(
+        appResources);
+    Mockito.when(appReport.getYarnApplicationState()).thenReturn(
+        yarnApplicationState);
+    Mockito.when(appReport.getFinalApplicationStatus()).thenReturn(
+        finalApplicationStatus);
+
+    return appReport;
+  }
 }

Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java Thu Jun 28 06:59:38 2012
@@ -38,5 +38,9 @@ public enum NodeState {
   LOST, 
   
   /** Node has rebooted */
-  REBOOTED
-}
\ No newline at end of file
+  REBOOTED;
+  
+  public boolean isUnusable() {
+    return (this == UNHEALTHY || this == DECOMMISSIONED || this == LOST);
+  }
+}

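A sketch of the intended use of the new helper when consuming node updates (the surrounding NodeReport handling is assumed and not part of this file):

    for (NodeReport report : updatedNodes) {
      if (report.getNodeState().isUnusable()) {
        // e.g. kill task attempts assigned to this node
        handleUnusableNode(report.getNodeId()); // hypothetical handler
      }
    }
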
Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java Thu Jun 28 06:59:38 2012
@@ -44,7 +44,6 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.AMRMProtocol;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.ContainerManager;
@@ -635,12 +634,10 @@ public class ApplicationMaster {
       ctx.setContainerId(container.getId());
       ctx.setResource(container.getResource());
 
-      try {
-        ctx.setUser(UserGroupInformation.getCurrentUser().getShortUserName());
-      } catch (IOException e) {
-        LOG.info("Getting current user info failed when trying to launch the container"
-            + e.getMessage());
-      }
+      String jobUserName = System.getenv(ApplicationConstants.Environment.USER
+          .name());
+      ctx.setUser(jobUserName);
+      LOG.info("Setting user in ContainerLaunchContext to: " + jobUserName);
 
       // Set the environment 
       ctx.setEnvironment(shellEnv);

Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java Thu Jun 28 06:59:38 2012
@@ -95,7 +95,7 @@ import org.apache.hadoop.yarn.util.Recor
  * 
  * <p> For the actual job submission, the client first has to create an {@link ApplicationSubmissionContext}. 
  * The {@link ApplicationSubmissionContext} defines the application details such as {@link ApplicationId} 
- * and application name, user submitting the application, the priority assigned to the application and the queue 
+ * and application name, the priority assigned to the application and the queue
  * to which this application needs to be assigned. In addition to this, the {@link ApplicationSubmissionContext}
  * also defines the {@link ContainerLaunchContext} which describes the <code>Container</code> with which 
  * the {@link ApplicationMaster} is launched. </p>
@@ -132,8 +132,6 @@ public class Client {
   private int amPriority = 0;
   // Queue for App master
   private String amQueue = "";
-  // User to run app master as
-  private String amUser = "";
   // Amt. of memory resource to request for to run the App Master
   private int amMemory = 10; 
 
@@ -221,6 +219,7 @@ public class Client {
    * Parse command line options
    * @param args Parsed command line options 
    * @return Whether the init was successful to run the client
+   * @throws ParseException
    */
   public boolean init(String[] args) throws ParseException {
 
@@ -228,7 +227,6 @@ public class Client {
     opts.addOption("appname", true, "Application Name. Default value - DistributedShell");
     opts.addOption("priority", true, "Application Priority. Default 0");
     opts.addOption("queue", true, "RM Queue in which this application is to be submitted");
-    opts.addOption("user", true, "User to run the application as");
     opts.addOption("timeout", true, "Application timeout in milliseconds");
     opts.addOption("master_memory", true, "Amount of memory in MB to be requested to run the application master");
     opts.addOption("jar", true, "Jar file containing the application master");
@@ -262,8 +260,7 @@ public class Client {
 
     appName = cliParser.getOptionValue("appname", "DistributedShell");
     amPriority = Integer.parseInt(cliParser.getOptionValue("priority", "0"));
-    amQueue = cliParser.getOptionValue("queue", "");
-    amUser = cliParser.getOptionValue("user", "");
+    amQueue = cliParser.getOptionValue("queue", "default");
     amMemory = Integer.parseInt(cliParser.getOptionValue("master_memory", "10"));		
 
     if (amMemory < 0) {
@@ -356,6 +353,7 @@ public class Client {
     }
 
     GetQueueInfoRequest queueInfoReq = Records.newRecord(GetQueueInfoRequest.class);
+    queueInfoReq.setQueueName(this.amQueue);
     GetQueueInfoResponse queueInfoResp = applicationsManager.getQueueInfo(queueInfoReq);		
     QueueInfo queueInfo = queueInfoResp.getQueueInfo();
     LOG.info("Queue info"
@@ -567,10 +565,6 @@ public class Client {
     commands.add(command.toString());		
     amContainer.setCommands(commands);
 
-    // For launching an AM Container, setting user here is not needed
-    // Set user in ApplicationSubmissionContext
-    // amContainer.setUser(amUser);
-
     // Set up resource type requirements
     // For now, only memory is supported so we set memory requirements
     Resource capability = Records.newRecord(Resource.class);
@@ -594,9 +588,6 @@ public class Client {
 
     // Set the queue to which this application is to be submitted in the RM
     appContext.setQueue(amQueue);
-    // Set the user submitting this application 
-    // TODO can it be empty? 
-    appContext.setUser(amUser);
 
     // Create the request to send to the applications manager 
     SubmitApplicationRequest appRequest = Records.newRecord(SubmitApplicationRequest.class);

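With the user option gone, the queue is the remaining routing knob, now defaulting to "default", and the queue-info lookup must name the queue explicitly (an unset name gives the RM nothing to resolve). A hedged sketch of the lookup, where applicationsManager stands for an already-connected ClientRMProtocol proxy:

    import org.apache.hadoop.yarn.api.ClientRMProtocol;
    import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
    import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
    import org.apache.hadoop.yarn.api.records.QueueInfo;
    import org.apache.hadoop.yarn.util.Records;

    public class QueueLookupSketch {
      static QueueInfo describeQueue(ClientRMProtocol applicationsManager,
          String queue) throws Exception {
        GetQueueInfoRequest req = Records.newRecord(GetQueueInfoRequest.class);
        req.setQueueName(queue);  // required: the RM resolves the queue by name
        GetQueueInfoResponse resp = applicationsManager.getQueueInfo(req);
        return resp.getQueueInfo();
      }
    }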
Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java Thu Jun 28 06:59:38 2012
@@ -28,6 +28,7 @@ import java.util.concurrent.LinkedBlocki
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.service.AbstractService;
 
@@ -127,7 +128,8 @@ public class AsyncDispatcher extends Abs
     catch (Throwable t) {
       //TODO Maybe log the state of the queue
       LOG.fatal("Error in dispatcher thread", t);
-      if (exitOnDispatchException) {
+      if (exitOnDispatchException
+          && (ShutdownHookManager.get().isShutdownInProgress()) == false) {
         LOG.info("Exiting, bbye..");
         System.exit(-1);
       }

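This guard (repeated in ResourceManager.java below) keeps an error-handling thread from calling System.exit() while a JVM shutdown is already in progress, where a second exit can stall against running shutdown hooks. The pattern in isolation, as a sketch:

    import org.apache.hadoop.util.ShutdownHookManager;

    public class ExitGuardSketch {
      // Only force-exit when no shutdown is already underway; otherwise
      // let the in-progress shutdown (and its hooks) run to completion.
      static void maybeExit(boolean exitOnError) {
        if (exitOnError && !ShutdownHookManager.get().isShutdownInProgress()) {
          System.exit(-1);
        }
      }
    }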
Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml Thu Jun 28 06:59:38 2012
@@ -47,47 +47,37 @@
       <build>
         <plugins>
           <plugin>
-            <groupId>org.codehaus.mojo</groupId>
-            <artifactId>make-maven-plugin</artifactId>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-antrun-plugin</artifactId>
             <executions>
               <execution>
-                <id>compile</id>
+                <id>make</id>
                 <phase>compile</phase>
-                <goals>
-                  <goal>autoreconf</goal>
-                  <goal>configure</goal>
-                  <goal>make-install</goal>
-                </goals>
+                <goals><goal>run</goal></goals>
+                <configuration>
+                  <target>
+                    <mkdir dir="${project.build.directory}/native/target"/>
+                    <exec executable="cmake" dir="${project.build.directory}/native" failonerror="true">
+                      <arg line="${basedir}/src/ -DHADOOP_CONF_DIR=${container-executor.conf.dir} -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model}"/>
+                      <env key="CFLAGS" value="${container-executor.additional_cflags}"/>
+                    </exec>
+                    <exec executable="make" dir="${project.build.directory}/native" failonerror="true">
+                      <arg line="VERBOSE=1"/>
+                    </exec>
+                  </target>
+                </configuration>
               </execution>
               <execution>
-                <id>test</id>
+                <id>native_tests</id>
                 <phase>test</phase>
-                <goals>
-                  <goal>test</goal>
-                </goals>
+                <goals><goal>run</goal></goals>
+                <configuration>
+                  <target>
+                    <exec executable="test-container-executor" dir="${project.build.directory}/native" failonerror="true">
+                    </exec>
+                  </target>
+                </configuration>
               </execution>
             </executions>
-            <configuration>
-              <!-- autoreconf settings -->
-              <workDir>${project.build.directory}/native/container-executor</workDir>
-              <arguments>
-                <argument>-i</argument>
-              </arguments>
-
-              <!-- configure settings -->
-              <configureEnvironment>
-                <property>
-                  <name>CFLAGS</name>
-                  <value>-DHADOOP_CONF_DIR=${container-executor.conf.dir} ${container-executor.additional_cflags}</value>
-                </property>
-              </configureEnvironment>
-              <configureWorkDir>${project.build.directory}/native/container-executor</configureWorkDir>
-              <prefix>/usr/local</prefix>
-
-              <!-- configure & make settings -->
-              <destDir>${project.build.directory}/native/target</destDir>
-
-            </configuration>
           </plugin>
         </plugins>
       </build>
@@ -172,14 +162,6 @@
             <goals>
               <goal>run</goal>
             </goals>
-            <configuration>
-              <target>
-                <mkdir dir="${project.build.directory}/native"/>
-                <copy toDir="${project.build.directory}/native">
-                  <fileset dir="${basedir}/src/main/native"/>
-                </copy>
-              </target>
-            </configuration>
           </execution>
         </executions>
       </plugin>

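The autotools-specific make-maven-plugin goals (autoreconf/configure/make-install) are replaced by maven-antrun-plugin targets that shell out to cmake and make, and the source-copying antrun step further down becomes unnecessary since cmake can build out-of-tree directly from ${basedir}/src. As a rough illustration, the "make" execution amounts to two process invocations; this is a Java sketch with placeholder paths, where the real values come from the Maven properties in the pom above:

    import java.io.File;

    public class NativeBuildSketch {
      public static void main(String[] args) throws Exception {
        File nativeDir = new File("target/native");
        nativeDir.mkdirs();
        // cmake <srcdir> -DHADOOP_CONF_DIR=... -DJVM_ARCH_DATA_MODEL=...
        run(nativeDir, "cmake", "/path/to/src",
            "-DHADOOP_CONF_DIR=/etc/hadoop", "-DJVM_ARCH_DATA_MODEL=64");
        run(nativeDir, "make", "VERBOSE=1");
      }

      static void run(File dir, String... cmd) throws Exception {
        Process p = new ProcessBuilder(cmd).directory(dir).inheritIO().start();
        if (p.waitFor() != 0) {
          throw new RuntimeException(cmd[0] + " failed");
        }
      }
    }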
Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ApplicationPage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ApplicationPage.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ApplicationPage.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ApplicationPage.java Thu Jun 28 06:59:38 2012
@@ -45,9 +45,6 @@ public class ApplicationPage extends NMV
   @Override protected void preHead(Page.HTML<_> html) {
     commonPreHead(html);
 
-    // Per-app information. Helps to refresh automatically.
-    html.meta_http("refresh", "10");
-
     set(DATATABLES_ID, "containers");
     set(initID(DATATABLES, "containers"), containersTableInit());
     setTableStyles(html, "containers");

Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java Thu Jun 28 06:59:38 2012
@@ -41,9 +41,6 @@ public class ContainerPage extends NMVie
   protected void preHead(Page.HTML<_> html) {
     commonPreHead(html);
 
-    // Per-container information. Helps to refresh automatically.
-    html.meta_http("refresh", "10");
-
     setTitle("Container " + $(CONTAINER_ID));
     set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}");
   }

Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java Thu Jun 28 06:59:38 2012
@@ -42,9 +42,6 @@ public class NodePage extends NMView {
   protected void commonPreHead(HTML<_> html) {
     super.commonPreHead(html);
 
-    // Node summary page. Helps to refresh automatically.
-    html.meta_http("refresh", "10");
-
     set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}");
   }
 

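ApplicationPage, ContainerPage, and NodePage all drop the same unconditional 10-second meta-refresh header; the ResourceManager's AppPage gets the identical treatment further down. The pages now render once instead of forcing a browser reload every 10 seconds.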
Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c Thu Jun 28 06:59:38 2012
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+#include "config.h"
 #include "configuration.h"
 #include "container-executor.h"
 
@@ -29,8 +30,6 @@
 #include <string.h>
 #include <sys/stat.h>
 
-#define _STRINGIFY(X) #X
-#define STRINGIFY(X) _STRINGIFY(X)
 #define CONF_FILENAME "container-executor.cfg"
 
 // When building as part of a Maven build this value gets defined by using
@@ -101,7 +100,7 @@ int main(int argc, char **argv) {
 
   char *executable_file = get_executable();
 
-  char *orig_conf_file = STRINGIFY(HADOOP_CONF_DIR) "/" CONF_FILENAME;
+  char *orig_conf_file = HADOOP_CONF_DIR "/" CONF_FILENAME;
   char *conf_file = resolve_config_path(orig_conf_file, argv[0]);
   char *local_dirs, *log_dirs;
 

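With the CMake build in place, HADOOP_CONF_DIR presumably arrives as an already-quoted string constant in the generated config.h (via CMake's configure_file step), so the two-step STRINGIFY macro over a bare -D define is no longer needed and the path concatenates directly with CONF_FILENAME.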
Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java Thu Jun 28 06:59:38 2012
@@ -20,7 +20,6 @@ package org.apache.hadoop.yarn.server.re
 
 
 import java.io.IOException;
-import java.net.InetAddress;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
 
@@ -48,8 +47,8 @@ import org.apache.hadoop.yarn.server.res
 import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.Recoverable;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store.RMState;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store.RMState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
@@ -327,7 +326,8 @@ public class ResourceManager extends Com
           } catch (Throwable t) {
             LOG.fatal("Error in handling event type " + event.getType()
                 + " to the scheduler", t);
-            if (shouldExitOnError) {
+            if (shouldExitOnError
+                && !ShutdownHookManager.get().isShutdownInProgress()) {
               LOG.info("Exiting, bbye..");
               System.exit(-1);
             }

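Same shutdown-in-progress guard as in AsyncDispatcher above (see the sketch there): the scheduler's event-handling thread still logs the fatal error but skips System.exit() when the JVM is already shutting down.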
Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java Thu Jun 28 06:59:38 2012
@@ -80,13 +80,13 @@ public class CapacitySchedulerConfigurat
   DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT = 0.1f;
   
   @Private
-  public static final int UNDEFINED = -1;
+  public static final float UNDEFINED = -1;
   
   @Private
-  public static final int MINIMUM_CAPACITY_VALUE = 1;
+  public static final float MINIMUM_CAPACITY_VALUE = 1;
   
   @Private
-  public static final int MAXIMUM_CAPACITY_VALUE = 100;
+  public static final float MAXIMUM_CAPACITY_VALUE = 100;
   
   @Private
   public static final int DEFAULT_USER_LIMIT = 100;
@@ -132,8 +132,8 @@ public class CapacitySchedulerConfigurat
         DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT);
   }
   
-  public int getCapacity(String queue) {
-    int capacity = getInt(getQueuePrefix(queue) + CAPACITY, UNDEFINED);
+  public float getCapacity(String queue) {
+    float capacity = getFloat(getQueuePrefix(queue) + CAPACITY, UNDEFINED);
     if (capacity < MINIMUM_CAPACITY_VALUE || capacity > MAXIMUM_CAPACITY_VALUE) {
       throw new IllegalArgumentException("Illegal " +
       		"capacity of " + capacity + " for queue " + queue);
@@ -143,31 +143,31 @@ public class CapacitySchedulerConfigurat
     return capacity;
   }
   
-  public void setCapacity(String queue, int capacity) {
-    setInt(getQueuePrefix(queue) + CAPACITY, capacity);
+  public void setCapacity(String queue, float capacity) {
+    setFloat(getQueuePrefix(queue) + CAPACITY, capacity);
     LOG.debug("CSConf - setCapacity: queuePrefix=" + getQueuePrefix(queue) + 
         ", capacity=" + capacity);
   }
 
-  public int getMaximumCapacity(String queue) {
-    int maxCapacity = 
-      getInt(getQueuePrefix(queue) + MAXIMUM_CAPACITY, MAXIMUM_CAPACITY_VALUE);
+  public float getMaximumCapacity(String queue) {
+    float maxCapacity = getFloat(getQueuePrefix(queue) + MAXIMUM_CAPACITY,
+        MAXIMUM_CAPACITY_VALUE);
     return maxCapacity;
   }
   
-  public void setMaximumCapacity(String queue, int maxCapacity) {
+  public void setMaximumCapacity(String queue, float maxCapacity) {
     if (maxCapacity > MAXIMUM_CAPACITY_VALUE) {
       throw new IllegalArgumentException("Illegal " +
           "maximum-capacity of " + maxCapacity + " for queue " + queue);
     }
-    setInt(getQueuePrefix(queue) + MAXIMUM_CAPACITY, maxCapacity);
+    setFloat(getQueuePrefix(queue) + MAXIMUM_CAPACITY, maxCapacity);
     LOG.debug("CSConf - setMaxCapacity: queuePrefix=" + getQueuePrefix(queue) + 
         ", maxCapacity=" + maxCapacity);
   }
   
   public int getUserLimit(String queue) {
-    int userLimit = 
-      getInt(getQueuePrefix(queue) + USER_LIMIT, DEFAULT_USER_LIMIT);
+    int userLimit = getInt(getQueuePrefix(queue) + USER_LIMIT,
+        DEFAULT_USER_LIMIT);
     return userLimit;
   }
 

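Switching capacities from int to float lets queue shares be fractional percentages rather than whole numbers; ParentQueue's root-capacity check below follows suit, as do the tests. A short usage sketch under the new signatures (queue names illustrative; sibling capacities should still sum to 100):

    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;

    public class FractionalCapacitySketch {
      public static void main(String[] args) {
        CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
        conf.setCapacity(CapacitySchedulerConfiguration.ROOT + ".a", 10.5f);
        conf.setCapacity(CapacitySchedulerConfiguration.ROOT + ".b", 89.5f);
        // getCapacity now returns the float back (10.5), where the old
        // int-based API could only have stored 10 or 11.
        System.out.println(conf.getCapacity(CapacitySchedulerConfiguration.ROOT + ".a"));
      }
    }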
Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java Thu Jun 28 06:59:38 2012
@@ -1180,9 +1180,16 @@ public class LeafQueue implements CSQueu
     if (UserGroupInformation.isSecurityEnabled()) {
       ContainerTokenIdentifier tokenIdentifier = new ContainerTokenIdentifier(
           containerId, nodeId.toString(), capability);
-      containerToken = BuilderUtils.newContainerToken(nodeId, ByteBuffer
-          .wrap(containerTokenSecretManager
-              .createPassword(tokenIdentifier)), tokenIdentifier);
+      try {
+        containerToken = BuilderUtils.newContainerToken(nodeId, ByteBuffer
+            .wrap(containerTokenSecretManager
+                .createPassword(tokenIdentifier)), tokenIdentifier);
+      } catch (IllegalArgumentException e) {
+         // this could be because DNS is down - in which case we just want
+         // to retry and not bring RM down
+         LOG.error("Error trying to create new container", e);
+         return null;
+      }
     }
 
     // Create the container
@@ -1211,6 +1218,11 @@ public class LeafQueue implements CSQueu
     // Create the container if necessary
     Container container = 
         getContainer(rmContainer, application, node, capability, priority);
+
+    // something went wrong getting/creating the container 
+    if (container == null) {
+      return Resources.none();
+    }
 
     // Can we allocate a container on this node?
     int availableContainers = 

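Taken together, the two hunks make container creation fail-soft: minting a ContainerToken can throw IllegalArgumentException on transient failures (the comment suggests DNS being down), and instead of letting that unwind the RM, getContainer returns null and the allocation path backs off. A condensed view of the resulting control flow, using the same names as the diff:

    // A null container means token creation failed transiently, so skip
    // this allocation and let a later scheduling pass retry, rather than
    // bringing the ResourceManager down.
    Container container =
        getContainer(rmContainer, application, node, capability, priority);
    if (container == null) {
      return Resources.none();
    }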
Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java Thu Jun 28 06:59:38 2012
@@ -111,7 +111,7 @@ public class ParentQueue implements CSQu
 			      cs.getConfiguration().getEnableUserMetrics(),
 			      cs.getConf());
 
-    int rawCapacity = cs.getConfiguration().getCapacity(getQueuePath());
+    float rawCapacity = cs.getConfiguration().getCapacity(getQueuePath());
 
     if (rootQueue &&
         (rawCapacity != CapacitySchedulerConfiguration.MAXIMUM_CAPACITY_VALUE)) {

Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java Thu Jun 28 06:59:38 2012
@@ -24,8 +24,6 @@ public class AppPage extends RmView {
 
   @Override protected void preHead(Page.HTML<_> html) {
     commonPreHead(html);
-    // App page is per-app information. Helps to refresh automatically.
-    html.meta_http("refresh", "10");
   }
 
   @Override protected Class<? extends SubView> content() {

Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java Thu Jun 28 06:59:38 2012
@@ -59,13 +59,13 @@ public class TestCapacityScheduler {
   private static final String B1 = B + ".b1";
   private static final String B2 = B + ".b2";
   private static final String B3 = B + ".b3";
-  private static int A_CAPACITY = 10;
-  private static int B_CAPACITY = 90;
-  private static int A1_CAPACITY = 30;
-  private static int A2_CAPACITY = 70;
-  private static int B1_CAPACITY = 50;
-  private static int B2_CAPACITY = 30;
-  private static int B3_CAPACITY = 20;
+  private static float A_CAPACITY = 10.5f;
+  private static float B_CAPACITY = 89.5f;
+  private static float A1_CAPACITY = 30;
+  private static float A2_CAPACITY = 70;
+  private static float B1_CAPACITY = 50;
+  private static float B2_CAPACITY = 30;
+  private static float B3_CAPACITY = 20;
 
   private ResourceManager resourceManager = null;
   
@@ -250,14 +250,14 @@ public class TestCapacityScheduler {
     cs.reinitialize(conf, null, null);
     checkQueueCapacities(cs, A_CAPACITY, B_CAPACITY);
 
-    conf.setCapacity(A, 80);
-    conf.setCapacity(B, 20);
+    conf.setCapacity(A, 80f);
+    conf.setCapacity(B, 20f);
     cs.reinitialize(conf, null,null);
-    checkQueueCapacities(cs, 80, 20);
+    checkQueueCapacities(cs, 80f, 20f);
   }
 
   private void checkQueueCapacities(CapacityScheduler cs,
-      int capacityA, int capacityB) {
+      float capacityA, float capacityB) {
     CSQueue rootQueue = cs.getRootQueue();
     CSQueue queueA = findQueue(rootQueue, A);
     CSQueue queueB = findQueue(rootQueue, B);
@@ -274,13 +274,13 @@ public class TestCapacityScheduler {
     checkQueueCapacity(queueB, capB, capB, 1.0f, 1.0f);
     checkQueueCapacity(queueA1, A1_CAPACITY / 100.0f,
         (A1_CAPACITY/100.0f) * capA, 1.0f, 1.0f);
-    checkQueueCapacity(queueA2, (float)A2_CAPACITY / 100.0f,
+    checkQueueCapacity(queueA2, A2_CAPACITY / 100.0f,
         (A2_CAPACITY/100.0f) * capA, 1.0f, 1.0f);
-    checkQueueCapacity(queueB1, (float)B1_CAPACITY / 100.0f,
+    checkQueueCapacity(queueB1, B1_CAPACITY / 100.0f,
         (B1_CAPACITY/100.0f) * capB, 1.0f, 1.0f);
-    checkQueueCapacity(queueB2, (float)B2_CAPACITY / 100.0f,
+    checkQueueCapacity(queueB2, B2_CAPACITY / 100.0f,
         (B2_CAPACITY/100.0f) * capB, 1.0f, 1.0f);
-    checkQueueCapacity(queueB3, (float)B3_CAPACITY / 100.0f,
+    checkQueueCapacity(queueB3, B3_CAPACITY / 100.0f,
         (B3_CAPACITY/100.0f) * capB, 1.0f, 1.0f);
   }
 
@@ -340,7 +340,7 @@ public class TestCapacityScheduler {
     CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
     setupQueueConfiguration(conf);
     conf.setQueues(CapacitySchedulerConfiguration.ROOT + ".a.a1", new String[] {"b1"} );
-    conf.setCapacity(CapacitySchedulerConfiguration.ROOT + ".a.a1.b1", 100);
+    conf.setCapacity(CapacitySchedulerConfiguration.ROOT + ".a.a1.b1", 100.0f);
     conf.setUserLimitFactor(CapacitySchedulerConfiguration.ROOT + ".a.a1.b1", 100.0f);
 
     cs.reinitialize(conf, null, null);

Modified: hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java Thu Jun 28 06:59:38 2012
@@ -135,7 +135,7 @@ public class TestLeafQueue {
     conf.setAcl(CapacitySchedulerConfiguration.ROOT, QueueACL.SUBMIT_APPLICATIONS, " ");
     
     final String Q_A = CapacitySchedulerConfiguration.ROOT + "." + A;
-    conf.setCapacity(Q_A, 9);
+    conf.setCapacity(Q_A, 8.5f);
     conf.setMaximumCapacity(Q_A, 20);
     conf.setAcl(Q_A, QueueACL.SUBMIT_APPLICATIONS, "*");
     
@@ -145,7 +145,7 @@ public class TestLeafQueue {
     conf.setAcl(Q_B, QueueACL.SUBMIT_APPLICATIONS, "*");
 
     final String Q_C = CapacitySchedulerConfiguration.ROOT + "." + C;
-    conf.setCapacity(Q_C, 1);
+    conf.setCapacity(Q_C, 1.5f);
     conf.setMaximumCapacity(Q_C, 10);
     conf.setAcl(Q_C, QueueACL.SUBMIT_APPLICATIONS, " ");
     
@@ -208,8 +208,8 @@ public class TestLeafQueue {
 	  //can add more sturdy test with 3-layer queues 
 	  //once MAPREDUCE:3410 is resolved
 	  LeafQueue a = stubLeafQueue((LeafQueue)queues.get(A));
-	  assertEquals(0.09, a.getCapacity(), epsilon);
-	  assertEquals(0.09, a.getAbsoluteCapacity(), epsilon);
+	  assertEquals(0.085, a.getCapacity(), epsilon);
+	  assertEquals(0.085, a.getAbsoluteCapacity(), epsilon);
 	  assertEquals(0.2, a.getMaximumCapacity(), epsilon);
 	  assertEquals(0.2, a.getAbsoluteMaximumCapacity(), epsilon);
 	  
@@ -220,8 +220,8 @@ public class TestLeafQueue {
 	  assertEquals(0.99, b.getAbsoluteMaximumCapacity(), epsilon);
 
 	  ParentQueue c = (ParentQueue)queues.get(C);
-	  assertEquals(0.01, c.getCapacity(), epsilon);
-	  assertEquals(0.01, c.getAbsoluteCapacity(), epsilon);
+	  assertEquals(0.015, c.getCapacity(), epsilon);
+	  assertEquals(0.015, c.getAbsoluteCapacity(), epsilon);
 	  assertEquals(0.1, c.getMaximumCapacity(), epsilon);
 	  assertEquals(0.1, c.getAbsoluteMaximumCapacity(), epsilon);
   }

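The TestLeafQueue setup now uses fractional capacities that still total 100 across siblings (8.5 for A, 1.5 for C, with B presumably keeping its unchanged 90), and the assertions track them as absolute fractions (0.085, 0.015), exercising the float-typed configuration path end to end.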

