hadoop-mapreduce-commits mailing list archives

From gkesa...@apache.org
Subject svn commit: r1369164 [7/7] - in /hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project: ./ conf/ dev-support/ hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/ hadoop-mapreduce-client/hadoop-mapreduc...
Date Fri, 03 Aug 2012 19:00:51 GMT
Modified: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java Fri Aug  3 19:00:15 2012
@@ -34,6 +34,8 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import junit.framework.Assert;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -44,8 +46,8 @@ import org.apache.hadoop.yarn.conf.YarnC
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApp;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -99,22 +101,22 @@ public class TestParentQueue {
     LOG.info("Setup top-level queues a and b");
   }
 
-  private SchedulerApp getMockApplication(int appId, String user) {
-    SchedulerApp application = mock(SchedulerApp.class);
+  private FiCaSchedulerApp getMockApplication(int appId, String user) {
+    FiCaSchedulerApp application = mock(FiCaSchedulerApp.class);
     doReturn(user).when(application).getUser();
     doReturn(Resources.createResource(0)).when(application).getHeadroom();
     return application;
   }
 
   private void stubQueueAllocation(final CSQueue queue, 
-      final Resource clusterResource, final SchedulerNode node, 
+      final Resource clusterResource, final FiCaSchedulerNode node, 
       final int allocation) {
     stubQueueAllocation(queue, clusterResource, node, allocation, 
         NodeType.NODE_LOCAL);
   }
   
   private void stubQueueAllocation(final CSQueue queue, 
-      final Resource clusterResource, final SchedulerNode node, 
+      final Resource clusterResource, final FiCaSchedulerNode node, 
       final int allocation, final NodeType type) {
     
     // Simulate the queue allocation
@@ -132,7 +134,7 @@ public class TestParentQueue {
           ((ParentQueue)queue).allocateResource(clusterResource, 
               allocatedResource);
         } else {
-          SchedulerApp app1 = getMockApplication(0, "");
+          FiCaSchedulerApp app1 = getMockApplication(0, "");
           ((LeafQueue)queue).allocateResource(clusterResource, app1, 
               allocatedResource);
         }
@@ -198,9 +200,9 @@ public class TestParentQueue {
     final int memoryPerNode = 10;
     final int numNodes = 2;
     
-    SchedulerNode node_0 = 
+    FiCaSchedulerNode node_0 = 
         TestUtils.getMockNode("host_0", DEFAULT_RACK, 0, memoryPerNode*GB);
-    SchedulerNode node_1 = 
+    FiCaSchedulerNode node_1 = 
         TestUtils.getMockNode("host_1", DEFAULT_RACK, 0, memoryPerNode*GB);
     
     final Resource clusterResource = 
@@ -224,9 +226,9 @@ public class TestParentQueue {
     root.assignContainers(clusterResource, node_1);
     InOrder allocationOrder = inOrder(a, b);
     allocationOrder.verify(a).assignContainers(eq(clusterResource), 
-        any(SchedulerNode.class));
+        any(FiCaSchedulerNode.class));
     allocationOrder.verify(b).assignContainers(eq(clusterResource), 
-        any(SchedulerNode.class));
+        any(FiCaSchedulerNode.class));
     verifyQueueMetrics(a, 2*GB, clusterResource);
     verifyQueueMetrics(b, 2*GB, clusterResource);
 
@@ -237,9 +239,9 @@ public class TestParentQueue {
     root.assignContainers(clusterResource, node_0);
     allocationOrder = inOrder(b, a);
     allocationOrder.verify(b).assignContainers(eq(clusterResource), 
-        any(SchedulerNode.class));
+        any(FiCaSchedulerNode.class));
     allocationOrder.verify(a).assignContainers(eq(clusterResource), 
-        any(SchedulerNode.class));
+        any(FiCaSchedulerNode.class));
     verifyQueueMetrics(a, 3*GB, clusterResource);
     verifyQueueMetrics(b, 4*GB, clusterResource);
 
@@ -250,9 +252,9 @@ public class TestParentQueue {
     root.assignContainers(clusterResource, node_0);
     allocationOrder = inOrder(b, a);
     allocationOrder.verify(b).assignContainers(eq(clusterResource), 
-        any(SchedulerNode.class));
+        any(FiCaSchedulerNode.class));
     allocationOrder.verify(a).assignContainers(eq(clusterResource), 
-        any(SchedulerNode.class));
+        any(FiCaSchedulerNode.class));
     verifyQueueMetrics(a, 3*GB, clusterResource);
     verifyQueueMetrics(b, 8*GB, clusterResource);
 
@@ -263,13 +265,68 @@ public class TestParentQueue {
     root.assignContainers(clusterResource, node_1);
     allocationOrder = inOrder(a, b);
     allocationOrder.verify(b).assignContainers(eq(clusterResource), 
-        any(SchedulerNode.class));
+        any(FiCaSchedulerNode.class));
     allocationOrder.verify(a).assignContainers(eq(clusterResource), 
-        any(SchedulerNode.class));
+        any(FiCaSchedulerNode.class));
     verifyQueueMetrics(a, 4*GB, clusterResource);
     verifyQueueMetrics(b, 9*GB, clusterResource);
   }
 
+  @Test
+  public void testSingleLevelQueuesPrecision() throws Exception {
+    // Setup queue configs
+    setupSingleLevelQueues(csConf);
+    final String Q_A = CapacitySchedulerConfiguration.ROOT + "." + "a";
+    csConf.setCapacity(Q_A, 30);
+    final String Q_B = CapacitySchedulerConfiguration.ROOT + "." + "b";
+    csConf.setCapacity(Q_B, 70.5F);
+
+    Map<String, CSQueue> queues = new HashMap<String, CSQueue>();
+    boolean exceptionOccurred = false;
+    try {
+      CapacityScheduler.parseQueue(csContext, csConf, null,
+          CapacitySchedulerConfiguration.ROOT, queues, queues,
+          CapacityScheduler.queueComparator,
+          CapacityScheduler.applicationComparator, TestUtils.spyHook);
+    } catch (IllegalArgumentException ie) {
+      exceptionOccurred = true;
+    }
+    if (!exceptionOccurred) {
+      Assert.fail("Capacity is more than 100%, so parsing should fail.");
+    }
+    csConf.setCapacity(Q_A, 30);
+    csConf.setCapacity(Q_B, 70);
+    exceptionOccurred = false;
+    queues.clear();
+    try {
+      CapacityScheduler.parseQueue(csContext, csConf, null,
+          CapacitySchedulerConfiguration.ROOT, queues, queues,
+          CapacityScheduler.queueComparator,
+          CapacityScheduler.applicationComparator, TestUtils.spyHook);
+    } catch (IllegalArgumentException ie) {
+      exceptionOccurred = true;
+    }
+    if (exceptionOccurred) {
+      Assert.fail("Capacity is exactly 100%, so parsing should not fail.");
+    }
+    csConf.setCapacity(Q_A, 30);
+    csConf.setCapacity(Q_B, 70.005F);
+    exceptionOccurred = false;
+    queues.clear();
+    try {
+      CapacityScheduler.parseQueue(csContext, csConf, null,
+          CapacitySchedulerConfiguration.ROOT, queues, queues,
+          CapacityScheduler.queueComparator,
+          CapacityScheduler.applicationComparator, TestUtils.spyHook);
+    } catch (IllegalArgumentException ie) {
+      exceptionOccurred = true;
+    }
+    if (exceptionOccurred) {
+      Assert.fail("Capacity is within PRECISION (.05%), so parsing "
+          + "should not fail.");
+    }
+  }
+  
   private static final String C = "c";
   private static final String C1 = "c1";
   private static final String C11 = "c11";
@@ -346,11 +403,11 @@ public class TestParentQueue {
     final int memoryPerNode = 10;
     final int numNodes = 3;
     
-    SchedulerNode node_0 = 
+    FiCaSchedulerNode node_0 = 
         TestUtils.getMockNode("host_0", DEFAULT_RACK, 0, memoryPerNode*GB);
-    SchedulerNode node_1 = 
+    FiCaSchedulerNode node_1 = 
         TestUtils.getMockNode("host_1", DEFAULT_RACK, 0, memoryPerNode*GB);
-    SchedulerNode node_2 = 
+    FiCaSchedulerNode node_2 = 
         TestUtils.getMockNode("host_2", DEFAULT_RACK, 0, memoryPerNode*GB);
     
     final Resource clusterResource = 
@@ -401,11 +458,11 @@ public class TestParentQueue {
     root.assignContainers(clusterResource, node_0);
     InOrder allocationOrder = inOrder(a, c, b);
     allocationOrder.verify(a).assignContainers(eq(clusterResource), 
-        any(SchedulerNode.class));
+        any(FiCaSchedulerNode.class));
     allocationOrder.verify(c).assignContainers(eq(clusterResource), 
-        any(SchedulerNode.class));
+        any(FiCaSchedulerNode.class));
     allocationOrder.verify(b).assignContainers(eq(clusterResource), 
-        any(SchedulerNode.class));
+        any(FiCaSchedulerNode.class));
     verifyQueueMetrics(a, 1*GB, clusterResource);
     verifyQueueMetrics(b, 6*GB, clusterResource);
     verifyQueueMetrics(c, 3*GB, clusterResource);
@@ -427,13 +484,13 @@ public class TestParentQueue {
     root.assignContainers(clusterResource, node_2);
     allocationOrder = inOrder(a, a2, a1, b, c);
     allocationOrder.verify(a).assignContainers(eq(clusterResource), 
-        any(SchedulerNode.class));
+        any(FiCaSchedulerNode.class));
     allocationOrder.verify(a2).assignContainers(eq(clusterResource), 
-        any(SchedulerNode.class));
+        any(FiCaSchedulerNode.class));
     allocationOrder.verify(b).assignContainers(eq(clusterResource), 
-        any(SchedulerNode.class));
+        any(FiCaSchedulerNode.class));
     allocationOrder.verify(c).assignContainers(eq(clusterResource), 
-        any(SchedulerNode.class));
+        any(FiCaSchedulerNode.class));
     verifyQueueMetrics(a, 3*GB, clusterResource);
     verifyQueueMetrics(b, 8*GB, clusterResource);
     verifyQueueMetrics(c, 4*GB, clusterResource);
@@ -457,9 +514,9 @@ public class TestParentQueue {
     final int memoryPerNode = 10;
     final int numNodes = 2;
     
-    SchedulerNode node_0 = 
+    FiCaSchedulerNode node_0 = 
         TestUtils.getMockNode("host_0", DEFAULT_RACK, 0, memoryPerNode*GB);
-    SchedulerNode node_1 = 
+    FiCaSchedulerNode node_1 = 
         TestUtils.getMockNode("host_1", DEFAULT_RACK, 0, memoryPerNode*GB);
     
     final Resource clusterResource = 
@@ -484,9 +541,9 @@ public class TestParentQueue {
     root.assignContainers(clusterResource, node_1);
     InOrder allocationOrder = inOrder(a, b);
     allocationOrder.verify(a).assignContainers(eq(clusterResource), 
-        any(SchedulerNode.class));
+        any(FiCaSchedulerNode.class));
     allocationOrder.verify(b).assignContainers(eq(clusterResource), 
-        any(SchedulerNode.class));
+        any(FiCaSchedulerNode.class));
     verifyQueueMetrics(a, 2*GB, clusterResource);
     verifyQueueMetrics(b, 2*GB, clusterResource);
     
@@ -498,9 +555,9 @@ public class TestParentQueue {
     root.assignContainers(clusterResource, node_0);
     allocationOrder = inOrder(b, a);
     allocationOrder.verify(b).assignContainers(eq(clusterResource), 
-        any(SchedulerNode.class));
+        any(FiCaSchedulerNode.class));
     allocationOrder.verify(a).assignContainers(eq(clusterResource), 
-        any(SchedulerNode.class));
+        any(FiCaSchedulerNode.class));
     verifyQueueMetrics(a, 2*GB, clusterResource);
     verifyQueueMetrics(b, 4*GB, clusterResource);
 
@@ -523,9 +580,9 @@ public class TestParentQueue {
     final int memoryPerNode = 10;
     final int numNodes = 2;
     
-    SchedulerNode node_0 = 
+    FiCaSchedulerNode node_0 = 
         TestUtils.getMockNode("host_0", DEFAULT_RACK, 0, memoryPerNode*GB);
-    SchedulerNode node_1 = 
+    FiCaSchedulerNode node_1 = 
         TestUtils.getMockNode("host_1", DEFAULT_RACK, 0, memoryPerNode*GB);
     
     final Resource clusterResource = 
@@ -550,9 +607,9 @@ public class TestParentQueue {
     root.assignContainers(clusterResource, node_1);
     InOrder allocationOrder = inOrder(b2, b3);
     allocationOrder.verify(b2).assignContainers(eq(clusterResource), 
-        any(SchedulerNode.class));
+        any(FiCaSchedulerNode.class));
     allocationOrder.verify(b3).assignContainers(eq(clusterResource), 
-        any(SchedulerNode.class));
+        any(FiCaSchedulerNode.class));
     verifyQueueMetrics(b2, 1*GB, clusterResource);
     verifyQueueMetrics(b3, 2*GB, clusterResource);
     
@@ -564,9 +621,9 @@ public class TestParentQueue {
     root.assignContainers(clusterResource, node_0);
     allocationOrder = inOrder(b3, b2);
     allocationOrder.verify(b3).assignContainers(eq(clusterResource), 
-        any(SchedulerNode.class));
+        any(FiCaSchedulerNode.class));
     allocationOrder.verify(b2).assignContainers(eq(clusterResource), 
-        any(SchedulerNode.class));
+        any(FiCaSchedulerNode.class));
     verifyQueueMetrics(b2, 1*GB, clusterResource);
     verifyQueueMetrics(b3, 3*GB, clusterResource);
 

Modified: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java Fri Aug  3 19:00:15 2012
@@ -44,8 +44,8 @@ import org.apache.hadoop.yarn.server.res
 import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApp;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.security.ApplicationTokenSecretManager;
 
 public class TestUtils {
@@ -80,7 +80,7 @@ public class TestUtils {
         new ContainerAllocationExpirer(nullDispatcher);
     
     RMContext rmContext =
-        new RMContextImpl(null, nullDispatcher, cae, null, null,
+        new RMContextImpl(null, nullDispatcher, cae, null, null, null,
           new ApplicationTokenSecretManager(new Configuration()));
     
     return rmContext;
@@ -140,7 +140,7 @@ public class TestUtils {
     return applicationAttemptId;
   }
   
-  public static SchedulerNode getMockNode(
+  public static FiCaSchedulerNode getMockNode(
       String host, String rack, int port, int capability) {
     NodeId nodeId = mock(NodeId.class);
     when(nodeId.getHost()).thenReturn(host);
@@ -153,12 +153,12 @@ public class TestUtils {
     when(rmNode.getHostName()).thenReturn(host);
     when(rmNode.getRackName()).thenReturn(rack);
     
-    SchedulerNode node = spy(new SchedulerNode(rmNode));
+    FiCaSchedulerNode node = spy(new FiCaSchedulerNode(rmNode));
     LOG.info("node = " + host + " avail=" + node.getAvailableResource());
     return node;
   }
   
-  public static ContainerId getMockContainerId(SchedulerApp application) {
+  public static ContainerId getMockContainerId(FiCaSchedulerApp application) {
     ContainerId containerId = mock(ContainerId.class);
     doReturn(application.getApplicationAttemptId()).
     when(containerId).getApplicationAttemptId();

Modified: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java Fri Aug  3 19:00:15 2012
@@ -86,8 +86,8 @@ public class TestFifoScheduler {
   @Test
   public void testAppAttemptMetrics() throws Exception {
     AsyncDispatcher dispatcher = new InlineDispatcher();
-    RMContext rmContext =
-        new RMContextImpl(null, dispatcher, null, null, null, null);
+    RMContext rmContext = new RMContextImpl(null, dispatcher, null,
+        null, null, null, null);
 
    FifoScheduler scheduler = new FifoScheduler();
    scheduler.reinitialize(new Configuration(), null, rmContext);

Modified: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java Fri Aug  3 19:00:15 2012
@@ -158,7 +158,8 @@ public class TestRMWebApp {
     for (RMNode node : deactivatedNodes) {
       deactivatedNodesMap.put(node.getHostName(), node);
     }
-   return new RMContextImpl(new MemStore(), null, null, null, null, null) {
+   return new RMContextImpl(new MemStore(), null, null, null, null,
+       null, null) {
       @Override
       public ConcurrentMap<ApplicationId, RMApp> getRMApps() {
         return applicationsMaps;

Modified: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java Fri Aug  3 19:00:15 2012
@@ -31,6 +31,7 @@ import javax.xml.parsers.DocumentBuilder
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
@@ -545,6 +546,8 @@ public class TestRMWebServicesApps exten
         .sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId());
     am.registerAppAttempt();
     am.unregisterAppAttempt();
+    amNodeManager.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(),
+        1, ContainerState.COMPLETE);
     rm.submitApp(1024);
     rm.submitApp(1024);
 
@@ -573,6 +576,8 @@ public class TestRMWebServicesApps exten
         .sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId());
     am.registerAppAttempt();
     am.unregisterAppAttempt();
+    amNodeManager.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(),
+        1, ContainerState.COMPLETE);
 
     rm.submitApp(1024);
     rm.submitApp(1024);
@@ -605,6 +610,8 @@ public class TestRMWebServicesApps exten
         .sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId());
     am.registerAppAttempt();
     am.unregisterAppAttempt();
+    amNodeManager.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(),
+        1, ContainerState.COMPLETE);
 
     rm.submitApp(1024);
     rm.submitApp(1024);

Modified: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java Fri Aug  3 19:00:15 2012
@@ -135,18 +135,21 @@ public class MiniYARNCluster extends Com
     public synchronized void start() {
       try {
         getConfig().setBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, true);
-        getConfig().set(YarnConfiguration.RM_ADDRESS,
-                        MiniYARNCluster.getHostname() + ":0");
-        getConfig().set(YarnConfiguration.RM_ADDRESS,
-                        MiniYARNCluster.getHostname() + ":0");
-        getConfig().set(YarnConfiguration.RM_ADMIN_ADDRESS,
-                        MiniYARNCluster.getHostname() + ":0");
-        getConfig().set(YarnConfiguration.RM_SCHEDULER_ADDRESS,
-                        MiniYARNCluster.getHostname() + ":0");
-        getConfig().set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
-                        MiniYARNCluster.getHostname() + ":0");
-        getConfig().set(YarnConfiguration.RM_WEBAPP_ADDRESS,
-                        MiniYARNCluster.getHostname() + ":0");
+        if (!getConfig().getBoolean(
+            YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS,
+            YarnConfiguration.DEFAULT_YARN_MINICLUSTER_FIXED_PORTS)) {
+          // pick free random ports.
+          getConfig().set(YarnConfiguration.RM_ADDRESS,
+              MiniYARNCluster.getHostname() + ":0");
+          getConfig().set(YarnConfiguration.RM_ADMIN_ADDRESS,
+              MiniYARNCluster.getHostname() + ":0");
+          getConfig().set(YarnConfiguration.RM_SCHEDULER_ADDRESS,
+              MiniYARNCluster.getHostname() + ":0");
+          getConfig().set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
+              MiniYARNCluster.getHostname() + ":0");
+          getConfig().set(YarnConfiguration.RM_WEBAPP_ADDRESS,
+              MiniYARNCluster.getHostname() + ":0");
+        }
         Store store = StoreFactory.getStore(getConfig());
         resourceManager = new ResourceManager(store) {
           @Override
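
With this change, a test that needs the addresses from its own configuration
can opt out of the random-port behavior before starting the cluster. A
minimal usage sketch (assuming the standard MiniYARNCluster test setup):

    // Keep the RM addresses configured by the test instead of letting
    // MiniYARNCluster pick free random ports.
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
    MiniYARNCluster cluster = new MiniYARNCluster("test", 1, 1, 1);
    cluster.init(conf);
    cluster.start();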

Modified: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java Fri Aug  3 19:00:15 2012
@@ -45,6 +45,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.SecurityUtil;
@@ -214,11 +215,12 @@ public class TestContainerManagerSecurit
 
     ContainerTokenIdentifier dummyIdentifier = new ContainerTokenIdentifier();
     dummyIdentifier.readFields(di);
+
    // Malicious user modifies the resource amount
     Resource modifiedResource = BuilderUtils.newResource(2048);
     ContainerTokenIdentifier modifiedIdentifier = new ContainerTokenIdentifier(
         dummyIdentifier.getContainerID(), dummyIdentifier.getNmHostAddress(),
-        modifiedResource);
+        modifiedResource, Long.MAX_VALUE);
     Token<ContainerTokenIdentifier> modifiedToken = new Token<ContainerTokenIdentifier>(
         modifiedIdentifier.getBytes(), containerToken.getPassword().array(),
         new Text(containerToken.getKind()), new Text(containerToken
@@ -288,6 +290,7 @@ public class TestContainerManagerSecurit
     // Now talk to the NM for launching the container with modified containerID
     final ContainerId containerID = allocatedContainer.getId();
 
+    /////////// Test calls with illegal containerIDs and illegal Resources
     UserGroupInformation unauthorizedUser = UserGroupInformation
         .createRemoteUser(containerID.toString());
     ContainerToken containerToken = allocatedContainer.getContainerToken();
@@ -303,9 +306,10 @@ public class TestContainerManagerSecurit
             containerToken.getKind()), new Text(containerToken.getService()));
 
     unauthorizedUser.addToken(token);
-    unauthorizedUser.doAs(new PrivilegedAction<Void>() {
+    ContainerManager client =
+        unauthorizedUser.doAs(new PrivilegedAction<ContainerManager>() {
       @Override
-      public Void run() {
+      public ContainerManager run() {
         ContainerManager client = (ContainerManager) yarnRPC.getProxy(
             ContainerManager.class, NetUtils
                 .createSocketAddr(allocatedContainer.getNodeId().toString()),
@@ -316,16 +320,76 @@ public class TestContainerManagerSecurit
         callWithIllegalContainerID(client, tokenId);
         callWithIllegalResource(client, tokenId);
 
+        return client;
+      }
+    });
+    
+    /////////// End of testing for illegal containerIDs and illegal Resources
+
+    /////////// Test calls with expired tokens
+    RPC.stopProxy(client);
+    unauthorizedUser = UserGroupInformation
+        .createRemoteUser(containerID.toString());
+
+    final ContainerTokenIdentifier newTokenId =
+        new ContainerTokenIdentifier(tokenId.getContainerID(),
+          tokenId.getNmHostAddress(), tokenId.getResource(),
+          System.currentTimeMillis() - 1);
+    byte[] password =
+        resourceManager.getContainerTokenSecretManager().createPassword(
+            newTokenId);
+    // Create a valid token by using the key from the RM.
+    token = new Token<ContainerTokenIdentifier>(
+        newTokenId.getBytes(), password, new Text(
+            containerToken.getKind()), new Text(containerToken.getService()));
+
+    unauthorizedUser.addToken(token);
+    unauthorizedUser.doAs(new PrivilegedAction<Void>() {
+      @Override
+      public Void run() {
+        ContainerManager client = (ContainerManager) yarnRPC.getProxy(
+            ContainerManager.class, NetUtils
+                .createSocketAddr(allocatedContainer.getNodeId().toString()),
+            conf);
+
+        LOG.info("Going to contact NM with expired token");
+        ContainerLaunchContext context = createContainerLaunchContextForTest(newTokenId);
+        StartContainerRequest request = Records.newRecord(StartContainerRequest.class);
+        request.setContainerLaunchContext(context);
+
+        //Calling startContainer with an expired token.
+        try {
+          client.startContainer(request);
+          fail("Connection initiation with expired "
+              + "token is expected to fail.");
+        } catch (Throwable t) {
+          LOG.info("Got exception : ", t);
+          Assert.assertTrue(t.getMessage().contains(
+                  "This token is expired. current time is"));
+        }
+
+        // Try stopping a container - should not get an expiry error.
+        StopContainerRequest stopRequest = Records.newRecord(StopContainerRequest.class);
+        stopRequest.setContainerId(newTokenId.getContainerID());
+        try {
+          client.stopContainer(stopRequest);
+        } catch (Throwable t) {
+          fail("Stop Container call should have succeeded");
+        }
+        
         return null;
       }
     });
+    /////////// End of testing calls with expired tokens
 
     KillApplicationRequest request = Records
         .newRecord(KillApplicationRequest.class);
     request.setApplicationId(appID);
     resourceManager.getClientRMService().forceKillApplication(request);
   }
-
+  
   private AMRMProtocol submitAndRegisterApplication(
       ResourceManager resourceManager, final YarnRPC yarnRPC,
       ApplicationId appID) throws IOException,
@@ -481,11 +545,9 @@ public class TestContainerManagerSecurit
     StartContainerRequest request = recordFactory
         .newRecordInstance(StartContainerRequest.class);
     // Authenticated but unauthorized, due to wrong resource
-    ContainerLaunchContext context = BuilderUtils.newContainerLaunchContext(
-        tokenId.getContainerID(), "testUser", BuilderUtils.newResource(2048),
-        new HashMap<String, LocalResource>(), new HashMap<String, String>(),
-        new ArrayList<String>(), new HashMap<String, ByteBuffer>(), null,
-        new HashMap<ApplicationAccessType, String>());
+    ContainerLaunchContext context =
+        createContainerLaunchContextForTest(tokenId);
+    context.getResource().setMemory(2048); // Set a different resource size.
     request.setContainerLaunchContext(context);
     try {
       client.startContainer(request);
@@ -500,4 +562,17 @@ public class TestContainerManagerSecurit
               + " but found " + context.getResource().toString()));
     }
   }
+
+  private ContainerLaunchContext createContainerLaunchContextForTest(
+      ContainerTokenIdentifier tokenId) {
+    ContainerLaunchContext context =
+        BuilderUtils.newContainerLaunchContext(tokenId.getContainerID(),
+            "testUser",
+            BuilderUtils.newResource(tokenId.getResource().getMemory()),
+            new HashMap<String, LocalResource>(),
+            new HashMap<String, String>(), new ArrayList<String>(),
+            new HashMap<String, ByteBuffer>(), null,
+            new HashMap<ApplicationAccessType, String>());
+    return context;
+  }
 }
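
The fourth ContainerTokenIdentifier constructor argument introduced in this
change is the token's expiry timestamp in milliseconds: Long.MAX_VALUE
effectively never expires, while a timestamp in the past yields a token the
NM rejects on startContainer. A sketch using only the constructor as it
appears above (containerId, nmHostAddress, and resource stand for values the
test already holds):

    // Effectively non-expiring, as used for the modified-resource token.
    ContainerTokenIdentifier valid = new ContainerTokenIdentifier(
        containerId, nmHostAddress, resource, Long.MAX_VALUE);

    // Already expired by the time the NM checks it, as in the new test.
    ContainerTokenIdentifier expired = new ContainerTokenIdentifier(
        containerId, nmHostAddress, resource, System.currentTimeMillis() - 1);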

Modified: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java Fri Aug  3 19:00:15 2012
@@ -110,6 +110,35 @@ public class TestDiskFailures {
     testDirsFailures(false);
   }
 
+  /**
+   * Make a local and log directory inaccessible during initialization
+   * and verify those bad directories are recognized and removed from
+   * the list of available local and log directories.
+   * @throws IOException
+   */
+  @Test
+  public void testDirFailuresOnStartup() throws IOException {
+    Configuration conf = new YarnConfiguration();
+    String localDir1 = new File(testDir, "localDir1").getPath();
+    String localDir2 = new File(testDir, "localDir2").getPath();
+    String logDir1 = new File(testDir, "logDir1").getPath();
+    String logDir2 = new File(testDir, "logDir2").getPath();
+    conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir1 + "," + localDir2);
+    conf.set(YarnConfiguration.NM_LOG_DIRS, logDir1 + "," + logDir2);
+
+    prepareDirToFail(localDir1);
+    prepareDirToFail(logDir2);
+
+    LocalDirsHandlerService dirSvc = new LocalDirsHandlerService();
+    dirSvc.init(conf);
+    List<String> localDirs = dirSvc.getLocalDirs();
+    Assert.assertEquals(1, localDirs.size());
+    Assert.assertEquals(localDir2, localDirs.get(0));
+    List<String> logDirs = dirSvc.getLogDirs();
+    Assert.assertEquals(1, logDirs.size());
+    Assert.assertEquals(logDir1, logDirs.get(0));
+  }
+
   private void testDirsFailures(boolean localORLogDirs) throws IOException {
     String dirType = localORLogDirs ? "local" : "log";
     String dirsProperty = localORLogDirs ? YarnConfiguration.NM_LOCAL_DIRS
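
prepareDirToFail is an existing helper in this test class; one plausible
implementation (an assumption, shown only so the new test reads in
isolation) is to occupy the path with a plain file so the dirs handler
cannot create or use it as a directory:

    // Hypothetical sketch of prepareDirToFail: make the path unusable as a
    // directory by creating a regular file there.
    private void prepareDirToFail(String dir) throws IOException {
      File file = new File(dir);
      Assert.assertTrue("Could not create file to fail dir " + dir,
          file.createNewFile());
    }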

Modified: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServer.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServer.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServer.java Fri Aug  3 19:00:15 2012
@@ -27,6 +27,7 @@ import org.apache.hadoop.security.Securi
 import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.service.CompositeService;
 
@@ -73,6 +74,7 @@ public class WebAppProxyServer extends C
   }
 
   public static void main(String[] args) {
+    Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
     StringUtils.startupShutdownMessage(WebAppProxyServer.class, args, LOG);
     try {
       WebAppProxyServer proxy = new WebAppProxyServer();
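
Installing the handler as the first statement of main() means threads that
die with an unexpected Throwable are logged through the YARN handler rather
than disappearing silently. The generic JDK pattern behind it, independent
of the YARN class:

    // Register a process-wide handler before any threads are spawned.
    Thread.setDefaultUncaughtExceptionHandler(
        new Thread.UncaughtExceptionHandler() {
          @Override
          public void uncaughtException(Thread t, Throwable e) {
            System.err.println("Thread " + t.getName() + " failed: " + e);
          }
        });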

Modified: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/CapacityScheduler.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/CapacityScheduler.apt.vm?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/CapacityScheduler.apt.vm (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/CapacityScheduler.apt.vm Fri Aug  3 19:00:15 2012
@@ -240,17 +240,24 @@ Hadoop MapReduce Next Generation - Capac
 *--------------------------------------+--------------------------------------+
 || Property                            || Description                         |
 *--------------------------------------+--------------------------------------+
-| <<<yarn.scheduler.capacity.maximum-applications>>> | |
+| <<<yarn.scheduler.capacity.maximum-applications>>> /  |
+| <<<yarn.scheduler.capacity.<queue-path>.maximum-applications>>>  | |
 | | Maximum number of applications in the system which can be concurrently |
 | | active both running and pending. Limits on each queue are directly |
 | | proportional to their queue capacities and user limits. This is a 
 | | hard limit and any applications submitted when this limit is reached will |
-| | be rejected. Default is 10000.|
+| | be rejected. Default is 10000. This can be set for all queues with |
+| | <<<yarn.scheduler.capacity.maximum-applications>>> and can also be overridden on a  |
+| | per-queue basis by setting <<<yarn.scheduler.capacity.<queue-path>.maximum-applications>>>. |
 *--------------------------------------+--------------------------------------+
-| yarn.scheduler.capacity.maximum-am-resource-percent | |
+| <<<yarn.scheduler.capacity.maximum-am-resource-percent>>> / |
+| <<<yarn.scheduler.capacity.<queue-path>.maximum-am-resource-percent>>> | |
 | | Maximum percent of resources in the cluster which can be used to run |
-| | application masters - controls number of concurrent running applications. |
-| | Specified as a float - ie 0.5 = 50%. Default is 10%. |
+| | application masters - controls number of concurrent active applications. Limits on each |
+| | queue are directly proportional to their queue capacities and user limits. |
+| | Specified as a float - i.e. 0.5 = 50%. Default is 10%. This can be set for all queues with |
+| | <<<yarn.scheduler.capacity.maximum-am-resource-percent>>> and can also be overridden on a  |
+| | per-queue basis by setting <<<yarn.scheduler.capacity.<queue-path>.maximum-am-resource-percent>>>. |
 *--------------------------------------+--------------------------------------+
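
Per the updated table, each limit now has a cluster-wide key plus an
optional per-queue override. A minimal sketch of setting both through the
scheduler configuration ("root.a" is a hypothetical queue path):

    // Cluster-wide default, then a tighter override for one queue.
    CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
    conf.setInt("yarn.scheduler.capacity.maximum-applications", 10000);
    conf.setInt("yarn.scheduler.capacity.root.a.maximum-applications", 100);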
 
     * Queue Administration & Permissions

Modified: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm Fri Aug  3 19:00:15 2012
@@ -985,7 +985,7 @@ KVNO Timestamp         Principal
 | | </etc/security/keytab/jhs.service.keytab> | |
 | | | Kerberos keytab file for the MapReduce JobHistory Server. |
 *-------------------------+-------------------------+------------------------+
-| <<<mapreduce.jobhistory.principal>>> | mapred/_HOST@REALM.TLD | |
+| <<<mapreduce.jobhistory.principal>>> | jhs/_HOST@REALM.TLD | |
 | | | Kerberos principal name for the MapReduce JobHistory Server. |
 *-------------------------+-------------------------+------------------------+
 

Modified: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HistoryServerRest.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HistoryServerRest.apt.vm?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HistoryServerRest.apt.vm (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HistoryServerRest.apt.vm Fri Aug  3 19:00:15 2012
@@ -1261,6 +1261,9 @@ History Server REST API's.
 *---------------+--------------+-------------------------------+
 | value | string | The value of the configuration property |
 *---------------+--------------+-------------------------------+
+| source | string | The location this configuration property came from. If there is more than one of these, it shows the history, with the latest source at the end of the list. |
+*---------------+--------------+-------------------------------+
+
 
 *** Response Examples
 
@@ -1293,14 +1296,17 @@ History Server REST API's.
          {  
             "value" : "/home/hadoop/hdfs/data",
             "name" : "dfs.datanode.data.dir"
+            "source" : ["hdfs-site.xml", "job.xml"]
          },
          {
             "value" : "org.apache.hadoop.yarn.server.webproxy.amfilter.AmFilterInitializer",
             "name" : "hadoop.http.filter.initializers"
+            "source" : ["programatically", "job.xml"]
          },
          {
             "value" : "/home/hadoop/tmp",
             "name" : "mapreduce.cluster.temp.dir"
+            "source" : ["mapred-site.xml"]
          },
          ...
       ]
@@ -1335,14 +1341,19 @@ History Server REST API's.
   <property>
     <name>dfs.datanode.data.dir</name>
     <value>/home/hadoop/hdfs/data</value>
+    <source>hdfs-site.xml</source>
+    <source>job.xml</source>
   </property>
   <property>
     <name>hadoop.http.filter.initializers</name>
     <value>org.apache.hadoop.yarn.server.webproxy.amfilter.AmFilterInitializer</value>
+    <source>programatically</source>
+    <source>job.xml</source>
   </property>
   <property>
     <name>mapreduce.cluster.temp.dir</name>
     <value>/home/hadoop/tmp</value>
+    <source>mapred-site.xml</source>
   </property>
   ...
 </conf>

Modified: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/MapredAppMasterRest.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/MapredAppMasterRest.apt.vm?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/MapredAppMasterRest.apt.vm (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/MapredAppMasterRest.apt.vm Fri Aug  3 19:00:15 2012
@@ -1296,6 +1296,8 @@ MapReduce Application Master REST API's.
 *---------------+--------------+-------------------------------+
 | value | string | The value of the configuration property |
 *---------------+--------------+-------------------------------+
+| source | string | The location this configuration property came from. If there is more than one of these, it shows the history, with the latest source at the end of the list. |
+*---------------+--------------+-------------------------------+
 
 ** Response Examples
 
@@ -1327,15 +1329,18 @@ MapReduce Application Master REST API's.
       "property" : [
          {  
             "value" : "/home/hadoop/hdfs/data",
-            "name" : "dfs.datanode.data.dir"
+            "name" : "dfs.datanode.data.dir",
+            "source" : ["hdfs-site.xml", "job.xml"]
          },
          {
             "value" : "org.apache.hadoop.yarn.server.webproxy.amfilter.AmFilterInitializer",
             "name" : "hadoop.http.filter.initializers"
+            "source" : ["programatically", "job.xml"]
          },
          {
             "value" : "/home/hadoop/tmp",
             "name" : "mapreduce.cluster.temp.dir"
+            "source" : ["mapred-site.xml"]
          },
          ...
       ]
@@ -1370,14 +1375,19 @@ MapReduce Application Master REST API's.
   <property>
     <name>dfs.datanode.data.dir</name>
     <value>/home/hadoop/hdfs/data</value>
+    <source>hdfs-site.xml</source>
+    <source>job.xml</source>
   </property>
   <property>
     <name>hadoop.http.filter.initializers</name>
     <value>org.apache.hadoop.yarn.server.webproxy.amfilter.AmFilterInitializer</value>
+    <source>programatically</source>
+    <source>job.xml</source>
   </property>
   <property>
     <name>mapreduce.cluster.temp.dir</name>
     <value>/home/hadoop/tmp</value>
+    <source>mapred-site.xml</source>
   </property>
   ...
 </conf>

Modified: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/WebHDFS.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/WebHDFS.apt.vm?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/WebHDFS.apt.vm (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/WebHDFS.apt.vm Fri Aug  3 19:00:15 2012
@@ -145,7 +145,7 @@ WebHDFS REST API
 *-------------------------------------------------+---------------------------------------------------+
 | <<<dfs.webhdfs.enabled                      >>> | Enable/disable WebHDFS in Namenodes and Datanodes |
 *-------------------------------------------------+---------------------------------------------------+
-| <<<dfs.web.authentication.kerberos.principal>>> | The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint. The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos HTTP SPENGO specification. |
+| <<<dfs.web.authentication.kerberos.principal>>> | The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint. The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos HTTP SPNEGO specification. |
 *-------------------------------------------------+---------------------------------------------------+
 | <<<dfs.web.authentication.kerberos.keytab   >>> | The Kerberos keytab file with the credentials for the HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint. |
 *-------------------------------------------------+---------------------------------------------------+

Modified: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/index.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/index.apt.vm?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/index.apt.vm (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/index.apt.vm Fri Aug  3 19:00:15 2012
@@ -49,4 +49,7 @@ MapReduce NextGen aka YARN aka MRv2
 
   * {{{./WebApplicationProxy.html}Web Application Proxy}}
 
+  * {{{./CLIMiniCluster.html}CLI MiniCluster}}
+
+  * {{{./EncryptedShuffle.html}Encrypted Shuffle}}
 

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/src/c++/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/c++:r1358480-1369130

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/src/contrib/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/contrib:r1358480-1369130

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/src/contrib/block_forensics/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/block_forensics:r1358480-1369130

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/src/contrib/build-contrib.xml
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/build-contrib.xml:r1358480-1369130

Modified: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/src/contrib/build.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/src/contrib/build.xml?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/src/contrib/build.xml (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/src/contrib/build.xml Fri Aug  3 19:00:15 2012
@@ -62,7 +62,6 @@
       <fileset dir="." includes="streaming/build.xml"/> 
       <fileset dir="." includes="gridmix/build.xml"/>
       <fileset dir="." includes="vertica/build.xml"/>
-      <fileset dir="." includes="raid/build.xml"/>
     </subant>
     <available file="${build.contrib.dir}/testsfailed" property="testsfailed"/>
     <fail if="testsfailed">Tests failed!</fail>

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/src/contrib/build.xml
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/build.xml:r1358480-1369130

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/src/contrib/data_join/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/data_join:r1358480-1369130

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/src/contrib/eclipse-plugin/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/eclipse-plugin:r1358480-1369130

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/src/contrib/index/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/index:r1358480-1369130

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/src/contrib/vaidya/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/vaidya:r1358480-1369130

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/src/examples/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/examples:r1358480-1369130

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/src/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/java:r1358480-1369130

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/src/test/mapred/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred:r1358480-1369130

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs:r1358480-1369130

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/hdfs:r1358480-1369130

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/ipc/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/ipc:r1358480-1369130

Modified: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/SortValidator.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/SortValidator.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/SortValidator.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/SortValidator.java Fri Aug  3 19:00:15 2012
@@ -33,7 +33,6 @@ import org.apache.hadoop.io.WritableComp
 import org.apache.hadoop.io.WritableComparator;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.mapred.lib.HashPartitioner;
-import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.fs.*;
@@ -345,7 +344,8 @@ public class SortValidator extends Confi
 
       FileInputFormat.setInputPaths(jobConf, sortInput);
       FileInputFormat.addInputPath(jobConf, sortOutput);
-      Path outputPath = new Path("/tmp/sortvalidate/recordstatschecker");
+      Path outputPath = new Path(new Path("/tmp",
+           "sortvalidate"), UUID.randomUUID().toString());
       if (defaultfs.exists(outputPath)) {
         defaultfs.delete(outputPath, true);
       }
@@ -365,31 +365,44 @@ public class SortValidator extends Confi
       Date startTime = new Date();
       System.out.println("Job started: " + startTime);
       JobClient.runJob(jobConf);
-      Date end_time = new Date();
-      System.out.println("Job ended: " + end_time);
-      System.out.println("The job took " + 
-                         (end_time.getTime() - startTime.getTime()) /1000 + " seconds.");
-      
-      // Check to ensure that the statistics of the 
-      // framework's sort-input and sort-output match
-      SequenceFile.Reader stats = new SequenceFile.Reader(defaultfs,
-                                                          new Path(outputPath, "part-00000"), defaults);
-      IntWritable k1 = new IntWritable();
-      IntWritable k2 = new IntWritable();
-      RecordStatsWritable v1 = new RecordStatsWritable();
-      RecordStatsWritable v2 = new RecordStatsWritable();
-      if (!stats.next(k1, v1)) {
-        throw new IOException("Failed to read record #1 from reduce's output");
-      }
-      if (!stats.next(k2, v2)) {
-        throw new IOException("Failed to read record #2 from reduce's output");
-      }
-
-      if ((v1.getBytes() != v2.getBytes()) || (v1.getRecords() != v2.getRecords()) || 
-          v1.getChecksum() != v2.getChecksum()) {
-        throw new IOException("(" + 
-                              v1.getBytes() + ", " + v1.getRecords() + ", " + v1.getChecksum() + ") v/s (" +
-                              v2.getBytes() + ", " + v2.getRecords() + ", " + v2.getChecksum() + ")");
+      try {
+        Date end_time = new Date();
+        System.out.println("Job ended: " + end_time);
+        System.out.println("The job took " + 
+            (end_time.getTime() - startTime.getTime()) /1000 + " seconds.");
+
+        // Check to ensure that the statistics of the 
+        // framework's sort-input and sort-output match
+        SequenceFile.Reader stats = new SequenceFile.Reader(defaultfs,
+            new Path(outputPath, "part-00000"), defaults);
+        try {
+          IntWritable k1 = new IntWritable();
+          IntWritable k2 = new IntWritable();
+          RecordStatsWritable v1 = new RecordStatsWritable();
+          RecordStatsWritable v2 = new RecordStatsWritable();
+          if (!stats.next(k1, v1)) {
+            throw new IOException(
+                "Failed to read record #1 from reduce's output");
+          }
+          if (!stats.next(k2, v2)) {
+            throw new IOException(
+                "Failed to read record #2 from reduce's output");
+          }
+
+          if ((v1.getBytes() != v2.getBytes()) || 
+              (v1.getRecords() != v2.getRecords()) || 
+              v1.getChecksum() != v2.getChecksum()) {
+            throw new IOException("(" + 
+                v1.getBytes() + ", " + v1.getRecords() + ", " + v1.getChecksum()
+                + ") v/s (" +
+                v2.getBytes() + ", " + v2.getRecords() + ", " + v2.getChecksum()
+                + ")");
+          }
+        } finally {
+          stats.close();
+        }
+      } finally {
+        defaultfs.delete(outputPath, true);
       }
     }
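
The restructuring above guarantees the stats reader is closed and the
temporary output directory removed even when validation fails. On Java 7+
the same could be written with try-with-resources, since SequenceFile.Reader
implements java.io.Closeable (a sketch, not part of this change):

    try (SequenceFile.Reader stats = new SequenceFile.Reader(defaultfs,
        new Path(outputPath, "part-00000"), defaults)) {
      // ... read and compare the two RecordStatsWritable records ...
    } finally {
      defaultfs.delete(outputPath, true);
    }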
 

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-mapreduce-project/src/webapps/job/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/webapps/job:r1358480-1369130


