hadoop-common-commits mailing list archives

From omal...@apache.org
Subject svn commit: r1101640 [1/2] - in /hadoop/common/branches/branch-0.20-security-204: ./ conf/ ivy/ src/ant/org/apache/hadoop/ant/condition/ src/c++/libhdfs/ src/contrib/ src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/ src/contrib/ecl...
Date Tue, 10 May 2011 20:52:36 GMT
Author: omalley
Date: Tue May 10 20:52:35 2011
New Revision: 1101640

URL: http://svn.apache.org/viewvc?rev=1101640&view=rev
Log:
svn merge -r 1097011:1099333 from branch-0.20-security-203.

Added:
    hadoop/common/branches/branch-0.20-security-204/conf/hadoop-metrics2.properties
      - copied unchanged from r1099333, hadoop/common/branches/branch-0.20-security-203/conf/hadoop-metrics2.properties
    hadoop/common/branches/branch-0.20-security-204/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingStatus.java
      - copied unchanged from r1099333, hadoop/common/branches/branch-0.20-security-203/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingStatus.java
    hadoop/common/branches/branch-0.20-security-204/src/core/org/apache/hadoop/fs/HarFileSystem.java
      - copied unchanged from r1099333, hadoop/common/branches/branch-0.20-security-203/src/core/org/apache/hadoop/fs/HarFileSystem.java
    hadoop/common/branches/branch-0.20-security-204/src/core/org/apache/hadoop/io/compress/DoNotPool.java
      - copied unchanged from r1099333, hadoop/common/branches/branch-0.20-security-203/src/core/org/apache/hadoop/io/compress/DoNotPool.java
    hadoop/common/branches/branch-0.20-security-204/src/test/org/apache/hadoop/fs/TestHarFileSystem.java
      - copied unchanged from r1099333, hadoop/common/branches/branch-0.20-security-203/src/test/org/apache/hadoop/fs/TestHarFileSystem.java
    hadoop/common/branches/branch-0.20-security-204/src/tools/org/apache/hadoop/tools/HadoopArchives.java
      - copied unchanged from r1099333, hadoop/common/branches/branch-0.20-security-203/src/tools/org/apache/hadoop/tools/HadoopArchives.java
Removed:
    hadoop/common/branches/branch-0.20-security-204/conf/hadoop-metrics2.properties.example
Modified:
    hadoop/common/branches/branch-0.20-security-204/   (props changed)
    hadoop/common/branches/branch-0.20-security-204/CHANGES.txt   (contents, props changed)
    hadoop/common/branches/branch-0.20-security-204/conf/log4j.properties
    hadoop/common/branches/branch-0.20-security-204/ivy/libraries.properties
    hadoop/common/branches/branch-0.20-security-204/src/ant/org/apache/hadoop/ant/condition/DfsBaseConditional.java
    hadoop/common/branches/branch-0.20-security-204/src/c++/libhdfs/hdfs.c
    hadoop/common/branches/branch-0.20-security-204/src/c++/libhdfs/hdfsJniHelper.h
    hadoop/common/branches/branch-0.20-security-204/src/contrib/build-contrib.xml
    hadoop/common/branches/branch-0.20-security-204/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/HadoopApplicationLaunchShortcut.java
    hadoop/common/branches/branch-0.20-security-204/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/RunOnHadoopWizard.java
    hadoop/common/branches/branch-0.20-security-204/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairSchedulerServlet.java
    hadoop/common/branches/branch-0.20-security-204/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java
    hadoop/common/branches/branch-0.20-security-204/src/contrib/streaming/src/test/org/apache/hadoop/streaming/StderrApp.java
    hadoop/common/branches/branch-0.20-security-204/src/core/org/apache/hadoop/conf/Configuration.java
    hadoop/common/branches/branch-0.20-security-204/src/core/org/apache/hadoop/io/compress/CodecPool.java
    hadoop/common/branches/branch-0.20-security-204/src/core/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java
    hadoop/common/branches/branch-0.20-security-204/src/core/overview.html
    hadoop/common/branches/branch-0.20-security-204/src/docs/src/documentation/content/xdocs/capacity_scheduler.xml
    hadoop/common/branches/branch-0.20-security-204/src/docs/src/documentation/content/xdocs/fair_scheduler.xml
    hadoop/common/branches/branch-0.20-security-204/src/docs/src/documentation/content/xdocs/single_node_setup.xml
    hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/AggregateWordCount.java
    hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/AggregateWordHistogram.java
    hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/dancing/package.html
    hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/terasort/TeraGen.java
    hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/terasort/TeraSort.java
    hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/terasort/TeraValidate.java
    hadoop/common/branches/branch-0.20-security-204/src/mapred/   (props changed)
    hadoop/common/branches/branch-0.20-security-204/src/mapred/org/apache/hadoop/mapred/lib/CombineFileInputFormat.java
    hadoop/common/branches/branch-0.20-security-204/src/test/org/apache/hadoop/io/compress/TestCodec.java
    hadoop/common/branches/branch-0.20-security-204/src/test/org/apache/hadoop/mapred/lib/TestCombineFileInputFormat.java
    hadoop/common/branches/branch-0.20-security-204/src/test/org/apache/hadoop/metrics2/impl/TestMetricsConfig.java

Propchange: hadoop/common/branches/branch-0.20-security-204/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue May 10 20:52:35 2011
@@ -1,5 +1,5 @@
-/hadoop/common/branches/branch-0.20:829987,831184,909245,909723,960946
-/hadoop/common/branches/branch-0.20-security:1098837
-/hadoop/common/branches/branch-0.20-security-203:1096071
+/hadoop/common/branches/branch-0.20:826138,826568,829987,831184,833001,880632,898713,909245,909723,960946
+/hadoop/common/branches/branch-0.20-security:1097202,1098837
+/hadoop/common/branches/branch-0.20-security-203:1096071,1097012-1099333
 /hadoop/core/branches/branch-0.19:713112
 /hadoop/core/trunk:727001,727117,727191,727212,727217,727228,727255,727869,728187,729052,729987,732385,732572,732613,732777,732838,732869,733887,734870,734916,736426,738328,738697,740077,740157,741703,741762,743745,743816,743892,744894,745180,746010,746206,746227,746233,746274,746338,746902-746903,746925,746944,746968,746970,747279,747289,747802,748084,748090,748783,749262,749318,749863,750533,752073,752609,752834,752836,752913,752932,753112-753113,753346,754645,754847,754927,755035,755226,755348,755370,755418,755426,755790,755905,755938,755960,755986,755998,756352,757448,757624,757849,758156,758180,759398,759932,760502,760783,761046,761482,761632,762216,762879,763107,763502,764967,765016,765809,765951,771607,771661,772844,772876,772884,772920,773889,776638,778962,778966,779893,781720,784661,785046,785569

Modified: hadoop/common/branches/branch-0.20-security-204/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-204/CHANGES.txt?rev=1101640&r1=1101639&r2=1101640&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-204/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.20-security-204/CHANGES.txt Tue May 10 20:52:35 2011
@@ -40,9 +40,6 @@ Release 0.20.204.0 - unreleased
     HDFS-1258. Clearing namespace quota on "/" corrupts fs image.  
     (Aaron T. Myers via szetszwo)
 
-    HADOOP-7215. RPC clients must use network interface corresponding to 
-    the host in the client's kerberos principal key. (suresh)
-
     HDFS-1189. Quota counts missed between clear quota and set quota.
     (John George via szetszwo)
 
@@ -75,6 +72,26 @@ Release 0.20.204.0 - unreleased
 
 Release 0.20.203.0 - unreleased
 
+    MAPREDUCE-1280. Update Eclipse plugin to the new eclipse.jdt API.
+    (Alex Kozlov via szetszwo)
+
+    HADOOP-7259. Contrib modules should include the build.properties from
+    the enclosing hadoop directory. (omalley)
+
+    HADOOP-7253. Update the default configuration to fix security audit log
+    and metrics2 property configuration warnings. (omalley)
+
+    HADOOP-7247. Update documentation to match current jar names. (omalley)
+
+    HADOOP-7246. Update the log4j configuration to match the EventCounter
+    package. (Luke Lu via omalley)
+
+    HADOOP-7143. Restore HadoopArchives. (Joep Rottinghuis via omalley)
+
+    MAPREDUCE-2316. Updated CapacityScheduler documentation. (acmurthy) 
+
+    HADOOP-7243. Fix contrib unit tests missing dependencies. (omalley)
+
     HADOOP-7190. Add metrics v1 back for backwards compatibility. (omalley)
 
     MAPREDUCE-2360. Remove stripping of scheme, authority from submit dir in 
@@ -125,8 +142,13 @@ Release 0.20.203.0 - unreleased
     HADOOP-6879. Provide SSH based (Jsch) remote execution API for system
     tests. (cos)
 
+    HADOOP-7215. RPC clients must use network interface corresponding to 
+    the host in the client's kerberos principal key. (suresh)
+
     HADOOP-7232. Fix Javadoc warnings. (omalley)
 
+    HADOOP-7258. The Gzip codec should not return null decompressors. (omalley)
+
 Release 0.20.202.0 - unreleased
 
     MAPREDUCE-2355. Add a configuration knob 
@@ -1764,6 +1786,15 @@ Release 0.20.2 - Unreleased
     MAPREDUCE-1163. Remove unused, hard-coded paths from libhdfs. (Allen
     Wittenauer via cdouglas)
 
+    HADOOP-6315. Avoid incorrect use of BuiltInflater/BuiltInDeflater in
+    GzipCodec. (Aaron Kimball via cdouglas)
+
+    HADOOP-6269. Fix threading issue with defaultResource in Configuration.
+    (Sreekanth Ramakrishnan via cdouglas)
+
+    HADOOP-5759. Fix for IllegalArgumentException when CombineFileInputFormat
+    is used as job InputFormat. (Amareshwari Sriramadasu via zshao)
+
 Release 0.20.1 - 2009-09-01
 
   INCOMPATIBLE CHANGES

Propchange: hadoop/common/branches/branch-0.20-security-204/CHANGES.txt
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue May 10 20:52:35 2011
@@ -1,6 +1,6 @@
-/hadoop/common/branches/branch-0.20/CHANGES.txt:829987,831184,909245,909723,960946
-/hadoop/common/branches/branch-0.20-security/CHANGES.txt:1098837
-/hadoop/common/branches/branch-0.20-security-203/CHANGES.txt:1096071
+/hadoop/common/branches/branch-0.20/CHANGES.txt:826138,826568,829987,831184,833001,880632,898713,909245,909723,960946
+/hadoop/common/branches/branch-0.20-security/CHANGES.txt:1097202,1098837
+/hadoop/common/branches/branch-0.20-security-203/CHANGES.txt:1096071,1097012-1099333
 /hadoop/core/branches/branch-0.18/CHANGES.txt:727226
 /hadoop/core/branches/branch-0.19/CHANGES.txt:713112
 /hadoop/core/trunk/CHANGES.txt:727001,727117,727191,727212,727228,727255,727869,728187,729052,729987,732385,732572,732613,732777,732838,732869,733887,734870,734916,735082,736426,738602,738697,739416,740077,740157,741703,741762,743296,743745,743816,743892,744894,745180,745268,746010,746193,746206,746227,746233,746274,746902-746903,746925,746944,746968,746970,747279,747289,747802,748084,748090,748783,749262,749318,749863,750533,752073,752514,752555,752590,752609,752834,752836,752913,752932,753112-753113,753346,754645,754847,754927,755035,755226,755348,755370,755418,755426,755790,755905,755938,755986,755998,756352,757448,757624,757849,758156,758180,759398,759932,760502,760783,761046,761482,761632,762216,762879,763107,763502,764967,765016,765809,765951,771607,772844,772876,772884,772920,773889,776638,778962,778966,779893,781720,784661,785046,785569

Modified: hadoop/common/branches/branch-0.20-security-204/conf/log4j.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-204/conf/log4j.properties?rev=1101640&r1=1101639&r2=1101640&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-204/conf/log4j.properties (original)
+++ hadoop/common/branches/branch-0.20-security-204/conf/log4j.properties Tue May 10 20:52:35 2011
@@ -79,7 +79,8 @@ log4j.appender.DRFAS.File=${hadoop.log.d
 log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
 log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
 #new logger
-log4j.category.SecurityLogger=INFO,DRFAS
+log4j.logger.SecurityLogger=OFF,console
+log4j.logger.SecurityLogger.additivity=false
 
 #
 # Rolling File Appender
@@ -100,7 +101,7 @@ log4j.category.SecurityLogger=INFO,DRFAS
 # FSNamesystem Audit logging
 # All audit events are logged at INFO level
 #
-log4j.logger.org.apache.hadoop.fs.FSNamesystem.audit=WARN
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=WARN
 
 # Custom Logging levels
 
@@ -117,7 +118,7 @@ log4j.logger.org.jets3t.service.impl.res
 # Event Counter Appender
 # Sends counts of logging messages at different severity levels to Hadoop Metrics.
 #
-log4j.appender.EventCounter=org.apache.hadoop.log.EventCounter
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
 
 #
 # Job Summary Appender
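
A side note on the two renames above: log4j binds configuration to loggers by exact name, so the audit-log key has to track the class's package (FSNamesystem now lives under org.apache.hadoop.hdfs.server.namenode), and the appender class has to track the EventCounter package move (HADOOP-7246). A minimal sketch of how these names are resolved at runtime; the demo class itself is illustrative:

    import org.apache.log4j.Level;
    import org.apache.log4j.Logger;

    public class LoggerNameDemo {
      // The "log4j.logger.<name>" keys above must match these names exactly,
      // or the configured level and appenders are silently not applied.
      private static final Logger SECURITY_LOG =
          Logger.getLogger("SecurityLogger");
      private static final Logger AUDIT_LOG = Logger.getLogger(
          "org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit");

      public static void main(String[] args) {
        SECURITY_LOG.info("suppressed: the level above is OFF");
        if (AUDIT_LOG.isEnabledFor(Level.WARN)) {
          AUDIT_LOG.warn("emitted: audit logger is at WARN");
        }
      }
    }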

Modified: hadoop/common/branches/branch-0.20-security-204/ivy/libraries.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-204/ivy/libraries.properties?rev=1101640&r1=1101639&r2=1101640&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-204/ivy/libraries.properties (original)
+++ hadoop/common/branches/branch-0.20-security-204/ivy/libraries.properties Tue May 10 20:52:35 2011
@@ -14,7 +14,7 @@
 #It drives ivy and the generation of a maven POM
 
 # This is the version of hadoop we are generating
-hadoop.version=0.20.0
+hadoop.version=0.20.203.0
 hadoop-gpl-compression.version=0.1.0
 
 #These are the versions of our dependencies (in alphabetical order)

Modified: hadoop/common/branches/branch-0.20-security-204/src/ant/org/apache/hadoop/ant/condition/DfsBaseConditional.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-204/src/ant/org/apache/hadoop/ant/condition/DfsBaseConditional.java?rev=1101640&r1=1101639&r2=1101640&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-204/src/ant/org/apache/hadoop/ant/condition/DfsBaseConditional.java (original)
+++ hadoop/common/branches/branch-0.20-security-204/src/ant/org/apache/hadoop/ant/condition/DfsBaseConditional.java Tue May 10 20:52:35 2011
@@ -56,7 +56,7 @@ public abstract class DfsBaseConditional
 
   protected int postCmd(int exit_code) {
     exit_code = super.postCmd(exit_code);
-    result = exit_code == 1;
+    result = exit_code == 0;
     return exit_code;
   }
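
The flipped comparison brings postCmd in line with the Unix convention that an exit status of 0 means success, which is what the underlying fs shell command reports when the condition holds. A minimal sketch of the same convention outside Ant; the command invocation and class are illustrative:

    import java.io.IOException;

    public class ExitCodeDemo {
      public static void main(String[] args)
          throws IOException, InterruptedException {
        // "hadoop fs -test -e <path>" exits with 0 when the path exists.
        Process p = new ProcessBuilder(
            "hadoop", "fs", "-test", "-e", "/tmp").start();
        int exitCode = p.waitFor();
        boolean result = exitCode == 0;  // same test as the fixed postCmd()
        System.out.println("path exists: " + result);
      }
    }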
 

Modified: hadoop/common/branches/branch-0.20-security-204/src/c++/libhdfs/hdfs.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-204/src/c%2B%2B/libhdfs/hdfs.c?rev=1101640&r1=1101639&r2=1101640&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-204/src/c++/libhdfs/hdfs.c (original)
+++ hadoop/common/branches/branch-0.20-security-204/src/c++/libhdfs/hdfs.c Tue May 10 20:52:35 2011
@@ -2022,12 +2022,18 @@ hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs,
 
 void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries)
 {
-    //Free the mName
+    //Free the mName, mOwner, and mGroup
     int i;
     for (i=0; i < numEntries; ++i) {
         if (hdfsFileInfo[i].mName) {
             free(hdfsFileInfo[i].mName);
         }
+        if (hdfsFileInfo[i].mOwner) {
+            free(hdfsFileInfo[i].mOwner);
+        }
+        if (hdfsFileInfo[i].mGroup) {
+            free(hdfsFileInfo[i].mGroup);
+        }
     }
 
     //Free entire block

Modified: hadoop/common/branches/branch-0.20-security-204/src/c++/libhdfs/hdfsJniHelper.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-204/src/c%2B%2B/libhdfs/hdfsJniHelper.h?rev=1101640&r1=1101639&r2=1101640&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-204/src/c++/libhdfs/hdfsJniHelper.h (original)
+++ hadoop/common/branches/branch-0.20-security-204/src/c++/libhdfs/hdfsJniHelper.h Tue May 10 20:52:35 2011
@@ -30,8 +30,6 @@
 
 #define PATH_SEPARATOR ':'
 
-#define USER_CLASSPATH "/home/y/libexec/hadoop/conf:/home/y/libexec/hadoop/lib/hadoop-0.1.0.jar"
-
 
 /** Denote the method we want to invoke as STATIC or INSTANCE */
 typedef enum {

Modified: hadoop/common/branches/branch-0.20-security-204/src/contrib/build-contrib.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-204/src/contrib/build-contrib.xml?rev=1101640&r1=1101639&r2=1101640&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-204/src/contrib/build-contrib.xml (original)
+++ hadoop/common/branches/branch-0.20-security-204/src/contrib/build-contrib.xml Tue May 10 20:52:35 2011
@@ -23,13 +23,14 @@
 
   <property name="name" value="${ant.project.name}"/>
   <property name="root" value="${basedir}"/>
+  <property name="hadoop.root" location="${root}/../../../"/>
 
   <!-- Load all the default properties, and any the user wants    -->
   <!-- to contribute (without having to type -D or edit this file -->
   <property file="${user.home}/${name}.build.properties" />
   <property file="${root}/build.properties" />
+  <property file="${hadoop.root}/build.properties" />
 
-  <property name="hadoop.root" location="${root}/../../../"/>
   <property name="src.dir"  location="${root}/src/java"/>
   <property name="src.test" location="${root}/src/test"/>
   <!-- Property added for contrib system tests -->

Modified: hadoop/common/branches/branch-0.20-security-204/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/HadoopApplicationLaunchShortcut.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-204/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/HadoopApplicationLaunchShortcut.java?rev=1101640&r1=1101639&r2=1101640&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-204/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/HadoopApplicationLaunchShortcut.java (original)
+++ hadoop/common/branches/branch-0.20-security-204/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/HadoopApplicationLaunchShortcut.java Tue May 10 20:52:35 2011
@@ -32,7 +32,7 @@ import org.eclipse.debug.core.ILaunchCon
 import org.eclipse.jdt.core.IJavaProject;
 import org.eclipse.jdt.core.IType;
 import org.eclipse.jdt.core.JavaCore;
-import org.eclipse.jdt.internal.debug.ui.launcher.JavaApplicationLaunchShortcut;
+import org.eclipse.jdt.debug.ui.launchConfigurations.JavaApplicationLaunchShortcut;
 import org.eclipse.jdt.launching.IJavaLaunchConfigurationConstants;
 import org.eclipse.jdt.launching.IRuntimeClasspathEntry;
 import org.eclipse.jdt.launching.JavaRuntime;
@@ -64,7 +64,7 @@ public class HadoopApplicationLaunchShor
     // Find an existing or create a launch configuration (Standard way)
     ILaunchConfiguration iConf =
         super.findLaunchConfiguration(type, configType);
-
+    if (iConf == null) iConf = super.createConfiguration(type);
     ILaunchConfigurationWorkingCopy iConfWC;
     try {
       /*

Modified: hadoop/common/branches/branch-0.20-security-204/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/RunOnHadoopWizard.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-204/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/RunOnHadoopWizard.java?rev=1101640&r1=1101639&r2=1101640&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-204/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/RunOnHadoopWizard.java (original)
+++ hadoop/common/branches/branch-0.20-security-204/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/RunOnHadoopWizard.java Tue May 10 20:52:35 2011
@@ -159,9 +159,9 @@ public class RunOnHadoopWizard extends W
 
     // Write it to the disk file
     try {
-      // File confFile = File.createTempFile("hadoop-site-", ".xml",
+      // File confFile = File.createTempFile("core-site-", ".xml",
       // confDir);
-      File confFile = new File(confDir, "hadoop-site.xml");
+      File confFile = new File(confDir, "core-site.xml");
       FileOutputStream fos = new FileOutputStream(confFile);
       conf.writeXml(fos);
       fos.close();

Modified: hadoop/common/branches/branch-0.20-security-204/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairSchedulerServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-204/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairSchedulerServlet.java?rev=1101640&r1=1101639&r2=1101640&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-204/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairSchedulerServlet.java (original)
+++ hadoop/common/branches/branch-0.20-security-204/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairSchedulerServlet.java Tue May 10 20:52:35 2011
@@ -18,7 +18,9 @@
 
 package org.apache.hadoop.mapred;
 
+import java.io.ByteArrayOutputStream;
 import java.io.IOException;
+import java.io.OutputStream;
 import java.io.PrintWriter;
 import java.text.DateFormat;
 import java.text.SimpleDateFormat;
@@ -122,7 +124,12 @@ public class FairSchedulerServlet extend
     }
     // Print out the normal response
     response.setContentType("text/html");
-    PrintWriter out = new PrintWriter(response.getOutputStream());
+
+    // Because the client may read arbitrarily slowly, and we hold locks
+    // while writing the servlet output, we want to write to our own buffer,
+    // which we know won't block.
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintWriter out = new PrintWriter(baos);
     String hostname = StringUtils.simpleHostname(
         jobTracker.getJobTrackerMachine());
     out.print("<html><head>");
@@ -137,6 +144,11 @@ public class FairSchedulerServlet extend
     showAdminForm(out, advancedView);
     out.print("</body></html>\n");
     out.close();
+
+    // Flush our buffer to the real servlet output
+    OutputStream servletOut = response.getOutputStream();
+    baos.writeTo(servletOut);
+    servletOut.close();
   }
 
   /**
@@ -206,51 +218,53 @@ public class FairSchedulerServlet extend
     out.print("<th>Finished</th><th>Running</th><th>Fair Share</th>" +
         (advancedView ? "<th>Weight</th><th>Deficit</th><th>minReduces</th>" : ""));
     out.print("</tr>\n");
-    Collection<JobInProgress> runningJobs = jobTracker.getRunningJobs();
-    synchronized (scheduler) {
-      for (JobInProgress job: runningJobs) {
-        JobProfile profile = job.getProfile();
-        JobInfo info = scheduler.infos.get(job);
-        if (info == null) { // Job finished, but let's show 0's for info
-          info = new JobInfo();
-        }
-        out.print("<tr>\n");
-        out.printf("<td>%s</td>\n", DATE_FORMAT.format(
-            new Date(job.getStartTime())));
-        out.printf("<td><a href=\"jobdetails.jsp?jobid=%s\">%s</a></td>",
-            profile.getJobID(), profile.getJobID());
-        out.printf("<td>%s</td>\n", profile.getUser());
-        out.printf("<td>%s</td>\n", profile.getJobName());
-        out.printf("<td>%s</td>\n", generateSelect(
-            scheduler.getPoolManager().getPoolNames(),
-            scheduler.getPoolManager().getPoolName(job),
-            "/scheduler?setPool=<CHOICE>&jobid=" + profile.getJobID() +
-            (advancedView ? "&advanced" : "")));
-        out.printf("<td>%s</td>\n", generateSelect(
-            Arrays.asList(new String[]
-                {"VERY_LOW", "LOW", "NORMAL", "HIGH", "VERY_HIGH"}),
-            job.getPriority().toString(),
-            "/scheduler?setPriority=<CHOICE>&jobid=" + profile.getJobID() +
-            (advancedView ? "&advanced" : "")));
-        out.printf("<td>%d / %d</td><td>%d</td><td>%8.1f</td>\n",
-            job.finishedMaps(), job.desiredMaps(), info.runningMaps,
-            info.mapFairShare);
-        if (advancedView) {
-          out.printf("<td>%8.1f</td>\n", info.mapWeight);
-          out.printf("<td>%s</td>\n", info.neededMaps > 0 ?
-              (info.mapDeficit / 1000) + "s" : "--");
-          out.printf("<td>%d</td>\n", info.minMaps);
-        }
-        out.printf("<td>%d / %d</td><td>%d</td><td>%8.1f</td>\n",
-            job.finishedReduces(), job.desiredReduces(), info.runningReduces,
-            info.reduceFairShare);
-        if (advancedView) {
-          out.printf("<td>%8.1f</td>\n", info.reduceWeight);
-          out.printf("<td>%s</td>\n", info.neededReduces > 0 ?
-              (info.reduceDeficit / 1000) + "s" : "--");
-          out.printf("<td>%d</td>\n", info.minReduces);
+    synchronized (jobTracker) {
+      Collection<JobInProgress> runningJobs = jobTracker.getRunningJobs();
+      synchronized (scheduler) {
+        for (JobInProgress job: runningJobs) {
+          JobProfile profile = job.getProfile();
+          JobInfo info = scheduler.infos.get(job);
+          if (info == null) { // Job finished, but let's show 0's for info
+            info = new JobInfo();
+          }
+          out.print("<tr>\n");
+          out.printf("<td>%s</td>\n", DATE_FORMAT.format(
+                       new Date(job.getStartTime())));
+          out.printf("<td><a href=\"jobdetails.jsp?jobid=%s\">%s</a></td>",
+                     profile.getJobID(), profile.getJobID());
+          out.printf("<td>%s</td>\n", profile.getUser());
+          out.printf("<td>%s</td>\n", profile.getJobName());
+          out.printf("<td>%s</td>\n", generateSelect(
+                       scheduler.getPoolManager().getPoolNames(),
+                       scheduler.getPoolManager().getPoolName(job),
+                       "/scheduler?setPool=<CHOICE>&jobid=" + profile.getJobID() +
+                       (advancedView ? "&advanced" : "")));
+          out.printf("<td>%s</td>\n", generateSelect(
+                       Arrays.asList(new String[]
+                         {"VERY_LOW", "LOW", "NORMAL", "HIGH", "VERY_HIGH"}),
+                       job.getPriority().toString(),
+                       "/scheduler?setPriority=<CHOICE>&jobid=" + profile.getJobID() +
+                       (advancedView ? "&advanced" : "")));
+          out.printf("<td>%d / %d</td><td>%d</td><td>%8.1f</td>\n",
+                     job.finishedMaps(), job.desiredMaps(), info.runningMaps,
+                     info.mapFairShare);
+          if (advancedView) {
+            out.printf("<td>%8.1f</td>\n", info.mapWeight);
+            out.printf("<td>%s</td>\n", info.neededMaps > 0 ?
+                       (info.mapDeficit / 1000) + "s" : "--");
+            out.printf("<td>%d</td>\n", info.minMaps);
+          }
+          out.printf("<td>%d / %d</td><td>%d</td><td>%8.1f</td>\n",
+                     job.finishedReduces(), job.desiredReduces(), info.runningReduces,
+                     info.reduceFairShare);
+          if (advancedView) {
+            out.printf("<td>%8.1f</td>\n", info.reduceWeight);
+            out.printf("<td>%s</td>\n", info.neededReduces > 0 ?
+                       (info.reduceDeficit / 1000) + "s" : "--");
+            out.printf("<td>%d</td>\n", info.minReduces);
+          }
+          out.print("</tr>\n");
         }
-        out.print("</tr>\n");
       }
     }
     out.print("</table>\n");
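
The restructuring above does two things: it renders the page into a private in-memory buffer so that a slow HTTP client cannot stall the JobTracker while locks are held, and it widens the synchronization to take the JobTracker lock before the scheduler lock. A minimal sketch of the buffer-then-flush half of the pattern; the names are illustrative, not the servlet's API:

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.OutputStream;
    import java.io.PrintWriter;

    public class BufferedWriteDemo {
      // Render under the lock into memory, then copy to the possibly-slow
      // client stream only after the lock has been released.
      static void render(Object lock, OutputStream slowClient)
          throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        PrintWriter out = new PrintWriter(baos);
        synchronized (lock) {
          out.print("<html><body>report</body></html>\n");
        }
        out.close();              // flushes the PrintWriter into baos
        baos.writeTo(slowClient); // no lock held during the slow write
      }

      public static void main(String[] args) throws IOException {
        render(new Object(), System.out);
      }
    }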

Modified: hadoop/common/branches/branch-0.20-security-204/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-204/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java?rev=1101640&r1=1101639&r2=1101640&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-204/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java (original)
+++ hadoop/common/branches/branch-0.20-security-204/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java Tue May 10 20:52:35 2011
@@ -385,7 +385,11 @@ public abstract class PipeMapRed {
           if (now-lastStdoutReport > reporterOutDelay_) {
             lastStdoutReport = now;
             String hline = "Records R/W=" + numRecRead_ + "/" + numRecWritten_;
-            reporter.setStatus(hline);
+            if (!processProvidedStatus_) {
+              reporter.setStatus(hline);
+            } else {
+              reporter.progress();
+            }
             logprintln(hline);
             logflush();
           }
@@ -446,6 +450,7 @@ public abstract class PipeMapRed {
             if (matchesCounter(lineStr)) {
               incrCounter(lineStr);
             } else if (matchesStatus(lineStr)) {
+              processProvidedStatus_ = true;
               setStatus(lineStr);
             } else {
               LOG.warn("Cannot parse reporter line: " + lineStr);
@@ -671,4 +676,5 @@ public abstract class PipeMapRed {
   String LOGNAME;
   PrintStream log_;
 
+  volatile boolean processProvidedStatus_ = false;
 }
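
The new field is written by the thread that parses the subprocess's stderr (inside the reporter-line handling above) and read by the thread that drives the periodic status report, hence the volatile modifier for cross-thread visibility. A minimal sketch of the idiom; the class and methods are illustrative:

    public class VolatileFlagDemo {
      // Without volatile, the heartbeat thread might never observe the
      // write made by the stderr-processing thread.
      private volatile boolean processProvidedStatus = false;

      // Runs on the stderr-reader thread.
      void onReporterLine(String line) {
        if (line.startsWith("reporter:status:")) {
          processProvidedStatus = true;
        }
      }

      // Runs on the thread that reports progress periodically.
      void heartbeat() {
        if (processProvidedStatus) {
          System.out.println("keep the process's status; just progress()");
        } else {
          System.out.println("overwrite status with Records R/W counts");
        }
      }
    }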

Modified: hadoop/common/branches/branch-0.20-security-204/src/contrib/streaming/src/test/org/apache/hadoop/streaming/StderrApp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-204/src/contrib/streaming/src/test/org/apache/hadoop/streaming/StderrApp.java?rev=1101640&r1=1101639&r2=1101640&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-204/src/contrib/streaming/src/test/org/apache/hadoop/streaming/StderrApp.java (original)
+++ hadoop/common/branches/branch-0.20-security-204/src/contrib/streaming/src/test/org/apache/hadoop/streaming/StderrApp.java Tue May 10 20:52:35 2011
@@ -32,8 +32,16 @@ public class StderrApp
    * postWriteLines to stderr.
    */
   public static void go(int preWriteLines, int sleep, int postWriteLines) throws IOException {
+    go(preWriteLines, sleep, postWriteLines, false);
+  }
+  
+  public static void go(int preWriteLines, int sleep, int postWriteLines, boolean status) throws IOException {
     BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
     String line;
+    
+    if (status) {
+      System.err.println("reporter:status:starting echo");
+    }      
        
     while (preWriteLines > 0) {
       --preWriteLines;
@@ -57,13 +65,14 @@ public class StderrApp
 
   public static void main(String[] args) throws IOException {
     if (args.length < 3) {
-      System.err.println("Usage: StderrApp PREWRITE SLEEP POSTWRITE");
+      System.err.println("Usage: StderrApp PREWRITE SLEEP POSTWRITE [STATUS]");
       return;
     }
     int preWriteLines = Integer.parseInt(args[0]);
     int sleep = Integer.parseInt(args[1]);
     int postWriteLines = Integer.parseInt(args[2]);
+    boolean status = args.length > 3 ? Boolean.parseBoolean(args[3]) : false;
     
-    go(preWriteLines, sleep, postWriteLines);
+    go(preWriteLines, sleep, postWriteLines, status);
   }
 }
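
For context, a streaming task talks back to the framework through specially formatted lines on stderr: "reporter:status:<message>" updates the task's status string (this is what the new STATUS argument exercises), and "reporter:counter:<group>,<name>,<amount>" increments a counter. A tiny illustrative emitter:

    public class ReporterLineDemo {
      public static void main(String[] args) {
        // Picked up by PipeMapRed.matchesStatus() and passed to setStatus().
        System.err.println("reporter:status:starting echo");
        // Picked up by PipeMapRed.matchesCounter()/incrCounter().
        System.err.println("reporter:counter:MyGroup,RecordsSeen,1");
      }
    }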

Modified: hadoop/common/branches/branch-0.20-security-204/src/core/org/apache/hadoop/conf/Configuration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-204/src/core/org/apache/hadoop/conf/Configuration.java?rev=1101640&r1=1101639&r2=1101640&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-204/src/core/org/apache/hadoop/conf/Configuration.java (original)
+++ hadoop/common/branches/branch-0.20-security-204/src/core/org/apache/hadoop/conf/Configuration.java Tue May 10 20:52:35 2011
@@ -43,6 +43,7 @@ import java.util.Properties;
 import java.util.Set;
 import java.util.StringTokenizer;
 import java.util.WeakHashMap;
+import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -165,8 +166,8 @@ public class Configuration implements It
    * List of default Resources. Resources are loaded in the order of the list 
    * entries
    */
-  private static final ArrayList<String> defaultResources = 
-    new ArrayList<String>();
+  private static final CopyOnWriteArrayList<String> defaultResources =
+    new CopyOnWriteArrayList<String>();
   
   /**
    * Flag to indicate if the storage of resource which updates a key needs 
@@ -1356,7 +1357,7 @@ public class Configuration implements It
     return sb.toString();
   }
 
-  private void toString(ArrayList resources, StringBuffer sb) {
+  private void toString(List resources, StringBuffer sb) {
     ListIterator i = resources.listIterator();
     while (i.hasNext()) {
       if (i.nextIndex() != 0) {
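
This is the HADOOP-6269 fix: defaultResources could be appended to by one thread while another thread iterated it, and CopyOnWriteArrayList makes every iteration work on a point-in-time snapshot instead of failing with ConcurrentModificationException. A minimal standalone sketch of the difference, with demo names only:

    import java.util.List;
    import java.util.concurrent.CopyOnWriteArrayList;

    public class SnapshotIterationDemo {
      public static void main(String[] args) throws InterruptedException {
        final List<String> resources = new CopyOnWriteArrayList<String>();
        resources.add("core-default.xml");

        Thread adder = new Thread(new Runnable() {
          public void run() {
            // Safe during another thread's iteration; with a plain
            // ArrayList this could throw ConcurrentModificationException.
            resources.add("core-site.xml");
          }
        });
        adder.start();

        for (String r : resources) { // iterates a point-in-time snapshot
          System.out.println(r);
        }
        adder.join();
      }
    }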

Modified: hadoop/common/branches/branch-0.20-security-204/src/core/org/apache/hadoop/io/compress/CodecPool.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-204/src/core/org/apache/hadoop/io/compress/CodecPool.java?rev=1101640&r1=1101639&r2=1101640&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-204/src/core/org/apache/hadoop/io/compress/CodecPool.java (original)
+++ hadoop/common/branches/branch-0.20-security-204/src/core/org/apache/hadoop/io/compress/CodecPool.java Tue May 10 20:52:35 2011
@@ -141,6 +141,10 @@ public class CodecPool {
     if (compressor == null) {
       return;
     }
+    // if the compressor can't be reused, don't pool it.
+    if (compressor.getClass().isAnnotationPresent(DoNotPool.class)) {
+      return;
+    }
     compressor.reset();
     payback(compressorPool, compressor);
   }
@@ -155,6 +159,10 @@ public class CodecPool {
     if (decompressor == null) {
       return;
     }
+    // if the decompressor can't be reused, don't pool it.
+    if (decompressor.getClass().isAnnotationPresent(DoNotPool.class)) {
+      return;
+    }
     decompressor.reset();
     payback(decompressorPool, decompressor);
   }
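
The DoNotPool annotation checked here is added by this merge (copied unchanged from branch-0.20-security-203, so its source does not appear in this diff). For Class.isAnnotationPresent to see it at runtime it must have RUNTIME retention; a plausible sketch of such a marker, not the actual committed file:

    import java.lang.annotation.ElementType;
    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;
    import java.lang.annotation.Target;

    /**
     * Marker for (de)compressors, such as BuiltInGzipDecompressor, that
     * hold stream state and therefore must not be returned to the
     * CodecPool for reuse.
     */
    @Retention(RetentionPolicy.RUNTIME) // required for isAnnotationPresent()
    @Target(ElementType.TYPE)
    public @interface DoNotPool {
    }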

Modified: hadoop/common/branches/branch-0.20-security-204/src/core/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-204/src/core/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java?rev=1101640&r1=1101639&r2=1101640&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-204/src/core/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java (original)
+++ hadoop/common/branches/branch-0.20-security-204/src/core/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java Tue May 10 20:52:35 2011
@@ -24,12 +24,14 @@ import java.util.zip.Inflater;
 import java.util.zip.CRC32;
 
 import org.apache.hadoop.io.compress.Decompressor;
+import org.apache.hadoop.io.compress.DoNotPool;
 
 /**
  * A {@link Decompressor} based on the popular gzip compressed file format.
  * http://www.gzip.org/
  *
  */
+@DoNotPool
 public class BuiltInGzipDecompressor implements Decompressor {
   private static final int GZIP_MAGIC_ID = 0x8b1f;  // if read as LE short int
   private static final int GZIP_DEFLATE_METHOD = 8;

Modified: hadoop/common/branches/branch-0.20-security-204/src/core/overview.html
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-204/src/core/overview.html?rev=1101640&r1=1101639&r2=1101640&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-204/src/core/overview.html (original)
+++ hadoop/common/branches/branch-0.20-security-204/src/core/overview.html Tue May 10 20:52:35 2011
@@ -114,7 +114,7 @@ be demonstrated as follows:</p>
 <tt>
 mkdir input<br>
 cp conf/*.xml input<br>
-bin/hadoop jar hadoop-*-examples.jar grep input output 'dfs[a-z.]+'<br>
+bin/hadoop jar hadoop-examples-*.jar grep input output 'dfs[a-z.]+'<br>
 cat output/*
 </tt>
 <p>This will display counts for each match of the <a
@@ -217,7 +217,7 @@ command, run on the master node:</p>
 examine it:</p>
 
 <tt>
-bin/hadoop jar hadoop-*-examples.jar grep input output 'dfs[a-z.]+'<br>
+bin/hadoop jar hadoop-examples-*.jar grep input output 'dfs[a-z.]+'<br>
 bin/hadoop fs -get output output
 cat output/*
 </tt>

Modified: hadoop/common/branches/branch-0.20-security-204/src/docs/src/documentation/content/xdocs/capacity_scheduler.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-204/src/docs/src/documentation/content/xdocs/capacity_scheduler.xml?rev=1101640&r1=1101639&r2=1101640&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-204/src/docs/src/documentation/content/xdocs/capacity_scheduler.xml (original)
+++ hadoop/common/branches/branch-0.20-security-204/src/docs/src/documentation/content/xdocs/capacity_scheduler.xml Tue May 10 20:52:35 2011
@@ -20,7 +20,7 @@
 <document>
   
   <header>
-    <title>Capacity Scheduler Guide</title>
+    <title>CapacityScheduler Guide</title>
   </header>
   
   <body>
@@ -28,93 +28,127 @@
     <section>
       <title>Purpose</title>
       
-      <p>This document describes the Capacity Scheduler, a pluggable 
-      MapReduce scheduler for Hadoop which provides a way to share 
-      large clusters.</p>
+      <p>This document describes the CapacityScheduler, a pluggable 
+      MapReduce scheduler for Hadoop which allows multiple tenants to 
+      securely share a large cluster such that their applications are allocated
+      resources in a timely manner, under the constraints of allocated capacities.
+      </p>
+    </section>
+    
+    <section>
+      <title>Overview</title>
+     
+      <p>The CapacityScheduler is designed to run Hadoop Map-Reduce as a 
+      shared, multi-tenant cluster in an operator-friendly manner, 
+      maximizing the throughput and the utilization of the cluster while
+      running Map-Reduce applications.</p>
+     
+      <p>Traditionally each organization has its own private set of compute 
+      resources that have sufficient capacity to meet the organization's SLA 
+      under peak or near-peak conditions. This generally leads to poor average 
+      utilization and the overhead of managing multiple independent clusters, 
+      one per organization. Sharing clusters between organizations is a 
+      cost-effective way of running large Hadoop installations since it 
+      allows them to reap the benefits of economies of scale without creating 
+      private clusters. However, organizations are concerned about sharing a 
+      cluster because they are worried about others using the resources that 
+      are critical for their SLAs.</p> 
+
+      <p>The CapacityScheduler is designed to allow sharing a large cluster 
+      while giving each organization a minimum capacity guarantee. The central 
+      idea is that the available resources in the Hadoop Map-Reduce cluster are 
+      partitioned among multiple organizations who collectively fund the 
+      cluster based on computing needs. There is an added benefit that an 
+      organization can access any excess capacity not being used by others. This 
+      provides elasticity for the organizations in a cost-effective manner.</p> 
+
+      <p>Sharing clusters across organizations necessitates strong support for
+      multi-tenancy since each organization must be guaranteed capacity and 
+      safeguards to ensure the shared cluster is impervious to a single rogue 
+      job or user. The CapacityScheduler provides a stringent set of limits to 
+      ensure that a single job, user, or queue cannot consume a disproportionate 
+      amount of resources in the cluster. Also, the JobTracker of the cluster,  
+      in particular, is a precious resource and the CapacityScheduler provides 
+      limits on initialized/pending tasks and jobs from a single user and queue 
+      to ensure fairness and stability of the cluster.</p> 
+
+      <p>The primary abstraction provided by the CapacityScheduler is the 
+      concept of <em>queues</em>. These queues are typically set up by administrators
+      to reflect the economics of the shared cluster.</p>
     </section>
     
     <section>
       <title>Features</title>
       
-      <p>The Capacity Scheduler supports the following features:</p> 
+      <p>The CapacityScheduler supports the following features:</p> 
       <ul>
         <li>
-          Support for multiple queues, where a job is submitted to a queue.
+          Capacity Guarantees - Support for multiple queues, where a job is 
+          submitted to a queue. Queues are allocated a fraction of the capacity 
+          of the grid in the sense that a certain capacity of resources will be 
+          at their disposal. All jobs submitted to a queue will have access to 
+          the capacity allocated to the queue. Administrators can configure soft 
+          limits and optional hard limits on the capacity allocated to each queue. 
         </li>
         <li>
-          Queues are allocated a fraction of the capacity of the grid in the 
-          sense that a certain capacity of resources will be at their 
-          disposal. All jobs submitted to a queue will have access to the 
-          capacity allocated to the queue.
+          Security - Each queue has strict ACLs which control which users can 
+          submit jobs to individual queues. Also, there are safeguards to 
+          ensure that users cannot view and/or modify jobs from other users if
+          so desired. In addition, per-queue and system administrator roles are 
+          supported.
         </li>
         <li>
-          Free resources can be allocated to any queue beyond it's capacity. 
-          When there is demand for these resources from queues running below 
-          capacity at a future point in time, as tasks scheduled on these 
+          Elasticity - Free resources can be allocated to any queue beyond its 
+          capacity. When there is demand for these resources from queues running 
+          below capacity at a future point in time, as tasks scheduled on these 
           resources complete, they will be assigned to jobs on queues 
-          running below the capacity.
+          running below the capacity. This ensures that resources are available 
+          in a predictable and elastic manner to queues, thus preventing 
+          artificial silos of resources in the cluster, which helps utilization.
         </li>
         <li>
-          Queues optionally support job priorities (disabled by default).
+          Multi-tenancy - A comprehensive set of limits is provided to prevent 
+          a single job, user, or queue from monopolizing resources of the queue 
+          or the cluster as a whole to ensure that the system, particularly the 
+          JobTracker, isn't overwhelmed by too many tasks or jobs. 
         </li>
         <li>
-          Within a queue, jobs with higher priority will have access to the 
-          queue's resources before jobs with lower priority. However, once a 
-          job is running, it will not be preempted for a higher priority job,
-          though new tasks from the higher priority job will be 
-          preferentially scheduled.
+          Operability - The queue definitions and properties can be changed, 
+          at runtime, by administrators in a secure manner to minimize 
+          disruption to users. Also, a console is provided for users and 
+          administrators to view current allocation of resources to various 
+          queues in the system.
         </li>
         <li>
-          In order to prevent one or more users from monopolizing its 
-          resources, each queue enforces a limit on the percentage of 
-          resources allocated to a user at any given time, if there is 
-          competition for them.  
+          Resource-based Scheduling - Support for resource-intensive jobs, 
+          wherein a job can optionally specify higher resource requirements than 
+          the default, thereby accommodating applications with differing resource
+          requirements. Currently, memory is the resource requirement 
+          supported.
         </li>
         <li>
-          Support for memory-intensive jobs, wherein a job can optionally 
-          specify higher memory-requirements than the default, and the tasks 
-          of the job will only be run on TaskTrackers that have enough memory 
-          to spare.
+          Job Priorities - Queues optionally support job priorities (disabled 
+          by default). Within a queue, jobs with higher priority will have 
+          access to the queue's resources before jobs with lower priority. 
+          However, once a job is running, it will not be preempted for a higher 
+          priority job; <em>preemption</em> is on the roadmap but is currently not 
+          supported.
         </li>
       </ul>
     </section>
     
     <section>
-      <title>Picking a task to run</title>
-      
-      <p>Note that many of these steps can be, and will be, enhanced over time
-      to provide better algorithms.</p>
-      
-      <p>Whenever a TaskTracker is free, the Capacity Scheduler picks 
-      a queue which has most free space (whose ratio of # of running slots to 
-      capacity is the lowest).</p>
-      
-      <p>Once a queue is selected, the Scheduler picks a job in the queue. Jobs
-      are sorted based on when they're submitted and their priorities (if the 
-      queue supports priorities). Jobs are considered in order, and a job is 
-      selected if its user is within the user-quota for the queue, i.e., the 
-      user is not already using queue resources above his/her limit. The 
-      Scheduler also makes sure that there is enough free memory in the 
-      TaskTracker to tun the job's task, in case the job has special memory
-      requirements.</p>
-      
-      <p>Once a job is selected, the Scheduler picks a task to run. This logic 
-      to pick a task remains unchanged from earlier versions.</p> 
-      
-    </section>
-    
-    <section>
       <title>Installation</title>
       
-        <p>The Capacity Scheduler is available as a JAR file in the Hadoop
+        <p>The CapacityScheduler is available as a JAR file in the Hadoop
         tarball under the <em>contrib/capacity-scheduler</em> directory. The name of 
-        the JAR file would be on the lines of hadoop-*-capacity-scheduler.jar.</p>
+        the JAR file would be along the lines of hadoop-capacity-scheduler-*.jar.</p>
         <p>You can also build the Scheduler from source by executing
         <em>ant package</em>, in which case it would be available under
         <em>build/contrib/capacity-scheduler</em>.</p>
-        <p>To run the Capacity Scheduler in your Hadoop installation, you need 
+        <p>To run the CapacityScheduler in your Hadoop installation, you need 
         to put it on the <em>CLASSPATH</em>. The easiest way is to copy the 
-        <code>hadoop-*-capacity-scheduler.jar</code> from 
+        <code>hadoop-capacity-scheduler-*.jar</code> 
         to <code>HADOOP_HOME/lib</code>. Alternatively, you can modify 
         <em>HADOOP_CLASSPATH</em> to include this jar, in 
         <code>conf/hadoop-env.sh</code>.</p>
@@ -124,9 +158,9 @@
       <title>Configuration</title>
 
       <section>
-        <title>Using the Capacity Scheduler</title>
+        <title>Using the CapacityScheduler</title>
         <p>
-          To make the Hadoop framework use the Capacity Scheduler, set up
+          To make the Hadoop framework use the CapacityScheduler, set up
           the following property in the site configuration:</p>
           <table>
             <tr>
@@ -144,14 +178,22 @@
         <title>Setting up queues</title>
         <p>
           You can define multiple queues to which users can submit jobs with
-          the Capacity Scheduler. To define multiple queues, you should edit
-          the site configuration for Hadoop and modify the
-          <em>mapred.queue.names</em> property.
+          the CapacityScheduler. To define multiple queues, you should use the  
+          <em>mapred.queue.names</em> property in 
+          <code>conf/hadoop-site.xml</code>.
         </p>
+        
+        <p>
+          The CapacityScheduler can be configured with several properties
+          for each queue that control the behavior of the Scheduler. This
+          configuration is in the <em>conf/capacity-scheduler.xml</em>.
+        </p>
+        
         <p>
           You can also configure ACLs for controlling which users or groups
-          have access to the queues.
+          have access to the queues in <code>conf/mapred-queue-acls.xml</code>.
         </p>
+        
         <p>
           For more details, refer to
           <a href="cluster_setup.html#Configuring+the+Hadoop+Daemons">Cluster 
@@ -160,25 +202,12 @@
       </section>
   
       <section>
-        <title>Configuring properties for queues</title>
+        <title>Queue properties</title>
 
-        <p>The Capacity Scheduler can be configured with several properties
-        for each queue that control the behavior of the Scheduler. This
-        configuration is in the <em>conf/capacity-scheduler.xml</em>. By
-        default, the configuration is set up for one queue, named 
-        <em>default</em>.</p>
-        <p>To specify a property for a queue that is defined in the site
-        configuration, you should use the property name as
-        <em>mapred.capacity-scheduler.queue.&lt;queue-name&gt;.&lt;property-name&gt;</em>.
-        </p>
-        <p>For example, to define the property <em>capacity</em>
-        for queue named <em>research</em>, you should specify the property
-        name as 
-        <em>mapred.capacity-scheduler.queue.research.capacity</em>.
-        </p>
-
-        <p>The properties defined for queues and their descriptions are
-        listed in the table below:</p>
+        <section>
+        <title>Resource allocation</title>
+        <p>The properties defined for resource allocations to queues and their 
+        descriptions are listed in the table below:</p>
 
         <table>
           <tr><th>Name</th><th>Description</th></tr>
@@ -187,25 +216,8 @@
             to be available for jobs in this queue. The sum of capacities 
            for all queues should be less than or equal to 100.</td>
           </tr>
-          <tr><td>mapred.capacity-scheduler.queue.&lt;queue-name&gt;.supports-priority</td>
-          	<td>If true, priorities of jobs will be taken into account in scheduling 
-          	decisions.</td>
-          </tr>
-          <tr><td>mapred.capacity-scheduler.queue.&lt;queue-name&gt;.minimum-user-limit-percent</td>
-          	<td>Each queue enforces a limit on the percentage of resources 
-          	allocated to a user at any given time, if there is competition 
-          	for them. This user limit can vary between a minimum and maximum 
-          	value. The former depends on the number of users who have submitted
-          	jobs, and the latter is set to this property value. For example, 
-          	suppose the value of this property is 25. If two users have 
-          	submitted jobs to a queue, no single user can use more than 50% 
-          	of the queue resources. If a third user submits a job, no single 
-          	user can use more than 33% of the queue resources. With 4 or more 
-          	users, no user can use more than 25% of the queue's resources. A 
-          	value of 100 implies no user limits are imposed.</td>
-          </tr>
           <tr><td>mapred.capacity-scheduler.queue.&lt;queue-name&gt;.maximum-capacity</td>
-          	<td>
+            <td>
                   maximum-capacity defines a limit beyond which a queue cannot
                  use the capacity of the cluster. This provides a means to limit
                   how much excess capacity a queue can use. By default, there
@@ -228,137 +240,175 @@
                   absolute terms would increase accordingly.
                 </td>
           </tr>
-        </table>
-      </section>
-      
-      <section>
-        <title>Memory management</title>
-      
-        <p>The Capacity Scheduler supports scheduling of tasks on a
-        <code>TaskTracker</code>(TT) based on a job's memory requirements
-        and the availability of RAM and Virtual Memory (VMEM) on the TT node.
-        See the <a href="mapred_tutorial.html#Memory+monitoring"> 
-        MapReduce Tutorial</a> for details on how the TT monitors
-        memory usage.</p>
-        <p>Currently the memory based scheduling is only supported
-        in Linux platform.</p>
-        <p>Memory-based scheduling works as follows:</p>
-        <ol>
-          <li>The absence of any one or more of three config parameters 
-          or -1 being set as value of any of the parameters, 
-          <code>mapred.tasktracker.vmem.reserved</code>, 
-          <code>mapred.task.default.maxvmem</code>, or
-          <code>mapred.task.limit.maxvmem</code>, disables memory-based
-          scheduling, just as it disables memory monitoring for a TT. These
-          config parameters are described in the 
-          <a href="mapred_tutorial.html#Memory+monitoring">MapReduce 
-          Tutorial</a>. The value of  
-          <code>mapred.tasktracker.vmem.reserved</code> is 
-          obtained from the TT via its heartbeat. 
-          </li>
-          <li>If all the three mandatory parameters are set, the Scheduler 
-          enables VMEM-based scheduling. First, the Scheduler computes the free
-          VMEM on the TT. This is the difference between the available VMEM on the
-          TT (the node's total VMEM minus the offset, both of which are sent by 
-          the TT on each heartbeat)and the sum of VMs already allocated to 
-          running tasks (i.e., sum of the VMEM task-limits). Next, the Scheduler
-          looks at the VMEM requirements for the job that's first in line to 
-          run. If the job's VMEM requirements are less than the available VMEM on 
-          the node, the job's task can be scheduled. If not, the Scheduler 
-          ensures that the TT does not get a task to run (provided the job 
-          has tasks to run). This way, the Scheduler ensures that jobs with 
-          high memory requirements are not starved, as eventually, the TT 
-          will have enough VMEM available. If the high-mem job does not have 
-          any task to run, the Scheduler moves on to the next job. 
-          </li>
-          <li>In addition to VMEM, the Capacity Scheduler can also consider 
-          RAM on the TT node. RAM is considered the same way as VMEM. TTs report
-          the total RAM available on their node, and an offset. If both are
-          set, the Scheduler computes the available RAM on the node. Next, 
-          the Scheduler figures out the RAM requirements of the job, if any. 
-          As with VMEM, users can optionally specify a RAM limit for their job
-          (<code>mapred.task.maxpmem</code>, described in the MapReduce 
-          Tutorial). The Scheduler also maintains a limit for this value 
-          (<code>mapred.capacity-scheduler.task.default-pmem-percentage-in-vmem</code>, 
-          described below). All these three values must be set for the 
-          Scheduler to schedule tasks based on RAM constraints.
-          </li>
-          <li>The Scheduler ensures that jobs cannot ask for RAM or VMEM higher
-          than configured limits. If this happens, the job is failed when it
-          is submitted. 
-          </li>
-        </ol>
-        
-        <p>As described above, the additional scheduler-based config 
-        parameters are as follows:</p>
-
-        <table>
-          <tr><th>Name</th><th>Description</th></tr>
-          <tr><td>mapred.capacity-scheduler.task.default-pmem-percentage-in-vmem</td>
-          	<td>A percentage of the default VMEM limit for jobs
-          	(<code>mapred.task.default.maxvmem</code>). This is the default 
-          	RAM task-limit associated with a task. Unless overridden by a 
-          	job's setting, this number defines the RAM task-limit.</td>
-          </tr>
-          <tr><td>mapred.capacity-scheduler.task.limit.maxpmem</td>
-          <td>Configuration which provides an upper limit to maximum physical
-           memory which can be specified by a job. If a job requires more 
-           physical memory than what is specified in this limit then the same
-           is rejected.</td>
+          <tr><td>mapred.capacity-scheduler.queue.&lt;queue-name&gt;.minimum-user-limit-percent</td>
+          	<td>Each queue enforces a limit on the percentage of resources 
+          	allocated to a user at any given time, if there is competition 
+          	for them. This user limit can vary between a minimum and maximum 
+          	value. The former depends on the number of users who have submitted
+          	jobs, and the latter is set to this property value. For example, 
+          	suppose the value of this property is 25. If two users have 
+          	submitted jobs to a queue, no single user can use more than 50% 
+          	of the queue resources. If a third user submits a job, no single 
+          	user can use more than 33% of the queue resources. With 4 or more 
+          	users, no user can use more than 25% of the queue's resources. A 
+          	value of 100 implies no user limits are imposed.</td>
+          </tr>
+          <tr><td>mapred.capacity-scheduler.queue.&lt;queue-name&gt;.user-limit-factor</td>
+            <td>The multiple of the queue capacity which can be configured to 
+              allow a single user to acquire more slots. By default this is set 
+              to 1, which ensures that a single user can never take more than the 
+              queue's configured capacity, irrespective of how idle the cluster 
+              is. For example, with a queue capacity of 10% and a user-limit-factor 
+              of 2, a single user could acquire slots up to 20% of the cluster.</td>
+          </tr>
+          <tr><td>mapred.capacity-scheduler.queue.&lt;queue-name&gt;.supports-priority</td>
+            <td>If true, priorities of jobs will be taken into account in scheduling 
+            decisions.</td>
           </tr>
         </table>
-      </section>
+   </section>
    <section>
-        <title>Job Initialization Parameters</title>
+        <title>Job initialization</title>
        <p>The CapacityScheduler lazily initializes jobs before they are
        scheduled, to reduce the memory footprint on the JobTracker. 
-        Following are the parameters, by which you can control the laziness
-        of the job initialization. The following parameters can be 
-        configured in capacity-scheduler.xml
+        The following parameters control the per-queue
+        initialization of jobs.
         </p>
         
         <table>
           <tr><th>Name</th><th>Description</th></tr>
           <tr>
             <td>
-              mapred.capacity-scheduler.queue.&lt;queue-name&gt;.maximum-initialized-jobs-per-user
+              mapred.capacity-scheduler.maximum-system-jobs
             </td>
             <td>
-              Maximum number of jobs which are allowed to be pre-initialized for
-              a particular user in the queue. Once a job is scheduled, i.e. 
-              it starts running, then that job is not considered
-              while scheduler computes the maximum job a user is allowed to
-              initialize. 
+              The maximum number of jobs in the system which can be initialized
+              concurrently by the CapacityScheduler.
+              
+              Individual queue limits on initialized jobs are directly 
+              proportional to their queue capacities.
             </td>
           </tr>
           <tr>
             <td>
-              mapred.capacity-scheduler.init-poll-interval
+              mapred.capacity-scheduler.queue.&lt;queue-name&gt;.maximum-initialized-active-tasks
             </td>
             <td>
-              Amount of time in miliseconds which is used to poll the scheduler
-              job queue to look for jobs to be initialized.
+              The maximum number of tasks, across all jobs in the queue, 
+              which can be initialized concurrently. Once the queue's jobs 
+              exceed this limit, they will be queued on disk.
             </td>
           </tr>
           <tr>
             <td>
-              mapred.capacity-scheduler.init-worker-threads
+              mapred.capacity-scheduler.queue.&lt;queue-name&gt;.maximum-initialized-active-tasks-per-user
             </td>
             <td>
-              Number of worker threads which would be used by Initialization
-              poller to initialize jobs in a set of queue. If number mentioned 
-              in property is equal to number of job queues then a thread is 
-              assigned jobs from one queue. If the number configured is lesser than
-              number of queues, then a thread can get jobs from more than one queue
-              which it initializes in a round robin fashion. If the number configured
-              is greater than number of queues, then number of threads spawned
-              would be equal to number of job queues.
+              The maximum number of tasks per user, across all of the
+              user's jobs in the queue, which can be initialized concurrently. 
+              Once the user's jobs exceed this limit, they will be queued on disk.
             </td>
           </tr>
+          <tr>
+            <td> 
+              mapred.capacity-scheduler.queue.&lt;queue-name&gt;.init-accept-jobs-factor
+            </td>
+            <td>
+              The multiple of (maximum-system-jobs * queue-capacity) used to
+              determine the number of jobs which are accepted by the scheduler. 
+              The default value is 10. If the number of jobs submitted to the
+              queue exceeds this limit, further job submissions are rejected
+              (see the worked example below this table). 
+            </td>
+          </tr> 
         </table>
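+        <p>As a worked example (the numbers are illustrative assumptions):
+        if <code>mapred.capacity-scheduler.maximum-system-jobs</code> is 3000
+        and a queue is assigned 10% of the cluster capacity, then with the
+        default <code>init-accept-jobs-factor</code> of 10 the queue accepts
+        at most 10 * (3000 * 0.10) = 3000 jobs before rejecting further
+        submissions.</p>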
       </section>   
+      </section>
+      
+      <section>
+        <title>Resource based scheduling</title>
+      
+        <p>The CapacityScheduler supports scheduling of tasks on a
+        <code>TaskTracker</code> (TT), based on a job's memory requirements
+        in terms of RAM and Virtual Memory (VMEM) on the TT node.
+        A TT is conceptually composed of a fixed number of map and reduce
+        slots with a fixed slot size across the cluster. A job can ask for one
+        or more slots for each of its component map and/or reduce tasks. If a
+        task consumes more memory than configured, the TT forcibly kills the task.
+        </p>
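+
+        <p>For illustration (with hypothetical values): if the cluster map
+        slot size, <code>mapred.cluster.map.memory.mb</code>, is 1024 and a
+        job sets <code>mapred.job.map.memory.mb</code> to 2560, each map task
+        of that job is rounded up to the next multiple of the slot size,
+        3072, and thus occupies 3 map slots on a TT.</p>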
+
+        <p>Currently, memory-based scheduling is only supported
+        on the Linux platform.</p>
+        
+        <p>The additional scheduler-related configuration
+        parameters are as follows (a configuration sketch follows the
+        table):</p>
+
+        <table>
+          <tr><th>Name</th><th>Description</th></tr>
+          <tr>
+            <td>mapred.cluster.map.memory.mb</td>
+          	 <td>The size, in terms of virtual memory, of a single map slot
+             in the Map-Reduce framework, used by the scheduler.
+             A job can ask for multiple slots for a single map task via
+             <code>mapred.job.map.memory.mb</code>, up to the limit specified by
+             <code>mapred.cluster.max.map.memory.mb</code>, if the scheduler 
+             supports the feature.
+             The value of -1 indicates that this feature is turned off.
+          	 </td>
+          </tr>
+          <tr>
+            <td>mapred.cluster.reduce.memory.mb</td>
+             <td>The size, in terms of virtual memory, of a single reduce slot
+             in the Map-Reduce framework, used by the scheduler.
+             A job can ask for multiple slots for a single reduce task via
+             <code>mapred.job.reduce.memory.mb</code>, up to the limit specified by
+             <code>mapred.cluster.max.reduce.memory.mb</code>, if the scheduler supports the 
+             feature. The value of -1 indicates that this feature is turned off.
+             </td>
+          </tr>
+          <tr>
+            <td>mapred.cluster.max.map.memory.mb</td>
+            <td>The maximum size, in terms of virtual memory, of a single map
+            task launched by the Map-Reduce framework, used by the scheduler.
+            A job can ask for multiple slots for a single map task via
+            <code>mapred.job.map.memory.mb</code>, up to the limit specified by
+            <code>mapred.cluster.max.map.memory.mb</code>, if the scheduler supports the 
+            feature. The value of -1 indicates that this feature is turned off.
+            </td>
+          </tr>
+          <tr>
+            <td>mapred.cluster.max.reduce.memory.mb</td>
+            <td>The maximum size, in terms of virtual memory, of a single reduce
+            task launched by the Map-Reduce framework, used by the scheduler.
+            A job can ask for multiple slots for a single reduce task via
+            <code>mapred.job.reduce.memory.mb</code>, up to the limit specified by
+            <code>mapred.cluster.max.reduce.memory.mb</code>, if the scheduler supports the 
+            feature. The value of -1 indicates that this feature is turned off.
+            </td>
+          </tr>
+          <tr>
+            <td>mapred.job.map.memory.mb</td>
+            <td>The size, in terms of virtual memory, of a single map task
+            for the job. A job can ask for multiple slots for a single map task, 
+            rounded up to the next multiple of <code>mapred.cluster.map.memory.mb</code> and 
+            up to the limit specified by <code>mapred.cluster.max.map.memory.mb</code>, 
+            if the scheduler supports the feature. The value of -1 indicates 
+            that this feature is turned off if, and only if, <code>mapred.cluster.map.memory.mb</code> is 
+            also turned off (-1).</td>
+          </tr>
+          <tr>
+            <td>mapred.job.reduce.memory.mb</td>
+            <td>The size, in terms of virtual memory, of a single reduce task
+            for the job. A job can ask for multiple slots for a single reduce task, 
+            rounded up to the next multiple of <code>mapred.cluster.reduce.memory.mb</code> and 
+            up to the limit specified by <code>mapred.cluster.max.reduce.memory.mb</code>, 
+            if the scheduler supports the feature. The value of -1 indicates 
+            that this feature is turned off if, and only if, <code>mapred.cluster.reduce.memory.mb</code> is 
+            also turned off (-1).</td>
+          </tr>
+        </table>
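+
+        <p>A minimal sketch of how these parameters might be set (the values
+        below are illustrative assumptions, not recommendations; the
+        cluster-wide slot sizes belong in the cluster configuration, while
+        the per-job request is set in the job's configuration):</p>
+        <table>
+        <tr>
+        <td>
+<code>&lt;property&gt;</code><br/>
+<code>  &nbsp;&nbsp;&lt;name&gt;mapred.cluster.map.memory.mb&lt;/name&gt;</code><br/>
+<code>  &nbsp;&nbsp;&lt;value&gt;1024&lt;/value&gt;</code><br/>
+<code>&lt;/property&gt;</code><br/>
+<code>&lt;property&gt;</code><br/>
+<code>  &nbsp;&nbsp;&lt;name&gt;mapred.cluster.max.map.memory.mb&lt;/name&gt;</code><br/>
+<code>  &nbsp;&nbsp;&lt;value&gt;4096&lt;/value&gt;</code><br/>
+<code>&lt;/property&gt;</code><br/>
+<code>&lt;property&gt;</code><br/>
+<code>  &nbsp;&nbsp;&lt;name&gt;mapred.job.map.memory.mb&lt;/name&gt;</code><br/>
+<code>  &nbsp;&nbsp;&lt;value&gt;2048&lt;/value&gt;</code><br/>
+<code>&lt;/property&gt;</code><br/>
+        </td>
+        </tr>
+        </table>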
+      </section>
+      
       <section>
-        <title>Reviewing the configuration of the Capacity Scheduler</title>
+        <title>Reviewing the configuration of the CapacityScheduler</title>
         <p>
          Once the installation and configuration are completed, you can review
          the setup after starting the MapReduce cluster from the admin UI.
@@ -370,10 +420,218 @@
               Information</em> section of the page.</li>
           <li>The properties for the queues should be visible in the <em>Scheduling
               Information</em> column against each queue.</li>
+          <li>The /scheduler web page should show the resource usage of 
+              individual queues.</li>
         </ul>
       </section>
       
    </section>
+
+  <section>
+    <title>Example</title>
+    <p>Here is a practical example of configuring the CapacityScheduler
+    (a few observations follow the listing):</p>
+    <table>
+    <tr>
+    <td>
+<code>&lt;?xml version="1.0"?&gt;</code><br/>
+<br/>
+<code>&lt;configuration&gt;</code><br/>
+<br/>
+<code>  &lt;!-- system limit, across all queues --&gt;</code><br/>
+<br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.maximum-system-jobs&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;3000&lt;/value&gt;</code><br/>
+<code>    &lt;description&gt;Maximum number of jobs in the system which can be initialized</code><br/>
+<code>     concurrently by the CapacityScheduler.</code><br/>
+<code>    &lt;/description&gt;    </code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code> </code><br/>
+<code>&lt;!-- queue: queueA --&gt;</code><br/>
+<code> &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueA.capacity&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;8&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueA.supports-priority&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;false&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueA.minimum-user-limit-percent&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;20&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueA.user-limit-factor&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;10&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueA.maximum-initialized-active-tasks&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;200000&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueA.maximum-initialized-active-tasks-per-user&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;100000&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueA.init-accept-jobs-factor&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;100&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<br/>
+<code>&lt;!-- queue: queueB --&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueB.capacity&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;2&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueB.supports-priority&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;false&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueB.minimum-user-limit-percent&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;20&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueB.user-limit-factor&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;1&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueB.maximum-initialized-active-tasks&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;200000&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueB.maximum-initialized-active-tasks-per-user&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;100000&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueB.init-accept-jobs-factor&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;10&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<br/>
+<code>&lt;!-- queue: queueC --&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueC.capacity&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;30&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueC.supports-priority&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;false&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueC.minimum-user-limit-percent&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;20&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueC.user-limit-factor&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;1&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueC.maximum-initialized-active-tasks&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;200000&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueC.maximum-initialized-active-tasks-per-user&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;100000&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueC.init-accept-jobs-factor&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;10&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<br/>
+<code>&lt;!-- queue: queueD --&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueD.capacity&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;1&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueD.supports-priority&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;false&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueD.minimum-user-limit-percent&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;20&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueD.user-limit-factor&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;20&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueD.maximum-initialized-active-tasks&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;200000&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueD.maximum-initialized-active-tasks-per-user&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;100000&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueD.init-accept-jobs-factor&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;10&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<br/>
+<code>&lt;!-- queue: queueE --&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueE.capacity&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;31&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueE.supports-priority&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;false&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueE.minimum-user-limit-percent&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;20&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueE.user-limit-factor&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;1&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueE.maximum-initialized-active-tasks&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;200000&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueE.maximum-initialized-active-tasks-per-user&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;100000&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueE.init-accept-jobs-factor&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;10&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<br/>
+<code>&lt;!-- queue: queueF --&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueF.capacity&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;28&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueF.supports-priority&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;false&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueF.minimum-user-limit-percent&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;20&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueF.user-limit-factor&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;1&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueF.maximum-initialized-active-tasks&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;200000&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueF.maximum-initialized-active-tasks-per-user&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;100000&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<code>  &lt;property&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;name&gt;mapred.capacity-scheduler.queue.queueF.init-accept-jobs-factor&lt;/name&gt;</code><br/>
+<code>    &nbsp;&nbsp;&lt;value&gt;10&lt;/value&gt;</code><br/>
+<code>  &lt;/property&gt;</code><br/>
+<br/>
+<code>&lt;/configuration&gt;</code><br/>
+    </td>
+    </tr>
+    </table>
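+    <p>A few observations on this example (derived from the values above):
+    the queue capacities (8 + 2 + 30 + 1 + 31 + 28) sum to 100% of the
+    cluster, and queueA's <code>user-limit-factor</code> of 10 together with
+    its <code>init-accept-jobs-factor</code> of 100 allow its users to
+    elastically claim idle capacity, and to queue far more jobs, than its 8%
+    guarantee alone would suggest.</p>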
+  </section>
   </body>
   
 </document>

Modified: hadoop/common/branches/branch-0.20-security-204/src/docs/src/documentation/content/xdocs/fair_scheduler.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-204/src/docs/src/documentation/content/xdocs/fair_scheduler.xml?rev=1101640&r1=1101639&r2=1101640&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-204/src/docs/src/documentation/content/xdocs/fair_scheduler.xml (original)
+++ hadoop/common/branches/branch-0.20-security-204/src/docs/src/documentation/content/xdocs/fair_scheduler.xml Tue May 10 20:52:35 2011
@@ -93,7 +93,7 @@
       <p>
         To run the fair scheduler in your Hadoop installation, you need to put
         it on the CLASSPATH. The easiest way is to copy the 
-        <em>hadoop-*-fairscheduler.jar</em> from
+        <em>hadoop-fairscheduler-*.jar</em> from
         <em>HADOOP_HOME/contrib/fairscheduler</em> to <em>HADOOP_HOME/lib</em>.
         Alternatively you can modify <em>HADOOP_CLASSPATH</em> to include this jar, in
         <em>HADOOP_CONF_DIR/hadoop-env.sh</em>
@@ -101,7 +101,7 @@
       <p>
        To compile the fair scheduler from source, execute <em>ant 
        package</em> in the source folder and copy the 
-        <em>build/contrib/fair-scheduler/hadoop-*-fairscheduler.jar</em> 
+        <em>build/contrib/fair-scheduler/hadoop-fairscheduler-*.jar</em> 
        to <em>HADOOP_HOME/lib</em>.
       </p>
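+      <p>
+        For example, the copy step might look like this (assuming a standard
+        installation layout):<br/>
+        <code>$ cp $HADOOP_HOME/contrib/fairscheduler/hadoop-fairscheduler-*.jar $HADOOP_HOME/lib/</code>
+      </p>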
       <p>

Modified: hadoop/common/branches/branch-0.20-security-204/src/docs/src/documentation/content/xdocs/single_node_setup.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-204/src/docs/src/documentation/content/xdocs/single_node_setup.xml?rev=1101640&r1=1101639&r2=1101640&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-204/src/docs/src/documentation/content/xdocs/single_node_setup.xml (original)
+++ hadoop/common/branches/branch-0.20-security-204/src/docs/src/documentation/content/xdocs/single_node_setup.xml Tue May 10 20:52:35 2011
@@ -146,7 +146,7 @@
         <code>$ mkdir input</code><br/>
         <code>$ cp conf/*.xml input</code><br/>
         <code>
-          $ bin/hadoop jar hadoop-*-examples.jar grep input output 'dfs[a-z.]+'
+          $ bin/hadoop jar hadoop-examples-*.jar grep input output 'dfs[a-z.]+'
         </code><br/>
         <code>$ cat output/*</code>
       </p>
@@ -252,7 +252,7 @@
         <p>
           Run some of the examples provided:<br/>
           <code>
-            $ bin/hadoop jar hadoop-*-examples.jar grep input output 'dfs[a-z.]+'
+            $ bin/hadoop jar hadoop-examples-*.jar grep input output 'dfs[a-z.]+'
           </code>
         </p>
         

Modified: hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/AggregateWordCount.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/AggregateWordCount.java?rev=1101640&r1=1101639&r2=1101640&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/AggregateWordCount.java (original)
+++ hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/AggregateWordCount.java Tue May 10 20:52:35 2011
@@ -34,7 +34,7 @@ import org.apache.hadoop.mapred.lib.aggr
  * text input files, breaks each line into words and counts them. The output is
  * a locally sorted list of words and the count of how often they occurred.
  * 
- * To run: bin/hadoop jar hadoop-*-examples.jar aggregatewordcount <i>in-dir</i>
+ * To run: bin/hadoop jar hadoop-examples-*.jar aggregatewordcount <i>in-dir</i>
  * <i>out-dir</i> <i>numOfReducers</i> textinputformat
  * 
  */

Modified: hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/AggregateWordHistogram.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/AggregateWordHistogram.java?rev=1101640&r1=1101639&r2=1101640&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/AggregateWordHistogram.java (original)
+++ hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/AggregateWordHistogram.java Tue May 10 20:52:35 2011
@@ -32,7 +32,7 @@ import org.apache.hadoop.mapred.lib.aggr
  * This is an example Aggregated Hadoop Map/Reduce application. Computes the
  * histogram of the words in the input texts.
  * 
- * To run: bin/hadoop jar hadoop-*-examples.jar aggregatewordhist <i>in-dir</i>
+ * To run: bin/hadoop jar hadoop-examples-*.jar aggregatewordhist <i>in-dir</i>
  * <i>out-dir</i> <i>numOfReducers</i> textinputformat
  * 
  */

Modified: hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/dancing/package.html
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/dancing/package.html?rev=1101640&r1=1101639&r2=1101640&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/dancing/package.html (original)
+++ hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/dancing/package.html Tue May 10 20:52:35 2011
@@ -59,8 +59,8 @@ Both applications have been added to the
 run as:
 
 <pre>
-bin/hadoop jar hadoop-*-examples.jar pentomino pent-outdir
-bin/hadoop jar hadoop-*-examples.jar sudoku puzzle.txt
+bin/hadoop jar hadoop-examples-*.jar pentomino pent-outdir
+bin/hadoop jar hadoop-examples-*.jar sudoku puzzle.txt
 </pre>
 
 <p>

Modified: hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/terasort/TeraGen.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/terasort/TeraGen.java?rev=1101640&r1=1101639&r2=1101640&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/terasort/TeraGen.java (original)
+++ hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/terasort/TeraGen.java Tue May 10 20:52:35 2011
@@ -55,7 +55,7 @@ import org.apache.hadoop.util.ToolRunner
  *
  * <p>
  * To run the program: 
- * <b>bin/hadoop jar hadoop-*-examples.jar teragen 10000000000 in-dir</b>
+ * <b>bin/hadoop jar hadoop-examples-*.jar teragen 10000000000 in-dir</b>
  */
 public class TeraGen extends Configured implements Tool {
 

Modified: hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/terasort/TeraSort.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/terasort/TeraSort.java?rev=1101640&r1=1101639&r2=1101640&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/terasort/TeraSort.java (original)
+++ hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/terasort/TeraSort.java Tue May 10 20:52:35 2011
@@ -45,7 +45,7 @@ import org.apache.hadoop.util.ToolRunner
  * finish. 
  * <p>
  * To run the program: 
- * <b>bin/hadoop jar hadoop-*-examples.jar terasort in-dir out-dir</b>
+ * <b>bin/hadoop jar hadoop-examples-*.jar terasort in-dir out-dir</b>
  */
 public class TeraSort extends Configured implements Tool {
   private static final Log LOG = LogFactory.getLog(TeraSort.class);

Modified: hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/terasort/TeraValidate.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/terasort/TeraValidate.java?rev=1101640&r1=1101639&r2=1101640&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/terasort/TeraValidate.java (original)
+++ hadoop/common/branches/branch-0.20-security-204/src/examples/org/apache/hadoop/examples/terasort/TeraValidate.java Tue May 10 20:52:35 2011
@@ -44,7 +44,7 @@ import org.apache.hadoop.util.ToolRunner
 * Any output from the reduce is a problem report.
  * <p>
  * To run the program: 
- * <b>bin/hadoop jar hadoop-*-examples.jar teravalidate out-dir report-dir</b>
+ * <b>bin/hadoop jar hadoop-examples-*.jar teravalidate out-dir report-dir</b>
  * <p>
  * If there is any output, something is wrong and the output of the reduce
  * will have the problem report.


