hadoop-common-commits mailing list archives

From: ste...@apache.org
Subject: svn commit: r885142 [2/6] - in /hadoop/common/branches/HADOOP-6194: ./ .eclipse.templates/ bin/ ivy/ lib/jdiff/ src/ src/contrib/ src/contrib/ec2/ src/docs/ src/docs/src/documentation/ src/docs/src/documentation/content/xdocs/ src/docs/src/documentatio...
Date: Sat, 28 Nov 2009 19:53:40 GMT
Modified: hadoop/common/branches/HADOOP-6194/ivy.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/ivy.xml?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/ivy.xml (original)
+++ hadoop/common/branches/HADOOP-6194/ivy.xml Sat Nov 28 19:53:33 2009
@@ -28,7 +28,7 @@
     <conf name="default" extends="master,runtime"/>
     <conf name="master" description="contains the artifact but no dependencies"/>
     <conf name="runtime" description="runtime but not the artifact"
-      extends="client,server,s3-server,kfs"/>
+      extends="client,server,s3-server,kfs,mandatory,jetty,ftp"/>
 
     <conf name="mandatory" description="contains the critical  dependencies"
       extends="commons-logging,log4j"/>
@@ -46,22 +46,19 @@
     <conf name="s3-server" description="dependencies for running on S3/EC2 infrastructure"
       extends="s3-client,server"/>
     <conf name="kfs" description="dependencies for KFS file system support"/>
-    <conf name="ftp" description="dependencies for workign with FTP filesytems"
+    <conf name="ftp" description="dependencies for workign with FTP filesytems" 
               extends="mandatory"/>
    <conf name="jetty" description="Jetty provides the in-VM HTTP daemon" extends="commons-logging"/>
 
+   <conf name="common" extends="runtime,mandatory,httpclient,ftp,jetty"
+		      description="common artifacts"/>
+    <!--Testing pulls in everything-->
+   <conf name="test" extends="master,common" description="the classpath needed to run tests"/>
+
     <!--Private configurations. -->
 
-    <conf name="common" visibility="private" extends="runtime,mandatory,httpclient,ftp,jetty"
-		      description="common artifacts"/>
     <conf name="javadoc" visibility="private" description="artiracts required while performing doc generation"
       extends="common,mandatory,jetty,lucene"/>
-    <!--Testing pulls in everything-->
-    <conf name="test" extends="common,s3-server,kfs" visibility="private"
-      description="the classpath needed to run tests"/>
-
-    <conf name="test-hdfswithmr" extends="test" visibility="private"
-      description="the classpath needed to run tests"/>
 
     <conf name="releaseaudit" visibility="private"
 	description="Artifacts required for releaseaudit target"/>
@@ -273,15 +270,28 @@
     <dependency org="org.apache.hadoop"
       name="avro"
       rev="${avro.version}"
-      conf="mandatory->default"/>
+      conf="common->default"/>
     <dependency org="org.codehaus.jackson"
       name="jackson-mapper-asl"
-      rev="${jackson-mapper-asl.version}"
-      conf="mandatory->default"/>
+      rev="${jackson.version}"
+      conf="common->default"/> 
     <dependency org="com.thoughtworks.paranamer"
       name="paranamer"
       rev="${paranamer.version}"
-      conf="mandatory->default"/>
+      conf="common->default"/>
+    <dependency org="com.thoughtworks.paranamer"
+      name="paranamer-ant"
+      rev="${paranamer.version}"
+      conf="common->default"/>
+    <dependency org="org.aspectj"
+      name="aspectjrt"
+      rev="${aspectj.version}"
+      conf="common->default">
+    </dependency>
+    <dependency org="org.aspectj"
+      name="aspectjtools"
+      rev="${aspectj.version}"
+      conf="common->default">
+    </dependency>
     </dependencies>
-  
 </ivy-module>
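
For reference, the reworked "common" and "test" configurations above are now public, so downstream builds can resolve against them directly. A hypothetical Ant fragment (not part of this change; it assumes the Ivy Ant tasks are loaded under the ivy: namespace) might consume them like this:

    <ivy:resolve file="${basedir}/ivy.xml" conf="common,test"/>
    <ivy:cachepath pathid="ivy.common.classpath" conf="common"/>
    <ivy:cachepath pathid="ivy.test.classpath" conf="test"/>

The conf="common->default" mappings on the dependency elements mean that resolving the module's "common" configuration pulls each listed dependency in that dependency's own "default" configuration.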

Modified: hadoop/common/branches/HADOOP-6194/ivy/ivysettings.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/ivy/ivysettings.xml?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/ivy/ivysettings.xml (original)
+++ hadoop/common/branches/HADOOP-6194/ivy/ivysettings.xml Sat Nov 28 19:53:33 2009
@@ -1,5 +1,4 @@
 <ivysettings>
-
  <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -17,65 +16,20 @@
    limitations under the License.
 -->
 
- <!--
-  see http://www.jayasoft.org/ivy/doc/configuration
-  -->
-  <!-- you can override this property to use mirrors
-          http://repo1.maven.org/maven2/
-          http://mirrors.dotsrc.org/maven2
-          http://ftp.ggi-project.org/pub/packages/maven2
-          http://mirrors.sunsite.dk/maven2
-          http://public.planetmirror.com/pub/maven2
-          http://ibiblio.lsu.edu/main/pub/packages/maven2
-          http://www.ibiblio.net/pub/packages/maven2
-  -->
-  <property name="repo.maven.org"
-    value="http://repo1.maven.org/maven2/"
-    override="false"/>
-  <property name="snapshot.apache.org"
-    value="http://people.apache.org/repo/m2-snapshot-repository/"
-    override="false"/>
-  <property name="maven2.pattern"
-    value="[organisation]/[module]/[revision]/[module]-[revision]"/>
-  <property name="maven2.pattern.ext"
-    value="${maven2.pattern}.[ext]"/>
-  <!-- pull in the local repository -->
-  <include url="${ivy.default.conf.dir}/ivyconf-local.xml"/>
-  <settings defaultResolver="default"/>
+  <property name="repo.maven.org" value="http://repo1.maven.org/maven2/" override="false"/>
+
+  <property name="maven2.pattern" value="[organisation]/[module]/[revision]/[module]-[revision]"/>
+  <property name="maven2.pattern.ext" value="${maven2.pattern}.[ext]"/>
+      <!-- pull in the local repository -->
+ <include url="${ivy.default.conf.dir}/ivyconf-local.xml"/> 
+ <settings defaultResolver="default"/>
   <resolvers>
-    <ibiblio name="maven2"
-      root="${repo.maven.org}"
-      pattern="${maven2.pattern.ext}"
-      m2compatible="true"
-      />
-    <ibiblio name="apache-snapshot"
-      root="${snapshot.apache.org}"
-      pattern="${maven2.pattern.ext}"
-      m2compatible="true"
-      />
+    <!--ibiblio resolvers-->
+    <ibiblio name="maven2" root="${repo.maven.org}" m2compatible="true"/>
+
     <chain name="default" dual="true">
-      <resolver ref="local"/>
-      <resolver ref="maven2"/>
-    </chain>
-    <chain name="internal">
-      <resolver ref="local"/>
-    </chain>
-    <chain name="external">
       <resolver ref="maven2"/>
     </chain>
-    <chain name="external-and-snapshots">
-      <resolver ref="maven2"/>
-      <resolver ref="apache-snapshot"/>
-    </chain>
   </resolvers>
-  <modules>
-    <!--
-    This forces a requirement for other hadoop-artifacts to be built locally
-    rather than look for them online.
-
-    -->
-    <module organisation="org.apache.hadoop" name="Hadoop.*" resolver="internal"/>
-    <!--until commons cli is external, we need to pull it in from the snapshot repository -if present -->
-    <module organisation="org.apache.commons" name=".*" resolver="external-and-snapshots"/>
-  </modules>
+
 </ivysettings>
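
Because the repo.maven.org property above is declared with override="false", a value that is already defined when the settings are loaded takes precedence. Since the Ivy Ant integration imports Ant properties as Ivy variables, one hypothetical way to point the build at a Maven mirror (the mirror URL and the <ivy:settings> call here are illustrative, not taken from this build) is:

    <property name="repo.maven.org" value="http://mirrors.example.org/maven2/"/>
    <ivy:settings file="${basedir}/ivy/ivysettings.xml"/>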

Modified: hadoop/common/branches/HADOOP-6194/ivy/libraries.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/ivy/libraries.properties?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/ivy/libraries.properties (original)
+++ hadoop/common/branches/HADOOP-6194/ivy/libraries.properties Sat Nov 28 19:53:33 2009
@@ -14,8 +14,11 @@
 #It drives ivy and the generation of a maven POM
 
 #These are the versions of our dependencies (in alphabetical order)
-apacheant.version=1.7.0
+apacheant.version=1.7.1
+ant-task.version=2.0.10
 avro.version=1.0.0
+avro.version=1.2.0
+
 checkstyle.version=4.2
 
 commons-cli.version=1.2
@@ -24,8 +27,8 @@
 commons-collections.version=3.1
 commons-httpclient.version=3.0.1
 commons-lang.version=2.4
-commons-logging.version=1.0.4
-commons-logging-api.version=1.0.4
+commons-logging.version=1.1.1
+commons-logging-api.version=1.1
 commons-el.version=1.0
 commons-fileupload.version=1.2
 commons-io.version=1.4
@@ -39,9 +42,10 @@
 
 hsqldb.version=1.8.0.10
 
-#ivy.version=2.0.0-beta2
-ivy.version=2.0.0-rc2
+ivy.version=2.1.0-rc1
 jackson-mapper-asl.version=1.0.1
+jackson.version=1.0.1
+
 jasper.version=5.5.12
 jsp.version=2.1
 jsp-api.version=5.5.12
@@ -61,6 +65,8 @@
 
 oro.version=2.0.8
 paranamer.version=1.5
+paranamer.version=1.5
+
 rats-lib.version=0.6
 
 servlet.version=4.0.6
@@ -71,3 +77,5 @@
 
 xmlenc.version=0.52
 xerces.version=1.4.4
+
+aspectj.version=1.6.5

Modified: hadoop/common/branches/HADOOP-6194/ivybuild.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/ivybuild.xml?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/ivybuild.xml (original)
+++ hadoop/common/branches/HADOOP-6194/ivybuild.xml Sat Nov 28 19:53:33 2009
@@ -1,5 +1,5 @@
 <?xml version="1.0"?>
-<project name="hadoop-core" default="published"
+<project name="hadoop-common-ivy" default="published"
   xmlns:ivy="antlib:org.apache.ivy.ant">
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
@@ -51,7 +51,7 @@
       value="[conf]/[artifact]-[revision].[ext]"/>
     <!--this is how artifacts that get built are named-->
     <property name="ivy.publish.pattern"
-      value="hadoop--core-[revision].[ext]"/>
+      value="hadoop-common-[revision].[ext]"/>
     <property name="hadoop.jar"
       location="${build.dir}/hadoop-${module}-${hadoop.version}.jar" />
 
@@ -407,4 +407,4 @@
     </svn>
   </target>
 
-</project>
\ No newline at end of file
+</project>
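
With the publish pattern renamed above, published artifacts are laid out as hadoop-common-[revision].[ext] instead of the old hadoop--core-[revision].[ext]. A hypothetical publish call that uses the pattern (the resolver name and ${build.ivy.dir} are assumptions for illustration, not taken from this build file) could look like:

    <ivy:publish resolver="local" pubrevision="${hadoop.version}"
        artifactspattern="${build.ivy.dir}/${ivy.publish.pattern}"/>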

Modified: hadoop/common/branches/HADOOP-6194/src/contrib/build-contrib.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/contrib/build-contrib.xml?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/contrib/build-contrib.xml (original)
+++ hadoop/common/branches/HADOOP-6194/src/contrib/build-contrib.xml Sat Nov 28 19:53:33 2009
@@ -22,6 +22,7 @@
 <project name="hadoopbuildcontrib" xmlns:ivy="antlib:org.apache.ivy.ant">
 
   <property name="name" value="${ant.project.name}"/>
+  <dirname property="src.contrib.dir" file="${ant.file.hadoopbuildcontrib}" />
   <property name="root" value="${basedir}"/>
 
   <!-- Load all the default properties, and any the user wants    -->
@@ -29,7 +30,7 @@
   <property file="${user.home}/${name}.build.properties" />
   <property file="${root}/build.properties" />
 
-  <property name="hadoop.root" location="${root}/../../../"/>
+  <property name="hadoop.root" location="${src.contrib.dir}/../../"/>
   <property name="src.dir"  location="${root}/src/java"/>
   <property name="src.test" location="${root}/src/test"/>
   <property name="src.examples" location="${root}/src/examples"/>

Propchange: hadoop/common/branches/HADOOP-6194/src/contrib/ec2/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Sat Nov 28 19:53:33 2009
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/src/contrib/ec2:804966-807681
+/hadoop/common/trunk/src/contrib/ec2:804966-884903
 /hadoop/core/branches/branch-0.19/core/src/contrib/ec2:713112
 /hadoop/core/trunk/src/contrib/ec2:776175-784663

Propchange: hadoop/common/branches/HADOOP-6194/src/docs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Sat Nov 28 19:53:33 2009
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/src/docs:804966-807681
+/hadoop/common/trunk/src/docs:804966-884903
 /hadoop/core/branches/HADOOP-4687/core/src/docs:776175-786719
 /hadoop/core/branches/branch-0.19/src/docs:713112

Modified: hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/cluster_setup.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/cluster_setup.xml?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/cluster_setup.xml (original)
+++ hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/cluster_setup.xml Sat Nov 28 19:53:33 2009
@@ -33,20 +33,20 @@
       Hadoop clusters ranging from a few nodes to extremely large clusters with 
       thousands of nodes.</p>
       <p>
-      To play with Hadoop, you may first want to install Hadoop on a single machine (see <a href="quickstart.html"> Hadoop Quick Start</a>).
+      To play with Hadoop, you may first want to install Hadoop on a single machine (see <a href="single_node_setup.html"> Single Node Setup</a>).
       </p>
     </section>
     
     <section>
-      <title>Pre-requisites</title>
+      <title>Prerequisites</title>
       
       <ol>
         <li>
-          Make sure all <a href="quickstart.html#PreReqs">requisite</a> software 
+          Make sure all <a href="single_node_setup.html#PreReqs">required software</a> 
           is installed on all nodes in your cluster.
         </li>
         <li>
-          <a href="quickstart.html#Download">Get</a> the Hadoop software.
+          <a href="single_node_setup.html#Download">Download</a> the Hadoop software.
         </li>
       </ol>
     </section>
@@ -81,7 +81,7 @@
         <ol>
           <li>
             Read-only default configuration - 
-            <a href="ext:core-default">src/core/core-default.xml</a>, 
+            <a href="ext:common-default">src/common/common-default.xml</a>, 
             <a href="ext:hdfs-default">src/hdfs/hdfs-default.xml</a> and 
             <a href="ext:mapred-default">src/mapred/mapred-default.xml</a>.
           </li>
@@ -94,8 +94,8 @@
         </ol>
       
         <p>To learn more about how the Hadoop framework is controlled by these 
-        configuration files, look 
-        <a href="ext:api/org/apache/hadoop/conf/configuration">here</a>.</p>
+        configuration files see
+        <a href="ext:api/org/apache/hadoop/conf/configuration">Class Configuration</a>.</p>
       
         <p>Additionally, you can control the Hadoop scripts found in the 
         <code>bin/</code> directory of the distribution, by setting site-specific 
@@ -271,16 +271,6 @@
 		        TaskTrackers.
 		      </td>
   		    </tr>
-		  </table>
-      
-      <p><br/><code> conf/mapred-queues.xml</code></p>
-      
-      <table>
-       <tr>
-          <th>Parameter</th>
-          <th>Value</th> 
-          <th>Notes</th>
-       </tr>
         <tr>
           <td>mapred.queue.names</td>
           <td>Comma separated list of queues to which jobs can be submitted.</td>
@@ -289,8 +279,8 @@
             with the name as <em>default</em>. Hence, this parameter's
             value should always contain the string <em>default</em>.
             Some job schedulers supported in Hadoop, like the 
-            <a href="capacity_scheduler.html">Capacity 
-            Scheduler</a>, support multiple queues. If such a scheduler is
+            <a href="http://hadoop.apache.org/mapreduce/docs/current/capacity_scheduler.html">Capacity Scheduler</a>, 
+            support multiple queues. If such a scheduler is
             being used, the list of configured queue names must be
             specified here. Once queues are defined, users can submit
             jobs to a queue using the property name 
@@ -313,6 +303,16 @@
             <em>mapred.queue.queue-name.acl-name</em>, defined below.
           </td>
         </tr>
+		  </table>
+      
+      <p><br/><code> conf/mapred-queue-acls.xml</code></p>
+      
+      <table>
+       <tr>
+          <th>Parameter</th>
+          <th>Value</th> 
+          <th>Notes</th>
+       </tr>
         <tr>
           <td>mapred.queue.<em>queue-name</em>.acl-submit-job</td>
           <td>List of users and groups that can submit jobs to the
@@ -340,15 +340,6 @@
             his/her own job, irrespective of the ACLs.
           </td>
         </tr>
-        <tr>
-          <td>mapred.queue.<em>queue-name</em>.state</td>
-          <td>Specifies whether <em>queue-name</em> is running or stopped</td> 
-          <td>
-            Jobs can be submitted to a queue only if it is in the 
-            <em>running</em> state. However, jobs which are already running
-            when a queue is stopped will be allowed to finish.
-          </td>
-        </tr>
       </table>
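
The per-queue ACL parameters in the table above (mapred.queue.queue-name.acl-submit-job and related keys) go into conf/mapred-queue-acls.xml. A hypothetical entry for the default queue is sketched below; the user and group names are placeholders, and the "comma-separated users, a space, then comma-separated groups" value format follows the ACL convention used elsewhere in this patch:

    <property>
      <name>mapred.queue.default.acl-submit-job</name>
      <value>alice,bob mrusers</value>
    </property>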
       
 
@@ -401,10 +392,18 @@
                   </tr>
                   <tr>
                     <td>conf/mapred-site.xml</td>
-                    <td>mapred.child.java.opts</td>
+                    <td>mapred.map.child.java.opts</td>
                     <td>-Xmx512M</td>
                     <td>
-                      Larger heap-size for child jvms of maps/reduces. 
+                      Larger heap-size for child jvms of maps. 
+                    </td>
+                  </tr>
+                  <tr>
+                    <td>conf/mapred-site.xml</td>
+                    <td>mapred.reduce.child.java.opts</td>
+                    <td>-Xmx512M</td>
+                    <td>
+                      Larger heap-size for child jvms of reduces. 
                     </td>
                   </tr>
                   <tr>
@@ -474,9 +473,17 @@
                   </tr>
                   <tr>
                     <td>conf/mapred-site.xml</td>
-                    <td>mapred.child.java.opts</td>
+                    <td>mapred.map.child.java.opts</td>
+                    <td>-Xmx512M</td>
+                    <td>
+                      Larger heap-size for child jvms of maps. 
+                    </td>
+                  </tr>
+                  <tr>
+                    <td>conf/mapred-site.xml</td>
+                    <td>mapred.reduce.child.java.opts</td>
                     <td>-Xmx1024M</td>
-                    <td>Larger heap-size for child jvms of maps/reduces.</td>
+                    <td>Larger heap-size for child jvms of reduces.</td>
                   </tr>
                 </table>
               </li>
@@ -486,18 +493,18 @@
         <title> Memory management</title>
         <p>Users/admins can also specify the maximum virtual memory 
         of the launched child-task, and any sub-process it launches 
-        recursively, using <code>mapred.child.ulimit</code>. Note that
-        the value set here is a per process limit.
-        The value for <code>mapred.child.ulimit</code> should be specified 
-        in kilo bytes (KB). And also the value must be greater than
+        recursively, using <code>mapred.{map|reduce}.child.ulimit</code>. Note 
+        that the value set here is a per process limit.
+        The value for <code>mapred.{map|reduce}.child.ulimit</code> should be 
+        specified in kilobytes (KB). The value must also be greater than
         or equal to the -Xmx passed to JavaVM, else the VM might not start. 
         </p>
         
         <p>Note: <code>mapred.child.java.opts</code> are used only for 
         configuring the launched child tasks from task tracker. Configuring 
-        the memory options for daemons is documented in 
+        the memory options for daemons is documented under 
         <a href="cluster_setup.html#Configuring+the+Environment+of+the+Hadoop+Daemons">
-        cluster_setup.html </a></p>
+        Configuring the Environment of the Hadoop Daemons</a>.</p>
         
         <p>The memory available to some parts of the framework is also
         configurable. In map and reduce tasks, performance may be influenced
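
Taken together, the per-task options described above might be set in mapred-site.xml roughly as follows. This is a hypothetical sketch: the heap and ulimit values are illustrative only, and the ulimit values are in kilobytes and must be at least as large as the corresponding -Xmx setting:

    <property>
      <name>mapred.map.child.java.opts</name>
      <value>-Xmx512M</value>
    </property>
    <property>
      <name>mapred.reduce.child.java.opts</name>
      <value>-Xmx1024M</value>
    </property>
    <property>
      <name>mapred.map.child.ulimit</name>
      <value>1048576</value>
    </property>
    <property>
      <name>mapred.reduce.child.ulimit</name>
      <value>2097152</value>
    </property>
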
@@ -658,11 +665,13 @@
             distribution. The task tracker uses this executable to 
             launch and kill tasks. The setuid executable switches to
             the user who has submitted the job and launches or kills
-            the tasks. Currently, this task controller 
-            opens up permissions to local files and directories used 
-            by the tasks such as the job jar files, distributed archive 
-            files, intermediate files and task log files. In future,
-            it is expected that stricter file permissions are used.
+            the tasks. For maximum security, this task controller 
+            sets up restricted permissions and user/group ownership of
+            local files and directories used by the tasks such as the
+            job jar files, intermediate files and task log files. Currently
+            permissions on distributed cache files are opened up to be
+            accessible by all users. In the future, it is expected that stricter
+            file permissions will be set for these files too.
             </td>
             </tr>
             </table>
@@ -704,18 +713,32 @@
             </p>
             
             <p>
-            The executable must be deployed as a setuid executable, by changing
-            the ownership to <em>root</em>, group ownership to that of tasktracker
-            and giving it permissions <em>4510</em>.Please take a note that,
-            group which owns task-controller should contain only tasktracker
-            as its memeber and not users who submit jobs.
+            The executable must have specific permissions as follows. It
+            should have <em>6050 or --Sr-s---</em> permissions,
+            user-owned by root (super-user) and group-owned by a group
+            of which the TaskTracker's user is the sole member.
+            For example, let's say that the TaskTracker is run as user
+            <em>mapred</em>, who is part of the groups <em>users</em> and
+            <em>mapredGroup</em>, either of which may be the primary group.
+            Suppose also that <em>users</em> has both <em>mapred</em> and
+            another user <em>X</em> as its members, while <em>mapredGroup</em>
+            has only <em>mapred</em> as its member. Going by the above
+            description, the setuid/setgid executable should be set to
+            <em>6050 or --Sr-s---</em> with user-owner <em>root</em> and
+            group-owner <em>mapredGroup</em>, which has
+            only <em>mapred</em> as its member (and not <em>users</em>, which
+            also has <em>X</em> as a member besides <em>mapred</em>).
             </p>
             
             <p>The executable requires a configuration file called 
             <em>taskcontroller.cfg</em> to be
             present in the configuration directory passed to the ant target 
             mentioned above. If the binary was not built with a specific 
-            conf directory, the path defaults to <em>/path-to-binary/../conf</em>.
+            conf directory, the path defaults to
+            <em>/path-to-binary/../conf</em>. The configuration file must be
+            owned by the user running TaskTracker (user <em>mapred</em> in the
+            above example), group-owned by anyone and should have the
+            permissions <em>0400 or r--------</em>.
             </p>
             
             <p>The executable requires following configuration items to be 
@@ -730,17 +753,81 @@
             validate paths passed to the setuid executable in order to prevent
             arbitrary paths being passed to it.</td>
             </tr>
+            <tr>
+            <td>hadoop.log.dir</td>
+            <td>Path to the hadoop log directory. Should be the same as the value
+            with which the TaskTracker is started. This is required to set proper
+            permissions on the log files so that they can be written to by the user's
+            tasks and read by the TaskTracker for serving on the web UI.</td>
+            </tr>
             </table>
 
             <p>
-            The LinuxTaskController requires that paths leading up to
+            The LinuxTaskController requires that paths including and leading up to
             the directories specified in
-            <em>mapred.local.dir</em> and <em>hadoop.log.dir</em> to be 755
-            and directories themselves having 777 permissions.
+            <em>mapred.local.dir</em> and <em>hadoop.log.dir</em> be set to 755
+            permissions.
             </p>
             </section>
             
           </section>
+          <section>
+            <title>Monitoring Health of TaskTracker Nodes</title>
+            <p>Hadoop Map/Reduce provides a mechanism by which administrators 
+            can configure the TaskTracker to run an administrator supplied
+            script periodically to determine if a node is healthy or not.
+            Administrators can determine if the node is in a healthy state
+            by performing any checks of their choice in the script. If the
+            script detects the node to be in an unhealthy state, it must print
+            a line to standard output beginning with the string <em>ERROR</em>.
+            The TaskTracker spawns the script periodically and checks its 
+            output. If the script's output contains the string <em>ERROR</em>,
+            as described above, the node's status is reported as 'unhealthy'
+            and the node is black-listed on the JobTracker. No further tasks 
+            will be assigned to this node. However, the
+            TaskTracker continues to run the script, so that if the node
+            becomes healthy again, it will be removed from the blacklisted
+            nodes on the JobTracker automatically. The node's health
+            along with the output of the script, if it is unhealthy, is
+            available to the administrator in the JobTracker's web interface.
+            The time since the node was healthy is also displayed on the 
+            web interface.
+            </p>
+            
+            <section>
+            <title>Configuring the Node Health Check Script</title>
+            <p>The following parameters can be used to control the node health 
+            monitoring script in <em>mapred-site.xml</em>.</p>
+            <table>
+            <tr><th>Name</th><th>Description</th></tr>
+            <tr><td><code>mapred.healthChecker.script.path</code></td>
+            <td>Absolute path to the script which is periodically run by the 
+            TaskTracker to determine if the node is 
+            healthy or not. The file should be executable by the TaskTracker.
+            If the value of this key is empty or the file does 
+            not exist or is not executable, node health monitoring
+            is not started.</td>
+            </tr>
+            <tr>
+            <td><code>mapred.healthChecker.interval</code></td>
+            <td>Frequency at which the node health script is run, 
+            in milliseconds</td>
+            </tr>
+            <tr>
+            <td><code>mapred.healthChecker.script.timeout</code></td>
+            <td>Time after which the node health script will be killed by
+            the TaskTracker if unresponsive.
+            The node is marked unhealthy if the node health script times out.</td>
+            </tr>
+            <tr>
+            <td><code>mapred.healthChecker.script.args</code></td>
+            <td>Extra arguments that can be passed to the node health script 
+            when launched.
+            These should be a comma-separated list of arguments.</td>
+            </tr>
+            </table>
+            </section>
+          </section>
           
         </section>
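
The node health check parameters documented in the new section above are ordinary mapred-site.xml properties. A hypothetical configuration (the script path and timing values are placeholders, not part of this patch) could look like:

    <property>
      <name>mapred.healthChecker.script.path</name>
      <value>/usr/local/hadoop/bin/node-health.sh</value>
    </property>
    <property>
      <name>mapred.healthChecker.interval</name>
      <value>60000</value>
    </property>
    <property>
      <name>mapred.healthChecker.script.timeout</name>
      <value>600000</value>
    </property>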
         

Modified: hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml (original)
+++ hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml Sat Nov 28 19:53:33 2009
@@ -178,9 +178,9 @@
 	<dd>
 		The choice of initial mode during upgrade. The <em>x</em> permission is <em>never</em> set for files. For configuration files, the decimal value <em>511<sub>10</sub></em> may be used.
 	</dd>
-	<dt><code>dfs.umask = 022</code></dt>
+	<dt><code>dfs.umaskmode = 022</code></dt>
 	<dd>
-		The <code>umask</code> used when creating files and directories. For configuration files, the decimal value <em>18<sub>10</sub></em> may be used.
+		The <code>umask</code> used when creating files and directories. May be specified either via three octal digits or symbolic values, with the same constraints as the dfs chmod command.
 	</dd>
 </dl>
 </section>
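
The dfs.umaskmode entry above replaces dfs.umask and accepts the same forms as the dfs chmod command. A hypothetical site-configuration override using the octal form (027 is only an example value) would be:

    <property>
      <name>dfs.umaskmode</name>
      <value>027</value>
    </property>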

Modified: hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/index.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/index.xml?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/index.xml (original)
+++ hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/index.xml Sat Nov 28 19:53:33 2009
@@ -26,12 +26,23 @@
   
   <body>
   <p>
-  The Hadoop Documentation provides the information you need to get started using Hadoop, the Hadoop Distributed File System (HDFS), and Hadoop on Demand (HOD).
-  </p><p>
-Begin with the <a href="quickstart.html">Hadoop Quick Start</a> which shows you how to set up a single-node Hadoop installation. Then move on to the <a href="cluster_setup.html">Hadoop Cluster Setup</a> to learn how to set up a multi-node Hadoop installation. Once your Hadoop installation is in place, try out the <a href="mapred_tutorial.html">Hadoop Map/Reduce Tutorial</a>. 
-  </p><p>
-If you have more questions, you can ask on the <a href="ext:lists">Hadoop Core Mailing Lists</a> or browse the <a href="ext:archive">Mailing List Archives</a>.
-    </p>
+The Hadoop Common Documentation describes the common utilities and libraries that support the other Hadoop subprojects.  
+  </p>
+  <p>
+The Hadoop Common Documentation also includes the information you need to get started using Hadoop. 
+Begin with the Hadoop <a href="single_node_setup.html">Single Node Setup</a> which shows you how to set up a single-node Hadoop installation. 
+Then move on to the Hadoop <a href="cluster_setup.html">Cluster Setup</a> to learn how to set up a multi-node Hadoop installation. 
+</p>
+ <p>
+   Cluster environments commonly work in tandem with MapReduce applications and distributed file systems. 
+   For information about MapReduce see the 
+ <a href="http://hadoop.apache.org/mapreduce/docs/current/index.html">MapReduce Documentation</a>.
+   For information about the Hadoop Distributed File System (HDFS) see the 
+ <a href="http://hadoop.apache.org/hdfs/docs/current/index.html">HDFS Documentation</a>.
+  </p>  
+<p>
+If you have more questions, you can ask on the <a href="ext:lists">Hadoop Common Mailing Lists</a> or browse the <a href="ext:archive">Mailing List Archives</a>.
+</p>
   </body>
   
 </document>

Modified: hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/native_libraries.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/native_libraries.xml?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/native_libraries.xml (original)
+++ hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/native_libraries.xml Sat Nov 28 19:53:33 2009
@@ -26,90 +26,82 @@
   
   <body>
   
+  <section>
+  <title>Overview</title>
+  
+<p>This guide describes the native hadoop library and includes a small discussion about native shared libraries.</p>
+
+      <p><strong>Note:</strong> Depending on your environment, the term "native libraries" <em>could</em> 
+      refer to all *.so's you need to compile; and, the term "native compression" <em>could</em> refer to all *.so's 
+      you need to compile that are specifically related to compression.
+      Currently, however, this document only addresses the native hadoop library (<em>libhadoop.so</em>).</p>
+  
+  </section>
+  
     <section>
-      <title>Purpose</title>
-      
-      <p>Hadoop has native implementations of certain components for reasons of 
-      both performance and non-availability of Java implementations. These 
-      components are available in a single, dynamically-linked, native library. 
-      On the *nix platform it is <em>libhadoop.so</em>. This document describes 
-      the usage and details on how to build the native libraries.</p>
-    </section>
-    
-    <section>
-      <title>Components</title>
-      
-      <p>Hadoop currently has the following 
-      <a href="ext:api/org/apache/hadoop/io/compress/compressioncodec">
-      compression codecs</a> as the native components:</p>
-      <ul>
-        <li><a href="ext:zlib">zlib</a></li>
-        <li><a href="ext:gzip">gzip</a></li>
-        <li><a href="ext:bzip">bzip2</a></li>
-      </ul>
+      <title>Native Hadoop Library </title>
       
-      <p>Of the above, the availability of native hadoop libraries is imperative 
-      for the gzip and bzip2 compression codecs to work.</p>
-    </section>
-
+      <p>Hadoop has native implementations of certain components for  
+      performance reasons and due to the non-availability of Java implementations. These
+      components are available in a single, dynamically-linked native library called
+       the native hadoop library. On the *nix platforms the library is named <em>libhadoop.so</em>. </p>
+   
     <section>
       <title>Usage</title>
       
-      <p>It is fairly simple to use the native hadoop libraries:</p>
+      <p>It is fairly easy to use the native hadoop library:</p>
 
-      <ul>
+      <ol>
+              <li>
+          Review the <a href="#Components">components</a>.
+        </li>
         <li>
-          Take a look at the 
-          <a href="#Supported+Platforms">supported platforms</a>.
+          Review the <a href="#Supported+Platforms">supported platforms</a>.
         </li>
         <li>
-          Either <a href="ext:releases/download">download</a> the pre-built 
-          32-bit i386-Linux native hadoop libraries (available as part of hadoop 
-          distribution in <code>lib/native</code> directory) or 
-          <a href="#Building+Native+Hadoop+Libraries">build</a> them yourself.
+          Either <a href="#Download">download</a> a hadoop release, which will 
+          include a pre-built version of the native hadoop library, or
+          <a href="#Build">build</a> your own version of the 
+          native hadoop library. Whether you download or build, the name for the library is 
+          the same: <em>libhadoop.so</em>
         </li>
         <li>
-          Make sure you have any of or all of <strong>&gt;zlib-1.2</strong>,
-          <strong>&gt;gzip-1.2</strong>, and <strong>&gt;bzip2-1.0</strong>
-          packages for your platform installed; 
-          depending on your needs.
+          Install the compression codec development packages 
+          (<strong>&gt;zlib-1.2</strong>, <strong>&gt;gzip-1.2</strong>):
+          <ul>
+              <li>If you download the library, install one or more development packages - 
+              whichever compression codecs you want to use with your deployment.</li>
+              <li>If you build the library, it is <strong>mandatory</strong> 
+              to install both development packages.</li>
+          </ul>
         </li>
-      </ul>
-      
-      <p>The <code>bin/hadoop</code> script ensures that the native hadoop 
-      library is on the library path via the system property 
-      <em>-Djava.library.path=&lt;path&gt;</em>.</p>
-
-      <p>To check everything went alright check the hadoop log files for:</p>
-      
-      <p>
-        <code>
-          DEBUG util.NativeCodeLoader - Trying to load the custom-built 
-          native-hadoop library... 
-        </code><br/>
-        <code>
-          INFO  util.NativeCodeLoader - Loaded the native-hadoop library
-        </code>
-      </p>
-
-      <p>If something goes wrong, then:</p>
-      <p>
-        <code>
-          INFO util.NativeCodeLoader - Unable to load native-hadoop library for 
-          your platform... using builtin-java classes where applicable
-        </code>
+         <li>
+          Check the <a href="#Runtime">runtime</a> log files.
+        </li>
+      </ol>
+     </section>
+    <section>
+      <title>Components</title>
+     <p>The native hadoop library includes two components, the zlib and gzip 
+      <a href="http://hadoop.apache.org/common/docs/current/api/org/apache/hadoop/io/compress/CompressionCodec.html"> 
+      compression codecs</a>:
       </p>
+      <ul>
+        <li><a href="ext:zlib">zlib</a></li>
+        <li><a href="ext:gzip">gzip</a></li>
+      </ul>
+      <p>The native hadoop library is imperative for gzip to work.</p>
     </section>
     
     <section>
       <title>Supported Platforms</title>
       
-      <p>Hadoop native library is supported only on *nix platforms only.
-      Unfortunately it is known not to work on <a href="ext:cygwin">Cygwin</a> 
-      and <a href="ext:osx">Mac OS X</a> and has mainly been used on the 
-      GNU/Linux platform.</p>
+      <p>The native hadoop library is supported on *nix platforms only.
+      The library does not work with <a href="ext:cygwin">Cygwin</a>
+      or the <a href="ext:osx">Mac OS X</a> platform.</p>
 
-      <p>It has been tested on the following GNU/Linux distributions:</p>
+      <p>The native hadoop library is mainly used on the GNU/Linux platform and
+      has been tested on these distributions:</p>
       <ul>
         <li>
           <a href="http://www.redhat.com/rhel/">RHEL4</a>/<a href="http://fedora.redhat.com/">Fedora</a>
@@ -118,22 +110,30 @@
         <li><a href="http://www.gentoo.org/">Gentoo</a></li>
       </ul>
 
-      <p>On all the above platforms a 32/64 bit Hadoop native library will work 
+      <p>On all the above distributions a 32/64 bit native hadoop library will work 
       with a respective 32/64 bit jvm.</p>
     </section>
     
     <section>
-      <title>Building Native Hadoop Libraries</title>
+      <title>Download</title>
+      
+      <p>The pre-built 32-bit i386-Linux native hadoop library is available as part of the 
+      hadoop distribution and is located in the <code>lib/native</code> directory. You can download the 
+      hadoop distribution from <a href="ext:releases/download">Hadoop Common Releases</a>.</p>
+      
+      <p>Be sure to install the zlib and/or gzip development packages - whichever compression 
+      codecs you want to use with your deployment.</p>
+     </section>    
+    
+    <section>
+      <title>Build</title>
       
-      <p>Hadoop native library is written in 
-      <a href="http://en.wikipedia.org/wiki/ANSI_C">ANSI C</a> and built using 
-      the GNU autotools-chain (autoconf, autoheader, automake, autoscan, libtool). 
-      This means it should be straight-forward to build them on any platform with 
-      a standards compliant C compiler and the GNU autotools-chain. 
-      See <a href="#Supported+Platforms">supported platforms</a>.</p>
+      <p>The native hadoop library is written in <a href="http://en.wikipedia.org/wiki/ANSI_C">ANSI C</a> 
+      and is built using the GNU autotools-chain (autoconf, autoheader, automake, autoscan, libtool). 
+      This means it should be straight-forward to build the library on any platform with a standards-compliant 
+      C compiler and the GNU autotools-chain (see the <a href="#Supported+Platforms">supported platforms</a>).</p>
 
-      <p>In particular the various packages you would need on the target 
-      platform are:</p>
+      <p>The packages you need to install on the target platform are:</p>
       <ul>
         <li>
           C compiler (e.g. <a href="http://gcc.gnu.org/">GNU C Compiler</a>)
@@ -149,52 +149,69 @@
         </li>
       </ul>
 
-      <p>Once you have the pre-requisites use the standard <code>build.xml</code> 
-      and pass along the <code>compile.native</code> flag (set to 
-      <code>true</code>) to build the native hadoop library:</p>
+      <p>Once you have installed the prerequisite packages, use the standard hadoop <code>build.xml</code>
+      file and pass along the <code>compile.native</code> flag (set to <code>true</code>) to build the native hadoop library:</p>
 
       <p><code>$ ant -Dcompile.native=true &lt;target&gt;</code></p>
 
-      <p>The native hadoop library is not built by default since not everyone is 
-      interested in building them.</p>
-
-      <p>You should see the newly-built native hadoop library in:</p>
+      <p>You should see the newly-built library in:</p>
 
       <p><code>$ build/native/&lt;platform&gt;/lib</code></p>
 
-      <p>where &lt;platform&gt; is combination of the system-properties: 
-      <code>${os.name}-${os.arch}-${sun.arch.data.model}</code>; for e.g. 
-      Linux-i386-32.</p>
-
-      <section>
-        <title>Notes</title>
-        
+      <p>where &lt;<code>platform</code>&gt; is a combination of the system-properties: 
+      <code>${os.name}-${os.arch}-${sun.arch.data.model}</code> (for example, Linux-i386-32).</p>
+
+      <p>Please note the following:</p>
         <ul>
           <li>
-            It is <strong>mandatory</strong> to have the 
-            zlib, gzip, and bzip2
-            development packages on the target platform for building the 
-            native hadoop library; however for deployment it is sufficient to 
-            install one of them if you wish to use only one of them.
+            It is <strong>mandatory</strong> to install both the zlib and gzip
+            development packages on the target platform in order to build the 
+            native hadoop library; however, for deployment it is sufficient to 
+            install just one package if you wish to use only one codec.
           </li>
           <li>
-            It is necessary to have the correct 32/64 libraries of both zlib 
-            depending on the 32/64 bit jvm for the target platform for 
-            building/deployment of the native hadoop library.
+            It is necessary to have the correct 32/64 libraries for zlib,  
+            depending on the 32/64 bit jvm for the target platform, in order to 
+            build and deploy the native hadoop library.
           </li>
         </ul>
-      </section>
     </section>
+    
+     <section>
+      <title>Runtime</title>
+      <p>The <code>bin/hadoop</code> script ensures that the native hadoop
+      library is on the library path via the system property: <br/>
+      <em>-Djava.library.path=&lt;path&gt;</em></p>
+
+      <p>During runtime, check the hadoop log files for your MapReduce tasks.</p>
+      
+      <ul>
+         <li>If everything is all right, then:<br/><br/>
+          <code> DEBUG util.NativeCodeLoader - Trying to load the custom-built native-hadoop library...  </code><br/>
+          <code> INFO  util.NativeCodeLoader - Loaded the native-hadoop library </code><br/>
+         </li>
+         
+         <li>If something goes wrong, then:<br/><br/>
+         <code>
+          INFO util.NativeCodeLoader - Unable to load native-hadoop library for 
+          your platform... using builtin-java classes where applicable
+        </code>
+         
+         </li>
+      </ul>
+    </section>
+     </section>
+    
     <section>
-      <title> Loading native libraries through DistributedCache </title>
-      <p>User can load native shared libraries through  
-      <a href="mapred_tutorial.html#DistributedCache">DistributedCache</a>
-      for <em>distributing</em> and <em>symlinking</em> the library files</p>
+      <title>Native Shared Libraries</title>
+      <p>You can load <strong>any</strong> native shared library using  
+      <a href="http://hadoop.apache.org/mapreduce/docs/current/mapred_tutorial.html#DistributedCache">DistributedCache</a> 
+      for <em>distributing</em> and <em>symlinking</em> the library files.</p>
       
-      <p>Here is an example, describing how to distribute the library and
-      load it from map/reduce task. </p>
+      <p>This example shows you how to distribute a shared library, <code>mylib.so</code>, 
+      and load it from a MapReduce task.</p>
       <ol>
-      <li> First copy the library to the HDFS. <br/>
+      <li> First copy the library to the HDFS: <br/>
       <code>bin/hadoop fs -copyFromLocal mylib.so.1 /libraries/mylib.so.1</code>
       </li>
       <li> The job launching program should contain the following: <br/>
@@ -202,10 +219,13 @@
       <code> DistributedCache.addCacheFile("hdfs://host:port/libraries/mylib.so.1#mylib.so", conf);
       </code>
       </li>
-      <li> The map/reduce task can contain: <br/>
+      <li> The MapReduce task can contain: <br/>
       <code> System.loadLibrary("mylib.so"); </code>
       </li>
       </ol>
+      
+     <p><br/><strong>Note:</strong> If you downloaded or built the native hadoop library, you don’t need to use DistributedCache to
+     make the library available to your MapReduce tasks.</p>
     </section>
   </body>
   

Modified: hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/service_level_auth.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/service_level_auth.xml?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/service_level_auth.xml (original)
+++ hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/service_level_auth.xml Sat Nov 28 19:53:33 2009
@@ -34,17 +34,15 @@
     </section>
     
     <section>
-      <title>Pre-requisites</title>
+      <title>Prerequisites</title>
       
-      <p>Ensure that Hadoop is installed, configured and setup correctly. More
-      details:</p> 
+      <p>Make sure Hadoop is installed, configured and set up correctly. For more information see:</p>
       <ul>
         <li>
-          <a href="quickstart.html">Hadoop Quick Start</a> for first-time users.
+          <a href="single_node_setup.html">Single Node Setup</a> for first-time users.
         </li>
         <li>
-          <a href="cluster_setup.html">Hadoop Cluster Setup</a> for large, 
-          distributed clusters.
+          <a href="cluster_setup.html">Cluster Setup</a> for large, distributed clusters.
         </li>
       </ul>
     </section>
@@ -55,7 +53,7 @@
       <p>Service Level Authorization is the initial authorization mechanism to
       ensure clients connecting to a particular Hadoop <em>service</em> have the
       necessary, pre-configured, permissions and are authorized to access the given
-      service. For e.g. a Map/Reduce cluster can use this mechanism to allow a
+      service. For example, a MapReduce cluster can use this mechanism to allow a
       configured list of users/groups to submit jobs.</p>
       
       <p>The <code>${HADOOP_CONF_DIR}/hadoop-policy.xml</code> configuration file 
@@ -198,33 +196,33 @@
         <title>Examples</title>
         
         <p>Allow only users <code>alice</code>, <code>bob</code> and users in the 
-        <code>mapreduce</code> group to submit jobs to the Map/Reduce cluster:</p>
+        <code>mapreduce</code> group to submit jobs to the MapReduce cluster:</p>
         
-        <table>
-          <tr><td>&nbsp;&nbsp;&lt;property&gt;</td></tr>
-            <tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&lt;name&gt;security.job.submission.protocol.acl&lt;/name&gt;</td></tr>
-            <tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&lt;value&gt;alice,bob mapreduce&lt;/value&gt;</td></tr>
-          <tr><td>&nbsp;&nbsp;&lt;/property&gt;</td></tr>
-        </table>
+<source>
+&lt;property&gt;
+     &lt;name&gt;security.job.submission.protocol.acl&lt;/name&gt;
+     &lt;value&gt;alice,bob mapreduce&lt;/value&gt;
+&lt;/property&gt;
+</source>        
         
         <p></p><p>Allow only DataNodes running as the users who belong to the 
         group <code>datanodes</code> to communicate with the NameNode:</p> 
-        
-        <table>
-          <tr><td>&nbsp;&nbsp;&lt;property&gt;</td></tr>
-            <tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&lt;name&gt;security.datanode.protocol.acl&lt;/name&gt;</td></tr>
-            <tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&lt;value&gt; datanodes&lt;/value&gt;</td></tr>
-          <tr><td>&nbsp;&nbsp;&lt;/property&gt;</td></tr>
-        </table>
+ 
+<source>
+&lt;property&gt;
+     &lt;name&gt;security.datanode.protocol.acl&lt;/name&gt;
+     &lt;value&gt;datanodes&lt;/value&gt;
+&lt;/property&gt;
+</source>        
         
         <p></p><p>Allow any user to talk to the HDFS cluster as a DFSClient:</p>
-        
-        <table>
-          <tr><td>&nbsp;&nbsp;&lt;property&gt;</td></tr>
-            <tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&lt;name&gt;security.client.protocol.acl&lt;/name&gt;</td></tr>
-            <tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&lt;value&gt;*&lt;/value&gt;</td></tr>
-          <tr><td>&nbsp;&nbsp;&lt;/property&gt;</td></tr>
-        </table>
+
+<source>
+&lt;property&gt;
+     &lt;name&gt;security.client.protocol.acl&lt;/name&gt;
+     &lt;value&gt;*&lt;/value&gt;
+&lt;/property&gt;
+</source>        
         
       </section>
     </section>

Modified: hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/site.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/site.xml?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/site.xml (original)
+++ hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/site.xml Sat Nov 28 19:53:33 2009
@@ -34,40 +34,16 @@
   
    <docs label="Getting Started"> 
 		<overview   				label="Overview" 					href="index.html" />
-		<quickstart 				label="Quick Start"        		href="quickstart.html" />
+		<quickstart 				label="Single Node Setup"      href="single_node_setup.html" />
 		<setup     					label="Cluster Setup"      		href="cluster_setup.html" />
-		<mapred    				label="Map/Reduce Tutorial" 	href="mapred_tutorial.html" />
   </docs>	
 		
- <docs label="Programming Guides">
-		<commands 				label="Commands"     					href="commands_manual.html" />
-		<distcp    					label="DistCp"       						href="distcp.html" />
-		<native_lib    				label="Native Libraries" 					href="native_libraries.html" />
-		<streaming 				label="Streaming"          				href="streaming.html" />
-		<fair_scheduler 			label="Fair Scheduler" 					href="fair_scheduler.html"/>
-		<cap_scheduler 		label="Capacity Scheduler" 			href="capacity_scheduler.html"/>
+ <docs label="Guides">
+		<fsshell				        label="File System Shell"               href="file_system_shell.html" />
 		<SLA					 	label="Service Level Authorization" 	href="service_level_auth.html"/>
-		<vaidya    					label="Vaidya" 								href="vaidya.html"/>
-		<archives  				label="Archives"     						href="hadoop_archives.html"/>
+		<native_lib    				label="Native Libraries" 					href="native_libraries.html" />
    </docs>
-   
-   <docs label="HDFS">
-		<hdfs_user      				label="User Guide"    							href="hdfs_user_guide.html" />
-		<hdfs_arch     				label="Architecture"  								href="hdfs_design.html" />	
-		<hdfs_fs       	 				label="File System Shell Guide"     		href="hdfs_shell.html" />
-		<hdfs_perm      				label="Permissions Guide"    					href="hdfs_permissions_guide.html" />
-		<hdfs_quotas     			label="Quotas Guide" 							href="hdfs_quota_admin_guide.html" />
-		<hdfs_SLG        			label="Synthetic Load Generator Guide"  href="SLG_user_guide.html" />
-		<hdfs_imageviewer						label="Offline Image Viewer Guide"	href="hdfs_imageviewer.html" />
-		<hdfs_libhdfs   				label="C API libhdfs"         						href="libhdfs.html" /> 
-   </docs> 
-   
-   <docs label="HOD">
-		<hod_user 	label="User Guide" 	href="hod_user_guide.html"/>
-		<hod_admin 	label="Admin Guide" 	href="hod_admin_guide.html"/>
-		<hod_config 	label="Config Guide" 	href="hod_config_guide.html"/> 
-   </docs> 
-   
+
    <docs label="Miscellaneous"> 
 		<api       	label="API Docs"           href="ext:api/index" />
 		<jdiff     	label="API Changes"      href="ext:jdiff/changes" />
@@ -78,24 +54,26 @@
    </docs> 
    
   <external-refs>
-    <site      href="http://hadoop.apache.org/core/"/>
-    <lists     href="http://hadoop.apache.org/core/mailing_lists.html"/>
-    <archive   href="http://mail-archives.apache.org/mod_mbox/hadoop-core-commits/"/>
-    <releases  href="http://hadoop.apache.org/core/releases.html">
+    <site      href="http://hadoop.apache.org/common/"/>
+    <lists     href="http://hadoop.apache.org/common/mailing_lists.html"/>
+    <archive   href="http://mail-archives.apache.org/mod_mbox/hadoop-common-commits/"/>
+    <releases  href="http://hadoop.apache.org/common/releases.html">
       <download href="#Download" />
     </releases>
-    <jira      href="http://hadoop.apache.org/core/issue_tracking.html"/>
-    <wiki      href="http://wiki.apache.org/hadoop/" />
-    <faq       href="http://wiki.apache.org/hadoop/FAQ" />
-    <hadoop-default href="http://hadoop.apache.org/core/docs/current/hadoop-default.html" />
-    <core-default href="http://hadoop.apache.org/core/docs/current/core-default.html" />
-    <hdfs-default href="http://hadoop.apache.org/core/docs/current/hdfs-default.html" />
-    <mapred-default href="http://hadoop.apache.org/core/docs/current/mapred-default.html" />
+    <jira  href="http://hadoop.apache.org/common/issue_tracking.html"/>
+    <wiki  href="http://wiki.apache.org/hadoop/Common" />
+    <faq  href="http://wiki.apache.org/hadoop/Common/FAQ" />
+    
+    <common-default href="http://hadoop.apache.org/common/docs/current/common-default.html" />
+    <hdfs-default href="http://hadoop.apache.org/hdfs/docs/current/hdfs-default.html" />
+    <mapred-default href="http://hadoop.apache.org/mapreduce/docs/current/mapred-default.html" />
+    
     <zlib      href="http://www.zlib.net/" />
     <gzip      href="http://www.gzip.org/" />
     <bzip      href="http://www.bzip.org/" />
     <cygwin    href="http://www.cygwin.com/" />
     <osx       href="http://www.apple.com/macosx" />
+    
     <hod href="">
       <cluster-resources href="http://www.clusterresources.com" />
       <torque href="http://www.clusterresources.com/pages/products/torque-resource-manager.php" />
@@ -109,6 +87,7 @@
       <python href="http://www.python.org" />
       <twisted-python href="http://twistedmatrix.com/trac/" />
     </hod>
+    
     <relnotes href="releasenotes.html" />
     <changes href="changes.html" />
     <jdiff href="jdiff/">

Modified: hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/tabs.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/tabs.xml?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/tabs.xml (original)
+++ hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/content/xdocs/tabs.xml Sat Nov 28 19:53:33 2009
@@ -30,8 +30,8 @@
     directory (ends in '/'), in which case /index.html will be added
   -->
 
-  <tab label="Project" href="http://hadoop.apache.org/core/" />
+  <tab label="Project" href="http://hadoop.apache.org/common/" />
   <tab label="Wiki" href="http://wiki.apache.org/hadoop" />
-  <tab label="Hadoop 0.21 Documentation" dir="" />  
+  <tab label="Common 0.21 Documentation" dir="" />  
   
 </tabs>

Modified: hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/skinconf.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/skinconf.xml?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/skinconf.xml (original)
+++ hadoop/common/branches/HADOOP-6194/src/docs/src/documentation/skinconf.xml Sat Nov 28 19:53:33 2009
@@ -68,7 +68,7 @@
   <project-name>Hadoop</project-name>
   <project-description>Scalable Computing Platform</project-description>
   <project-url>http://hadoop.apache.org/core/</project-url>
-  <project-logo>images/core-logo.gif</project-logo>
+  <project-logo>images/common-logo.jpg</project-logo>
 
   <!-- group logo -->
   <group-name>Hadoop</group-name>
@@ -146,11 +146,11 @@
     <!--Headers -->
 	#content h1 {
 	  margin-bottom: .5em;
-	  font-size: 200%; color: black;
+	  font-size: 185%; color: black;
 	  font-family: arial;
 	}  
-    h2, .h3 { font-size: 195%; color: black; font-family: arial; }
-	h3, .h4 { font-size: 140%; color: black; font-family: arial; margin-bottom: 0.5em; }
+    h2, .h3 { font-size: 175%; color: black; font-family: arial; }
+	h3, .h4 { font-size: 135%; color: black; font-family: arial; margin-bottom: 0.5em; }
 	h4, .h5 { font-size: 125%; color: black;  font-style: italic; font-weight: bold; font-family: arial; }
 	h5, h6 { font-size: 110%; color: #363636; font-weight: bold; } 
    

Propchange: hadoop/common/branches/HADOOP-6194/src/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Sat Nov 28 19:53:33 2009
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/src/java:804966-807681
+/hadoop/common/trunk/src/java:804966-884903
 /hadoop/core/branches/branch-0.19/core/src/java:713112
 /hadoop/core/trunk/src/core:776175-785643,785929-786278

Modified: hadoop/common/branches/HADOOP-6194/src/java/core-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/java/core-default.xml?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/java/core-default.xml (original)
+++ hadoop/common/branches/HADOOP-6194/src/java/core-default.xml Sat Nov 28 19:53:33 2009
@@ -26,13 +26,19 @@
 <!--- global properties -->
 
 <property>
+  <name>hadoop.common.configuration.version</name>
+  <value>0.21.0</value>
+  <description>version of this configuration file</description>
+</property>
+
+<property>
   <name>hadoop.tmp.dir</name>
   <value>/tmp/hadoop-${user.name}</value>
   <description>A base for other temporary directories.</description>
 </property>
 
 <property>
-  <name>hadoop.native.lib</name>
+  <name>io.native.lib.available</name>
   <value>true</value>
   <description>Should native hadoop libraries, if present, be used.</description>
 </property>
@@ -101,7 +107,7 @@
 
 <property>
   <name>io.serializations</name>
-  <value>org.apache.hadoop.io.serializer.WritableSerialization,org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization,org.apache.hadoop.io.serializer.avro.AvroReflectSerialization</value>
+  <value>org.apache.hadoop.io.serializer.WritableSerialization,org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization,org.apache.hadoop.io.serializer.avro.AvroReflectSerialization,org.apache.hadoop.io.serializer.avro.AvroGenericSerialization</value>
   <description>A list of serialization classes that can be used for
   obtaining serializers and deserializers.</description>
 </property>
@@ -116,10 +122,18 @@
   </description>
 </property>
 
+<property>
+  <name>io.map.index.skip</name>
+  <value>0</value>
+  <description>Number of index entries to skip between each entry.
+  Zero by default. Setting this to values larger than zero can
+  facilitate opening large MapFiles using less memory.</description>
+</property>
+
 <!-- file system properties -->
 
 <property>
-  <name>fs.default.name</name>
+  <name>fs.defaultFS</name>
   <value>file:///</value>
   <description>The name of the default file system.  A URI whose
   scheme and authority determine the FileSystem implementation.  The
@@ -149,6 +163,19 @@
 </property>
 
 <property>
+  <name>fs.AbstractFileSystem.file.impl</name>
+  <value>org.apache.hadoop.fs.local.LocalFs</value>
+  <description>The AbstractFileSystem for file: uris.</description>
+</property>
+
+
+<property>
+  <name>fs.AbstractFileSystem.hdfs.impl</name>
+  <value>org.apache.hadoop.fs.Hdfs</value>
+  <description>The AbstractFileSystem for hdfs: uris.</description>
+</property>
+
+<property>
   <name>fs.s3.impl</name>
   <value>org.apache.hadoop.fs.s3.S3FileSystem</value>
   <description>The FileSystem for s3: uris.</description>
@@ -195,6 +222,12 @@
 </property>
 
 <property>
+  <name>fs.har.impl.disable.cache</name>
+  <value>true</value>
+  <description>Don't cache 'har' filesystem instances.</description>
+</property>
+
+<property>
   <name>fs.checkpoint.dir</name>
   <value>${hadoop.tmp.dir}/dfs/namesecondary</value>
   <description>Determines where on the local filesystem the DFS secondary
@@ -230,7 +263,11 @@
   </description>
 </property>
 
-
+<property>
+  <name>fs.df.interval</name>
+  <value>60000</value>
+  <description>Disk usage statistics refresh interval in msec.</description>
+</property>
 
 <property>
   <name>fs.s3.block.size</name>
@@ -445,7 +482,7 @@
 <!-- Rack Configuration -->
 
 <property>
-  <name>topology.node.switch.mapping.impl</name>
+  <name>net.topology.node.switch.mapping.impl</name>
   <value>org.apache.hadoop.net.ScriptBasedMapping</value>
   <description> The default implementation of the DNSToSwitchMapping. It
     invokes a script specified in topology.script.file.name to resolve
@@ -455,7 +492,7 @@
 </property>
 
 <property>
-  <name>topology.script.file.name</name>
+  <name>net.topology.script.file.name</name>
   <value></value>
   <description> The script name that should be invoked to resolve DNS names to
     NetworkTopology names. Example: the script would take host.foo.bar as an
@@ -464,7 +501,7 @@
 </property>
 
 <property>
-  <name>topology.script.number.args</name>
+  <name>net.topology.script.number.args</name>
   <value>100</value>
   <description> The max number of args that the script configured with 
     topology.script.file.name should be run with. Each arg is an
@@ -472,6 +509,183 @@
   </description>
 </property>
 
+<!-- Local file system -->
+<property>
+  <name>file.stream-buffer-size</name>
+  <value>4096</value>
+  <description>The size of buffer to stream files.
+  The size of this buffer should probably be a multiple of hardware
+  page size (4096 on Intel x86), and it determines how much data is
+  buffered during read and write operations.</description>
+</property>
+
+<property>
+  <name>file.bytes-per-checksum</name>
+  <value>512</value>
+  <description>The number of bytes per checksum.  Must not be larger than
+  file.stream-buffer-size</description>
+</property>
+
+<property>
+  <name>file.client-write-packet-size</name>
+  <value>65536</value>
+  <description>Packet size for clients to write</description>
+</property>
+
+<property>
+  <name>file.blocksize</name>
+  <value>67108864</value>
+  <description>Block size</description>
+</property>
+
+<property>
+  <name>file.replication</name>
+  <value>1</value>
+  <description>Replication factor</description>
+</property>
+
+<!-- s3 File System -->
+
+<property>
+  <name>s3.stream-buffer-size</name>
+  <value>4096</value>
+  <description>The size of buffer to stream files.
+  The size of this buffer should probably be a multiple of hardware
+  page size (4096 on Intel x86), and it determines how much data is
+  buffered during read and write operations.</description>
+</property>
+
+<property>
+  <name>s3.bytes-per-checksum</name>
+  <value>512</value>
+  <description>The number of bytes per checksum.  Must not be larger than
+  s3.stream-buffer-size</description>
+</property>
+
+<property>
+  <name>s3.client-write-packet-size</name>
+  <value>65536</value>
+  <description>Packet size for clients to write</description>
+</property>
+
+<property>
+  <name>s3.blocksize</name>
+  <value>67108864</value>
+  <description>Block size</description>
+</property>
+
+<property>
+  <name>s3.replication</name>
+  <value>3</value>
+  <description>Replication factor</description>
+</property>
+
+<!-- s3native File System -->
+
+<property>
+  <name>s3native.stream-buffer-size</name>
+  <value>4096</value>
+  <description>The size of buffer to stream files.
+  The size of this buffer should probably be a multiple of hardware
+  page size (4096 on Intel x86), and it determines how much data is
+  buffered during read and write operations.</description>
+</property>
+
+<property>
+  <name>s3native.bytes-per-checksum</name>
+  <value>512</value>
+  <description>The number of bytes per checksum.  Must not be larger than
+  s3native.stream-buffer-size</description>
+</property>
+
+<property>
+  <name>s3native.client-write-packet-size</name>
+  <value>65536</value>
+  <description>Packet size for clients to write</description>
+</property>
+
+<property>
+  <name>s3native.blocksize</name>
+  <value>67108864</value>
+  <description>Block size</description>
+</property>
+
+<property>
+  <name>s3native.replication</name>
+  <value>3</value>
+  <description>Replication factor</description>
+</property>
+
+<!-- Kosmos File System -->
+
+<property>
+  <name>kfs.stream-buffer-size</name>
+  <value>4096</value>
+  <description>The size of buffer to stream files.
+  The size of this buffer should probably be a multiple of hardware
+  page size (4096 on Intel x86), and it determines how much data is
+  buffered during read and write operations.</description>
+</property>
+
+<property>
+  <name>kfs.bytes-per-checksum</name>
+  <value>512</value>
+  <description>The number of bytes per checksum.  Must not be larger than
+  kfs.stream-buffer-size</description>
+</property>
+
+<property>
+  <name>kfs.client-write-packet-size</name>
+  <value>65536</value>
+  <description>Packet size for clients to write</description>
+</property>
+
+<property>
+  <name>kfs.blocksize</name>
+  <value>67108864</value>
+  <description>Block size</description>
+</property>
+
+<property>
+  <name>kfs.replication</name>
+  <value>3</value>
+  <description>Replication factor</description>
+</property>
+
+<!-- FTP file system -->
+<property>
+  <name>ftp.stream-buffer-size</name>
+  <value>4096</value>
+  <description>The size of buffer to stream files.
+  The size of this buffer should probably be a multiple of hardware
+  page size (4096 on Intel x86), and it determines how much data is
+  buffered during read and write operations.</description>
+</property>
+
+<property>
+  <name>ftp.bytes-per-checksum</name>
+  <value>512</value>
+  <description>The number of bytes per checksum.  Must not be larger than
+  ftp.stream-buffer-size</description>
+</property>
+
+<property>
+  <name>ftp.client-write-packet-size</name>
+  <value>65536</value>
+  <description>Packet size for clients to write</description>
+</property>
+
+<property>
+  <name>ftp.blocksize</name>
+  <value>67108864</value>
+  <description>Block size</description>
+</property>
+
+<property>
+  <name>ftp.replication</name>
+  <value>3</value>
+  <description>Replication factor</description>
+</property>
 
 
 </configuration>
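
The new per-scheme keys above (file.*, s3.*, s3native.*, kfs.*, ftp.*) and the renamed fs.defaultFS are all read through the ordinary Configuration API. A minimal sketch, using a hypothetical driver class, of how a client might read a few of them; values fall back to the defaults listed above unless core-site.xml overrides them:

  import org.apache.hadoop.conf.Configuration;

  // Hypothetical driver class; the key names come from core-default.xml above.
  public class SchemeDefaultsExample {
    public static void main(String[] args) {
      // Loads core-default.xml and core-site.xml from the classpath.
      Configuration conf = new Configuration();
      String fsUri  = conf.get("fs.defaultFS", "file:///");
      int ftpBuffer = conf.getInt("ftp.stream-buffer-size", 4096);
      long s3Block  = conf.getLong("s3.blocksize", 67108864L);
      System.out.println(fsUri + ", ftp buffer=" + ftpBuffer
          + ", s3 block=" + s3Block);
    }
  }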

Modified: hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/conf/Configuration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/conf/Configuration.java?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/conf/Configuration.java (original)
+++ hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/conf/Configuration.java Sat Nov 28 19:53:33 2009
@@ -44,6 +44,7 @@
 import java.util.Set;
 import java.util.StringTokenizer;
 import java.util.WeakHashMap;
+import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -59,6 +60,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -167,8 +169,11 @@
    * List of default Resources. Resources are loaded in the order of the list 
    * entries
    */
-  private static final ArrayList<String> defaultResources = 
-    new ArrayList<String>();
+  private static final CopyOnWriteArrayList<String> defaultResources =
+    new CopyOnWriteArrayList<String>();
+
+  private static final Map<ClassLoader, Map<String, Class<?>>>
+    CACHE_CLASSES = new WeakHashMap<ClassLoader, Map<String, Class<?>>>();
   
   /**
    * Flag to indicate if the storage of resource which updates a key needs 
@@ -181,6 +186,155 @@
    * the key most recently
    */
   private HashMap<String, String> updatingResource;
+ 
+  /**
+   * Class to keep the information about the keys which replace the deprecated
+   * ones.
+   * 
+   * This class stores the new keys which replace the deprecated keys and also
+   * allows a custom message to be provided for each deprecated key that is
+   * being replaced. It also provides a method to get the appropriate warning
+   * message, which can be logged whenever the deprecated key is used.
+   */
+  private static class DeprecatedKeyInfo {
+    private String[] newKeys;
+    private String customMessage;
+    private boolean accessed;
+    DeprecatedKeyInfo(String[] newKeys, String customMessage) {
+      this.newKeys = newKeys;
+      this.customMessage = customMessage;
+      accessed = false;
+    }
+    DeprecatedKeyInfo(String[] newKeys) {
+      this(newKeys, null);
+    }
+
+    /**
+     * Method to provide the warning message. It returns the custom message if
+     * non-null, and the default message otherwise.
+     * @param key the associated deprecated key.
+     * @return message that is to be logged when a deprecated key is used.
+     */
+    private final String getWarningMessage(String key) {
+      String warningMessage;
+      if(customMessage == null) {
+        StringBuilder message = new StringBuilder(key);
+        String deprecatedKeySuffix = " is deprecated. Instead, use ";
+        message.append(deprecatedKeySuffix);
+        for (int i = 0; i < newKeys.length; i++) {
+          message.append(newKeys[i]);
+          if(i != newKeys.length-1) {
+            message.append(", ");
+          }
+        }
+        warningMessage = message.toString();
+      }
+      else {
+        warningMessage = customMessage;
+      }
+      accessed = true;
+      return warningMessage;
+    }
+  }
+  
+  /**
+   * Stores the deprecated keys, the new keys which replace the deprecated keys
+   * and custom message(if any provided).
+   */
+  private static Map<String, DeprecatedKeyInfo> deprecatedKeyMap = 
+    new HashMap<String, DeprecatedKeyInfo>();
+  
+  /**
+   * Adds the deprecated key to the deprecation map.
+   * It does not override any existing entries in the deprecation map.
+   * This is to be used only by developers to register key deprecations;
+   * calling this method after the resources have been loaded once will
+   * lead to <tt>UnsupportedOperationException</tt>.
+   * @param key
+   * @param newKeys
+   * @param customMessage
+   */
+  public synchronized static void addDeprecation(String key, String[] newKeys,
+      String customMessage) {
+    if (key == null || key.length() == 0 ||
+        newKeys == null || newKeys.length == 0) {
+      throw new IllegalArgumentException();
+    }
+    if (!isDeprecated(key)) {
+      DeprecatedKeyInfo newKeyInfo;
+      if (customMessage == null) {
+        newKeyInfo = new DeprecatedKeyInfo(newKeys);
+      }
+      else {
+        newKeyInfo = new DeprecatedKeyInfo(newKeys, customMessage);
+      }
+      deprecatedKeyMap.put(key, newKeyInfo);
+    }
+  }
+
+  /**
+   * Adds the deprecated key to the deprecation map when no custom message
+   * is provided.
+   * It does not override any existing entries in the deprecation map.
+   * This is to be used only by developers to register key deprecations;
+   * calling this method after the resources have been loaded once will
+   * lead to <tt>UnsupportedOperationException</tt>.
+   * 
+   * @param key Key that is to be deprecated
+   * @param newKeys list of keys that take up the values of deprecated key
+   */
+  public synchronized static void addDeprecation(String key, String[] newKeys) {
+    addDeprecation(key, newKeys, null);
+  }
+  
+  /**
+   * Checks whether the given <code>key</code> is deprecated.
+   * 
+   * @param key the parameter which is to be checked for deprecation
+   * @return <code>true</code> if the key is deprecated and 
+   *         <code>false</code> otherwise.
+   */
+  private static boolean isDeprecated(String key) {
+    return deprecatedKeyMap.containsKey(key);
+  }
+ 
+  /**
+   * Check whether or not the deprecated key has been specified in the
+   * configuration file rather than the new key
+   * 
+   * Returns false if the specified key is not included in the deprecated
+   * key mapping.
+   * 
+   * @param oldKey Old configuration key 
+   * @return If the old configuration key was specified rather than the new one
+   */
+  public boolean deprecatedKeyWasSet(String oldKey) {
+    return isDeprecated(oldKey) && deprecatedKeyMap.get(oldKey).accessed;
+  }
+  
+  /**
+   * Checks for the presence of the property <code>name</code> in the
+   * deprecation map. Returns the first of the list of new keys if present
+   * in the deprecation map or the <code>name</code> itself.
+   * @param name the property name
+   * @return the first property in the list of properties mapping
+   *         the <code>name</code> or the <code>name</code> itself.
+   */
+  private String handleDeprecation(String name) {
+    if (isDeprecated(name)) {
+      DeprecatedKeyInfo keyInfo = deprecatedKeyMap.get(name);
+      if (!keyInfo.accessed) {
+        LOG.warn(keyInfo.getWarningMessage(name));
+      }
+      for (String newKey : keyInfo.newKeys) {
+        if(newKey != null) {
+          name = newKey;
+          break;
+        }
+      }
+    }
+    return name;
+  }
   
   static{
     //print deprecation warning if hadoop-site.xml is found in classpath
@@ -197,6 +351,13 @@
     }
     addDefaultResource("core-default.xml");
     addDefaultResource("core-site.xml");
+    //Add code for managing deprecated key mapping
+    //for example
+    //addDeprecation("oldKey1",new String[]{"newkey1","newkey2"});
+    //adds deprecation for oldKey1 to two new keys(newkey1, newkey2).
+    //so get or set of oldKey1 will correctly populate/access values of 
+    //newkey1 and newkey2
+    addDeprecatedKeys();
   }
   
   private Properties properties;
@@ -223,9 +384,6 @@
    */
   public Configuration(boolean loadDefaults) {
     this.loadDefaults = loadDefaults;
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(StringUtils.stringifyException(new IOException("config()")));
-    }
     synchronized(Configuration.class) {
       REGISTRY.put(this, null);
     }
@@ -255,11 +413,6 @@
    */
   @SuppressWarnings("unchecked")
   public Configuration(Configuration other) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(StringUtils.stringifyException
-                (new IOException("config(config)")));
-    }
-   
    this.resources = (ArrayList)other.resources.clone();
    synchronized(other) {
      if (other.properties != null) {
@@ -405,40 +558,60 @@
   
   /**
    * Get the value of the <code>name</code> property, <code>null</code> if
-   * no such property exists.
+   * no such property exists. If the key is deprecated, it returns the value of
+   * the first key which replaces the deprecated key and is not null.
    * 
    * Values are processed for <a href="#VariableExpansion">variable expansion</a> 
    * before being returned. 
    * 
    * @param name the property name.
-   * @return the value of the <code>name</code> property, 
+   * @return the value of the <code>name</code> or its replacing property, 
    *         or null if no such property exists.
    */
   public String get(String name) {
+    name = handleDeprecation(name);
     return substituteVars(getProps().getProperty(name));
   }
 
   /**
    * Get the value of the <code>name</code> property, without doing
-   * <a href="#VariableExpansion">variable expansion</a>.
+   * <a href="#VariableExpansion">variable expansion</a>. If the key is 
+   * deprecated, it returns the value of the first key which replaces 
+   * the deprecated key and is not null.
    * 
    * @param name the property name.
-   * @return the value of the <code>name</code> property, 
-   *         or null if no such property exists.
+   * @return the value of the <code>name</code> property or 
+   *         its replacing property, or null if no such property exists.
    */
   public String getRaw(String name) {
+    name = handleDeprecation(name);
     return getProps().getProperty(name);
   }
 
   /** 
-   * Set the <code>value</code> of the <code>name</code> property.
+   * Set the <code>value</code> of the <code>name</code> property. If 
+   * <code>name</code> is deprecated, it sets the <code>value</code> to the keys
+   * that replace the deprecated key.
    * 
    * @param name property name.
    * @param value property value.
    */
   public void set(String name, String value) {
-    getOverlay().setProperty(name, value);
-    getProps().setProperty(name, value);
+    if (deprecatedKeyMap.isEmpty()) {
+      getProps();
+    }
+    if (!isDeprecated(name)) {
+      getOverlay().setProperty(name, value);
+      getProps().setProperty(name, value);
+    }
+    else {
+      DeprecatedKeyInfo keyInfo = deprecatedKeyMap.get(name);
+      LOG.warn(keyInfo.getWarningMessage(name));
+      for (String newKey : keyInfo.newKeys) {
+        getOverlay().setProperty(newKey, value);
+        getProps().setProperty(newKey, value);
+      }
+    }
   }
   
   /**
@@ -460,8 +633,11 @@
   }
 
   /** 
-   * Get the value of the <code>name</code> property. If no such property 
-   * exists, then <code>defaultValue</code> is returned.
+   * Get the value of the <code>name</code> property. If the key is deprecated,
+   * it returns the value of the first key which replaces the deprecated key
+   * and is not null.
+   * If no such property exists,
+   * then <code>defaultValue</code> is returned.
    * 
    * @param name property name.
    * @param defaultValue default value.
@@ -469,6 +645,7 @@
    *         doesn't exist.                    
    */
   public String get(String name, String defaultValue) {
+    name = handleDeprecation(name);
     return substituteVars(getProps().getProperty(name, defaultValue));
   }
     
@@ -869,7 +1046,27 @@
    * @throws ClassNotFoundException if the class is not found.
    */
   public Class<?> getClassByName(String name) throws ClassNotFoundException {
-    return Class.forName(name, true, classLoader);
+    Map<String, Class<?>> map;
+    
+    synchronized (CACHE_CLASSES) {
+      map = CACHE_CLASSES.get(classLoader);
+      if (map == null) {
+        map = Collections.synchronizedMap(
+          new WeakHashMap<String, Class<?>>());
+        CACHE_CLASSES.put(classLoader, map);
+      }
+    }
+
+    Class<?> clazz = map.get(name);
+    if (clazz == null) {
+      clazz = Class.forName(name, true, classLoader);
+      if (clazz != null) {
+        // two putters can race here, but they'll put the same class
+        map.put(name, clazz);
+      }
+    }
+
+    return clazz;
   }
 
   /** 
@@ -1180,8 +1377,62 @@
     for (Object resource : resources) {
       loadResource(properties, resource, quiet);
     }
+    // process for deprecation.
+    processDeprecatedKeys();
   }
-
+  /**
+   * Updates the keys that are replacing the deprecated keys and removes the
+   * deprecated keys from the loaded properties.
+   */
+  private void processDeprecatedKeys() {
+    for (Map.Entry<String, DeprecatedKeyInfo> item : 
+      deprecatedKeyMap.entrySet()) {
+      if (!properties.containsKey(item.getKey())) {
+        continue;
+      }
+      String oldKey = item.getKey();
+      deprecatedKeyMap.get(oldKey).accessed = false;
+      setDeprecatedValue(oldKey, properties.getProperty(oldKey),
+          finalParameters.contains(oldKey));
+      properties.remove(oldKey);
+      if (finalParameters.contains(oldKey)) {
+        finalParameters.remove(oldKey);
+      }
+      if (storeResource) {
+        updatingResource.remove(oldKey);
+      }
+    }
+  }
+  
+  /**
+   * Sets the deprecated key's value to the associated mapped keys
+   * @param attr the deprecated key
+   * @param value the value corresponding to the deprecated key
+   * @param finalParameter flag to indicate if <code>attr</code> is
+   *        marked as final
+   */
+  private void setDeprecatedValue(String attr,
+      String value, boolean finalParameter) {
+    DeprecatedKeyInfo keyInfo = deprecatedKeyMap.get(attr);
+    for (String key:keyInfo.newKeys) {
+      // update replacing keys with deprecated key's value in all cases,
+      // except when the replacing key is already set to final
+      // and finalParameter is false
+      if (finalParameters.contains(key) && !finalParameter) {
+        LOG.warn("An attempt to override final parameter: "+key
+            +";  Ignoring.");
+        continue;
+      }
+      properties.setProperty(key, value);
+      if (storeResource) {
+        updatingResource.put(key, updatingResource.get(attr));
+      }
+      if (finalParameter) {
+        finalParameters.add(key);
+      }
+    }
+  }
+  
   private void loadResource(Properties properties, Object name, boolean quiet) {
     try {
       DocumentBuilderFactory docBuilderFactory 
@@ -1286,17 +1537,20 @@
         }
         
         // Ignore this parameter if it has already been marked as 'final'
-        if (attr != null && value != null) {
-          if (!finalParameters.contains(attr)) {
-            properties.setProperty(attr, value);
-            if (storeResource) {
-              updatingResource.put(attr, name.toString());
-            }
-            if (finalParameter)
-              finalParameters.add(attr);
-          } else {
-            LOG.warn(name+":a attempt to override final parameter: "+attr
+        if (attr != null) {
+          if (value != null) {
+            if (!finalParameters.contains(attr)) {
+              properties.setProperty(attr, value);
+              if (storeResource) {
+                updatingResource.put(attr, name.toString());
+              }
+            } else {
+              LOG.warn(name+":a attempt to override final parameter: "+attr
                      +";  Ignoring.");
+            }
+          }
+          if (finalParameter) {
+            finalParameters.add(attr);
           }
         }
       }
@@ -1432,7 +1686,7 @@
     return sb.toString();
   }
 
-  private void toString(ArrayList resources, StringBuffer sb) {
+  private void toString(List resources, StringBuffer sb) {
     ListIterator i = resources.listIterator();
     while (i.hasNext()) {
       if (i.nextIndex() != 0) {
@@ -1483,4 +1737,28 @@
     }
   }
 
+  //Load deprecated keys in common
+  private static void addDeprecatedKeys() {
+    Configuration.addDeprecation("topology.script.file.name", 
+               new String[]{CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY});
+    Configuration.addDeprecation("topology.script.number.args", 
+               new String[]{CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_KEY});
+    Configuration.addDeprecation("hadoop.configured.node.mapping", 
+               new String[]{CommonConfigurationKeys.NET_TOPOLOGY_CONFIGURED_NODE_MAPPING_KEY});
+    Configuration.addDeprecation("topology.node.switch.mapping.impl", 
+               new String[]{CommonConfigurationKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY});
+    Configuration.addDeprecation("dfs.umask", 
+               new String[]{CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY},
+               "dfs.umask is deprecated, use " + 
+               CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY + 
+               " with octal or symbolic specifications.");
+    Configuration.addDeprecation("dfs.df.interval", 
+               new String[]{CommonConfigurationKeys.FS_DF_INTERVAL_KEY});
+    Configuration.addDeprecation("dfs.client.buffer.dir", 
+               new String[]{CommonConfigurationKeys.FS_CLIENT_BUFFER_DIR_KEY});
+    Configuration.addDeprecation("hadoop.native.lib", 
+               new String[]{CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY});
+    Configuration.addDeprecation("fs.default.name", 
+               new String[]{CommonConfigurationKeys.FS_DEFAULT_NAME_KEY});
+  }
 }
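
The deprecation support added above can be exercised directly through the public addDeprecation() call. A minimal sketch, using made-up keys my.old.key and my.new.key rather than the built-in mappings registered by addDeprecatedKeys():

  import org.apache.hadoop.conf.Configuration;

  // Hypothetical driver class; my.old.key and my.new.key are made-up names.
  public class DeprecationExample {
    public static void main(String[] args) {
      // Register the mapping before the configuration resources are loaded.
      Configuration.addDeprecation("my.old.key", new String[] {"my.new.key"});
      Configuration conf = new Configuration();
      conf.set("my.old.key", "42");               // warns, then writes my.new.key
      System.out.println(conf.get("my.new.key")); // prints 42
      System.out.println(conf.get("my.old.key")); // also 42, via handleDeprecation()
    }
  }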

Modified: hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/ChecksumFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/ChecksumFileSystem.java?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/ChecksumFileSystem.java (original)
+++ hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/ChecksumFileSystem.java Sat Nov 28 19:53:33 2009
@@ -54,7 +54,8 @@
   public void setConf(Configuration conf) {
     super.setConf(conf);
     if (conf != null) {
-      bytesPerChecksum = conf.getInt("io.bytes.per.checksum", 512);
+      bytesPerChecksum = conf.getInt(LocalFileSystemConfigKeys.LOCAL_FS_BYTES_PER_CHECKSUM_KEY,
+		                     LocalFileSystemConfigKeys.LOCAL_FS_BYTES_PER_CHECKSUM_DEFAULT);
     }
   }
   
@@ -94,7 +95,9 @@
   }
 
   private int getSumBufferSize(int bytesPerSum, int bufferSize) {
-    int defaultBufferSize = getConf().getInt("io.file.buffer.size", 4096);
+    int defaultBufferSize = getConf().getInt(
+                       LocalFileSystemConfigKeys.LOCAL_FS_STREAM_BUFFER_SIZE_KEY,
+                       LocalFileSystemConfigKeys.LOCAL_FS_STREAM_BUFFER_SIZE_DEFAULT);
     int proportionalBufferSize = bufferSize / bytesPerSum;
     return Math.max(bytesPerSum,
                     Math.max(proportionalBufferSize, defaultBufferSize));
@@ -119,7 +122,9 @@
     
     public ChecksumFSInputChecker(ChecksumFileSystem fs, Path file)
       throws IOException {
-      this(fs, file, fs.getConf().getInt("io.file.buffer.size", 4096));
+      this(fs, file, fs.getConf().getInt(
+                       LocalFileSystemConfigKeys.LOCAL_FS_STREAM_BUFFER_SIZE_KEY, 
+                       LocalFileSystemConfigKeys.LOCAL_FS_STREAM_BUFFER_SIZE_DEFAULT));
     }
     
     public ChecksumFSInputChecker(ChecksumFileSystem fs, Path file, int bufferSize)
@@ -320,7 +325,8 @@
                           Configuration conf)
       throws IOException {
       this(fs, file, overwrite, 
-           conf.getInt("io.file.buffer.size", 4096),
+           conf.getInt(LocalFileSystemConfigKeys.LOCAL_FS_STREAM_BUFFER_SIZE_KEY,
+		       LocalFileSystemConfigKeys.LOCAL_FS_STREAM_BUFFER_SIZE_DEFAULT),
            replication, blockSize, null);
     }
     

Modified: hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/DF.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/DF.java?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/DF.java (original)
+++ hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/DF.java Sat Nov 28 19:53:33 2009
@@ -25,6 +25,7 @@
 import java.util.StringTokenizer;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.util.Shell;
 
 /** Filesystem disk space usage statistics.  Uses the unix 'df' program.
@@ -72,7 +73,7 @@
   }
 
   public DF(File path, Configuration conf) throws IOException {
-    this(path, conf.getLong("dfs.df.interval", DF.DF_INTERVAL_DEFAULT));
+    this(path, conf.getLong(CommonConfigurationKeys.FS_DF_INTERVAL_KEY, DF.DF_INTERVAL_DEFAULT));
   }
 
   public DF(File path, long dfInterval) throws IOException {
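
With dfs.df.interval now deprecated in favour of fs.df.interval (see the addDeprecatedKeys() mapping and the new core-default.xml entry), DF picks the refresh interval up from the Configuration it is given. A minimal sketch, with a hypothetical driver class and /tmp as a sample path:

  import java.io.File;
  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.DF;

  // Hypothetical driver class.
  public class DfExample {
    public static void main(String[] args) throws IOException {
      Configuration conf = new Configuration();
      conf.setLong("fs.df.interval", 60000L);   // refresh 'df' output every 60s
      DF df = new DF(new File("/tmp"), conf);
      System.out.println(df.getDirPath() + ": " + df.getAvailable()
          + " bytes available on " + df.getMount());
    }
  }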


