hadoop-hdfs-commits mailing list archives

From: ste...@apache.org
Subject: svn commit: r885143 [4/18] - in /hadoop/hdfs/branches/HDFS-326: ./ .eclipse.templates/ .eclipse.templates/.launches/ conf/ ivy/ lib/ src/ant/org/apache/hadoop/ant/ src/ant/org/apache/hadoop/ant/condition/ src/c++/ src/c++/libhdfs/ src/c++/libhdfs/docs/...
Date: Sat, 28 Nov 2009 20:06:08 GMT
Modified: hadoop/hdfs/branches/HDFS-326/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUgiManager.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUgiManager.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUgiManager.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUgiManager.java Sat Nov 28 20:05:56 2009
@@ -25,6 +25,7 @@
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.util.Shell;
 
@@ -37,7 +38,7 @@
   static final int CLEANUP_THRESHOLD = 1000;
 
   static {
-    Configuration conf = new Configuration(false);
+    Configuration conf = new HdfsConfiguration(false);
     conf.addResource("hdfsproxy-default.xml");
     ugiLifetime = conf.getLong("hdfsproxy.ugi.cache.ugi.lifetime", 15) * 60 * 1000L;
   }

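For context, the pattern this commit applies across hdfsproxy and thriftfs is shown below in isolation. This is a minimal sketch, not part of the commit; the class name is illustrative, and it assumes (as the hunk above implies) that constructing org.apache.hadoop.hdfs.HdfsConfiguration, rather than a plain Configuration, runs a static initializer that registers HDFS-specific default resources and key mappings.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class UgiLifetimeSketch {
      public static void main(String[] args) {
        // 'false' means this instance skips loading the shared default
        // resources; using HdfsConfiguration still ensures the HDFS-specific
        // setup in its static initializer has run.
        Configuration conf = new HdfsConfiguration(false);
        conf.addResource("hdfsproxy-default.xml");
        // Same computation as the static block above: minutes to millis.
        long ugiLifetime =
            conf.getLong("hdfsproxy.ugi.cache.ugi.lifetime", 15) * 60 * 1000L;
        System.out.println("ugi lifetime (ms): " + ugiLifetime);
      }
    }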
Modified: hadoop/hdfs/branches/HDFS-326/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUtil.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUtil.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUtil.java Sat Nov 28 20:05:56 2009
@@ -51,6 +51,8 @@
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.HostsFileReader;
 
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+
 /**
 * Proxy Utility.
  */
@@ -312,7 +314,7 @@
           + UtilityOption.CHECKCERTS.getName() + " <hostname> <#port> ]");
       System.exit(0);
     }
-    Configuration conf = new Configuration(false);
+    Configuration conf = new HdfsConfiguration(false);
     conf.addResource("ssl-client.xml");
     conf.addResource("hdfsproxy-default.xml");
 
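Note the resource ordering above: in Hadoop's Configuration, resources added later override earlier ones on conflicting, non-final keys, so hdfsproxy-default.xml takes precedence over ssl-client.xml. A minimal sketch of the same layering (class name illustrative; key names as in the hunks):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class ProxyConfLayering {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration(false);
        conf.addResource("ssl-client.xml");        // SSL client settings, loaded first
        conf.addResource("hdfsproxy-default.xml"); // added later, wins on
                                                   // conflicting non-final keys
        System.out.println(conf.get("hdfsproxy.https.address", "unset"));
      }
    }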

Modified: hadoop/hdfs/branches/HDFS-326/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestHdfsProxy.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestHdfsProxy.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestHdfsProxy.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestHdfsProxy.java Sat Nov 28 20:05:56 2009
@@ -39,6 +39,7 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.net.NetUtils;
 
 /**
@@ -121,7 +122,7 @@
 
   private static MyFile[] createFiles(URI fsname, String topdir)
       throws IOException {
-    return createFiles(FileSystem.get(fsname, new Configuration()), topdir);
+    return createFiles(FileSystem.get(fsname, new HdfsConfiguration()), topdir);
   }
 
   /**
@@ -203,13 +204,13 @@
     HdfsProxy proxy = null;
     try {
 
-      final Configuration dfsConf = new Configuration();
+      final Configuration dfsConf = new HdfsConfiguration();
       cluster = new MiniDFSCluster(dfsConf, 2, true, null);
       cluster.waitActive();
 
       final FileSystem localfs = FileSystem.get(LOCAL_FS, dfsConf);
       final FileSystem hdfs = cluster.getFileSystem();
-      final Configuration proxyConf = new Configuration(false);
+      final Configuration proxyConf = new HdfsConfiguration(false);
       proxyConf.set("hdfsproxy.dfs.namenode.address", hdfs.getUri().getHost() + ":"
           + hdfs.getUri().getPort());
       proxyConf.set("hdfsproxy.https.address", "localhost:0");

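For reference, the smallest version of the test scaffolding used above, as a sketch under the APIs the hunk already shows (the four-argument MiniDFSCluster constructor starts the given number of datanodes and, with 'true', formats the namespace; shutdown() is the standard cleanup call):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        Configuration dfsConf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster(dfsConf, 2, true, null);
        try {
          cluster.waitActive();
          FileSystem hdfs = cluster.getFileSystem();
          // Point the proxy at the mini cluster, as the test above does.
          Configuration proxyConf = new HdfsConfiguration(false);
          proxyConf.set("hdfsproxy.dfs.namenode.address",
              hdfs.getUri().getHost() + ":" + hdfs.getUri().getPort());
        } finally {
          cluster.shutdown();
        }
      }
    }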
Modified: hadoop/hdfs/branches/HDFS-326/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestProxyUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestProxyUtil.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestProxyUtil.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestProxyUtil.java Sat Nov 28 20:05:56 2009
@@ -21,6 +21,7 @@
 import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 
 /** Unit tests for ProxyUtil */
 public class TestProxyUtil extends TestCase {
@@ -30,7 +31,7 @@
 
   public void testSendCommand() throws Exception {
       
-    Configuration conf = new Configuration(false);  
+    Configuration conf = new HdfsConfiguration(false);  
     conf.addResource("ssl-client.xml");
     conf.addResource("hdfsproxy-default.xml");
     String address = "localhost:" + TEST_PROXY_HTTPS_PORT;

Modified: hadoop/hdfs/branches/HDFS-326/src/contrib/thriftfs/ivy.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/contrib/thriftfs/ivy.xml?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/contrib/thriftfs/ivy.xml (original)
+++ hadoop/hdfs/branches/HDFS-326/src/contrib/thriftfs/ivy.xml Sat Nov 28 20:05:56 2009
@@ -24,6 +24,10 @@
     <artifact conf="master"/>
   </publications>
   <dependencies>
+    <dependency org="org.apache.hadoop"
+      name="hadoop-core"
+      rev="${hadoop-core.version}"
+      conf="common->default"/>
     <dependency org="commons-logging"
       name="commons-logging"
       rev="${commons-logging.version}"

Modified: hadoop/hdfs/branches/HDFS-326/src/contrib/thriftfs/src/java/org/apache/hadoop/thriftfs/HadoopThriftServer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/contrib/thriftfs/src/java/org/apache/hadoop/thriftfs/HadoopThriftServer.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/contrib/thriftfs/src/java/org/apache/hadoop/thriftfs/HadoopThriftServer.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/contrib/thriftfs/src/java/org/apache/hadoop/thriftfs/HadoopThriftServer.java Sat Nov 28 20:05:56 2009
@@ -26,6 +26,8 @@
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.StringUtils;
 
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+
 /**
  * ThriftHadoopFileSystem
  * A thrift wrapper around the Hadoop File System
@@ -122,7 +124,7 @@
      * @param name - the name of this handler
      */
     public HadoopThriftHandler(String name) {
-      conf = new Configuration();
+      conf = new HdfsConfiguration();
       now = now();
       try {
         inactivityThread = new Daemon(new InactivityMonitor());

Modified: hadoop/hdfs/branches/HDFS-326/src/contrib/thriftfs/test/org/apache/hadoop/thriftfs/TestThriftfs.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/contrib/thriftfs/test/org/apache/hadoop/thriftfs/TestThriftfs.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/contrib/thriftfs/test/org/apache/hadoop/thriftfs/TestThriftfs.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/contrib/thriftfs/test/org/apache/hadoop/thriftfs/TestThriftfs.java Sat Nov 28 20:05:56 2009
@@ -35,7 +35,7 @@
 
   public void testServer() throws IOException
   {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
     cluster.waitActive();
     DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();

Modified: hadoop/hdfs/branches/HDFS-326/src/docs/src/documentation/content/xdocs/SLG_user_guide.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/docs/src/documentation/content/xdocs/SLG_user_guide.xml?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/docs/src/documentation/content/xdocs/SLG_user_guide.xml (original)
+++ hadoop/hdfs/branches/HDFS-326/src/docs/src/documentation/content/xdocs/SLG_user_guide.xml Sat Nov 28 20:05:56 2009
@@ -18,12 +18,12 @@
 <!DOCTYPE document PUBLIC "-//APACHE//DTD Documentation V2.0//EN" "http://forrest.apache.org/dtd/document-v20.dtd">
 <document>
 	<header>
-		<title> HDFS Synthetic Load Generator Guide </title>
+		<title>Synthetic Load Generator Guide </title>
 	</header>
 	<body>
-		<section>
-			<title> Description </title>
-			<p>
+	<section>
+	<title>Overview</title>
+		<p>
         The synthetic load generator (SLG) is a tool for testing NameNode behavior
         under different client loads. The user can generate different mixes 
         of read, write, and list requests by specifying the probabilities of
@@ -33,91 +33,121 @@
         monitor the running of the NameNode. When a load generator exits, it
         prints some NameNode statistics like the average execution time of each
         kind of operation and the NameNode throughput.
-                       </p>
-                </section>
-		<section>
-			<title> Synopsis </title>
-			<p>
-        <code>java LoadGenerator [options]</code><br/>
-                        </p>
-                        <p>
-        Options include:<br/>
-        <code>&nbsp;&nbsp;-readProbability &lt;read probability&gt;</code><br/>
-        <code>&nbsp;&nbsp;&nbsp;&nbsp;the probability of the read operation;
-                default is 0.3333. </code><br/>
-        <code>&nbsp;&nbsp;-writeProbability &lt;write probability&gt;</code><br/>
-        <code>&nbsp;&nbsp;&nbsp;&nbsp;the probability of the write
-                operations; default is 0.3333.</code><br/>
-        <code>&nbsp;&nbsp;-root &lt;test space root&gt;</code><br/>
-        <code>&nbsp;&nbsp;&nbsp;&nbsp;the root of the test space;
-                default is /testLoadSpace.</code><br/>
-        <code>&nbsp;&nbsp;-maxDelayBetweenOps 
-                &lt;maxDelayBetweenOpsInMillis&gt;</code><br/> 
-        <code>&nbsp;&nbsp;&nbsp;&nbsp;the maximum delay between two consecutive
-                operations in a thread; default is 0 indicating no delay.
-                </code><br/>
-        <code>&nbsp;&nbsp;-numOfThreads &lt;numOfThreads&gt;</code><br/>
-        <code>&nbsp;&nbsp;&nbsp;&nbsp;the number of threads to spawn;
-                default is 200.</code><br/>
-        <code>&nbsp;&nbsp;-elapsedTime &lt;elapsedTimeInSecs&gt;</code><br/>
-        <code>&nbsp;&nbsp;&nbsp;&nbsp;the number of seconds that the program 
-                will run; A value of zero indicates that the program runs
-                forever. The default value is 0.</code><br/>
-        <code>&nbsp;&nbsp;-startTime &lt;startTimeInMillis&gt;</code><br/>
-        <code>&nbsp;&nbsp;&nbsp;&nbsp;the time that all worker threads
+        </p>
+     </section>
+                
+	<section>
+	<title> Synopsis </title>
+	  <p>
+        The synopsis of the command is:
+      </p>
+		<source>java LoadGenerator [options]</source>
+        <p> Options include:</p>
+        
+    <ul>
+    <li>
+        <code>-readProbability &lt;read probability&gt;</code><br/>
+        The probability of the read operation; default is 0.3333.
+    </li>
+ 
+    <li>               
+        <code>-writeProbability &lt;write probability&gt;</code><br/>
+        The probability of the write operations; default is 0.3333.
+    </li>
+
+   <li>            
+        <code>-root &lt;test space root&gt;</code><br/>
+        The root of the test space; default is /testLoadSpace.
+    </li> 
+
+    <li>           
+        <code>-maxDelayBetweenOps &lt;maxDelayBetweenOpsInMillis&gt;</code><br/>
+        The maximum delay between two consecutive operations in a thread; default is 0 indicating no delay.
+    </li> 
+
+    <li>            
+        <code>-numOfThreads &lt;numOfThreads&gt;</code><br/>
+        The number of threads to spawn; default is 200.
+    </li>
+
+     <li>          
+        <code>-elapsedTime &lt;elapsedTimeInSecs&gt;</code><br/>
+        The number of seconds that the program 
+        will run; a value of zero indicates that the program runs
+        forever. The default value is 0.
+     </li> 
+
+    <li>            
+        <code>-startTime &lt;startTimeInMillis&gt;</code><br/>
+        The time that all worker threads 
                 start to run. By default it is 10 seconds after the main 
                program starts running. This creates a barrier if more than
                 one load generator is running.
-        </code><br/>
-        <code>&nbsp;&nbsp;-seed &lt;seed&gt;</code><br/>
-        <code>&nbsp;&nbsp;&nbsp;&nbsp;the random generator seed for repeating 
+      </li>
+    
+    <li>     
+        <code>-seed &lt;seed&gt;</code><br/>
+        The random generator seed for repeating 
                 requests to NameNode when running with a single thread;
-                default is the current time.</code><br/>
-			</p>
-			<p>
+                default is the current time.
+     </li>
+			
+	</ul>
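+    <p>
+        For example, the following invocation (all option values are
+        illustrative) runs a read-heavy load with 100 threads for ten minutes:
+    </p>
+    <source>java LoadGenerator -readProbability 0.5 -writeProbability 0.25 -numOfThreads 100 -elapsedTime 600</source>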
+			
+	<p>
         After command line argument parsing, the load generator traverses 
         the test space and builds a table of all directories and another table
         of all files in the test space. It then waits until the start time to
-        spawn the number of worker threads as specified by the user. Each
-        thread sends a stream of requests to NameNode. At each iteration, 
+        spawn the number of worker threads as specified by the user. 
+        
+        Each thread sends a stream of requests to NameNode. At each iteration, 
         it first decides if it is going to read a file, create a file, or
         list a directory following the read and write probabilities specified
         by the user. The listing probability is equal to 
         <em>1-read probability-write probability</em>. When reading, 
         it randomly picks a file in the test space and reads the entire file. 
         When writing, it randomly picks a directory in the test space and 
-        creates a file there. To avoid two threads with the same load 
-        generator or from two different load generators create the same 
+        creates a file there. 
+    </p>
+    <p>
+        To prevent two threads, whether in the same load 
+        generator or in two different load generators, from creating the same 
         file, the file name consists of the current machine's host name 
         and the thread id. The length of the file follows Gaussian 
         distribution with an average size of 2 blocks and the standard 
-        deviation of 1. The new file is filled with byte 'a'. To avoid
-        the test space to grow indefinitely, the file is deleted immediately
-        after the file creation completes. While listing, it randomly 
-        picks a directory in the test space and lists its content. 
+        deviation of 1. The new file is filled with byte 'a'. To avoid the test 
+        space growing indefinitely, the file is deleted immediately
+        after the file creation completes. While listing, it randomly picks 
+        a directory in the test space and lists its content. 
+     </p>
+     <p>   
         After an operation completes, the thread pauses for a random 
         amount of time in the range of [0, maxDelayBetweenOps] if the 
         specified maximum delay is not zero. All threads are stopped when 
         the specified elapsed time is passed. Before exiting, the program 
        prints the average execution time for each kind of NameNode operation, 
         and the number of requests served by the NameNode per second.
-                        </p>
-                </section>
-                <section>
-                        <title> Test Space Population </title>
-                        <p>
-        The user needs to populate a test space before she runs a 
+    </p>
+    
+     </section>
+                
+     <section>
+     <title> Test Space Population </title>
+     <p>
+        The user needs to populate a test space before running a 
         load generator. The structure generator generates a random 
         test space structure and the data generator creates the files 
         and directories of the test space in Hadoop distributed file system.
-                        </p>
-                        <section>
-                                <title> Structure Generator </title>
-                                <p>
+     </p>
+     
+     <section>
+     <title> Structure Generator </title>
+    <p>
         This tool generates a random namespace structure with the 
         following constraints:
-                                </p>
-                                        <ol>
+     </p>
+     
+     <ol>
         <li>The number of subdirectories that a directory can have is 
             a random number in [minWidth, maxWidth].</li>
         <li>The maximum depth of each subdirectory is a random number 
@@ -125,69 +155,83 @@
         <li>Files are randomly placed in leaf directories. The size of 
             each file follows Gaussian distribution with an average size 
             of 1 block and a standard deviation of 1.</li>
-                                        </ol>
-                                <p>
+     </ol>
+      <p>
         The generated namespace structure is described by two files in 
         the output directory. Each line of the first file contains the 
         full name of a leaf directory. Each line of the second file 
         contains the full name of a file and its size, separated by a blank.
-                                </p>
-                                <p>
-        The synopsis of the command is
-                                </p>
-                                <p>
-        <code>java StructureGenerator [options]</code>
-                                </p>
-                                <p>
-        Options include:<br/>
-        <code>&nbsp;&nbsp;-maxDepth &lt;maxDepth&gt;</code><br/>
-        <code>&nbsp;&nbsp;&nbsp;&nbsp;maximum depth of the directory tree; 
-                default is 5.</code><br/>
-        <code>&nbsp;&nbsp;-minWidth &lt;minWidth&gt;</code><br/>
-        <code>&nbsp;&nbsp;&nbsp;&nbsp;minimum number of subdirectories per 
-                directories; default is 1.</code><br/>
-        <code>&nbsp;&nbsp;-maxWidth &lt;maxWidth&gt;</code><br/>
-        <code>&nbsp;&nbsp;&nbsp;&nbsp;maximum number of subdirectories per 
-                directories; default is 5.</code><br/>
-        <code>&nbsp;&nbsp;-numOfFiles &lt;#OfFiles&gt;</code><br/>
-        <code>&nbsp;&nbsp;&nbsp;&nbsp;the total number of files in the test 
-                space; default is 10.</code><br/>
-        <code>&nbsp;&nbsp;-avgFileSize &lt;avgFileSizeInBlocks&gt;</code><br/>
-        <code>&nbsp;&nbsp;&nbsp;&nbsp;average size of blocks; default is 1.
-                </code><br/>
-        <code>&nbsp;&nbsp;-outDir &lt;outDir&gt;</code><br/>
-        <code>&nbsp;&nbsp;&nbsp;&nbsp;output directory; default is the 
-                current directory. </code><br/>
-        <code>&nbsp;&nbsp;-seed &lt;seed&gt;</code><br/>
-        <code>&nbsp;&nbsp;&nbsp;&nbsp;random number generator seed;
-                default is the current time.</code><br/>
-                                </p>
-                        </section>
-                        <section>
-                                <title> Test Space Generator </title>
-                                <p>
+      </p>
+      <p>
+        The synopsis of the command is:
+      </p>
+      <source>java StructureGenerator [options]</source>
+
+     <p>Options include:</p>
+     <ul>
+     <li>
+        <code>-maxDepth &lt;maxDepth&gt;</code><br/>
+        Maximum depth of the directory tree; default is 5.
+     </li>
+
+     <li>    
+        <code>-minWidth &lt;minWidth&gt;</code><br/> 
+        Minimum number of subdirectories per directory; default is 1.
+     </li> 
+
+     <li>  
+        <code>-maxWidth &lt;maxWidth&gt;</code><br/> 
+        Maximum number of subdirectories per directory; default is 5.
+      </li>
+
+     <li>           
+        <code>-numOfFiles &lt;#OfFiles&gt;</code><br/> 
+        The total number of files in the test space; default is 10.
+      </li>
+
+     <li>          
+        <code>-avgFileSize &lt;avgFileSizeInBlocks&gt;</code><br/>
+        Average file size in blocks; default is 1.
+      </li> 
+
+     <li>           
+        <code>-outDir &lt;outDir&gt;</code><br/>
+        Output directory; default is the current directory.
+     </li>
+
+     <li>           
+        <code>-seed &lt;seed&gt;</code><br/>
+        Random number generator seed; default is the current time.
+    </li>            
+     </ul>
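+     <p>
+        For example, the following invocation (all option values are illustrative)
+        generates a test space of 1000 files under /tmp/testLoad:
+     </p>
+     <source>java StructureGenerator -maxDepth 5 -numOfFiles 1000 -outDir /tmp/testLoad</source>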
+     </section>
+
+    <section>
+    <title>Data Generator </title>
+         <p>
         This tool reads the directory structure and file structure from 
         the input directory and creates the namespace in Hadoop distributed
         file system. All files are filled with byte 'a'.
-                                </p>
-                                <p>
-        The synopsis of the command is
-                                </p>
-                                <p>
-        <code>java DataGenerator [options]</code>
-                                </p>
-                                <p>
-        Options include:<br/>
-        <code>&nbsp;&nbsp;-inDir &lt;inDir&gt;</code><br/>
-        <code>&nbsp;&nbsp;&nbsp;&nbsp;input directory name where directory/file
-                structures are stored; default is the current directory.
-        </code><br/>
-        <code>&nbsp;&nbsp;-root &lt;test space root&gt;</code><br/>
-        <code>&nbsp;&nbsp;&nbsp;&nbsp;the name of the root directory which the 
-                new namespace is going to be placed under; 
-                default is "/testLoadSpace".</code><br/>
-                                </p>
-		        </section>
-                </section>
+        </p>
+         <p>
+        The synopsis of the command is:
+         </p>
+         <source>java DataGenerator [options]</source>
+         <p>Options include:</p>
+         <ul>
+    <li>
+        <code>-inDir &lt;inDir&gt;</code><br/>
+        Input directory name where directory/file
+        structures are stored; default is the current directory.
+    </li>
+    <li>
+        <code>-root &lt;test space root&gt;</code><br/>
+        The name of the root directory under which the 
+        new namespace is going to be placed; 
+        default is "/testLoadSpace".
+    </li>
+     </ul>
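+     <p>
+        For example (paths are illustrative):
+     </p>
+     <source>java DataGenerator -inDir /tmp/testLoad -root /testLoadSpace</source>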
+	</section>
+    </section>
 	</body>
 </document>

Modified: hadoop/hdfs/branches/HDFS-326/src/docs/src/documentation/content/xdocs/faultinject_framework.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/docs/src/documentation/content/xdocs/faultinject_framework.xml?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/docs/src/documentation/content/xdocs/faultinject_framework.xml (original)
+++ hadoop/hdfs/branches/HDFS-326/src/docs/src/documentation/content/xdocs/faultinject_framework.xml Sat Nov 28 20:05:56 2009
@@ -21,41 +21,40 @@
 
 <document>
   <header>
-    <title>Fault injection Framework and Development Guide</title>
+    <title>Fault Injection Framework and Development Guide</title>
   </header>
 
   <body>
     <section>
       <title>Introduction</title>
-      <p>The following is a brief help for Hadoops' Fault Injection (FI)
-        Framework and Developer's Guide for those who will be developing
-        their own faults (aspects).
+      <p>This guide provides an overview of the Hadoop Fault Injection (FI) framework for those
+      who will be developing their own faults (aspects).
       </p>
-      <p>An idea of Fault Injection (FI) is fairly simple: it is an
+      <p>The idea of fault injection is fairly simple: it is an
         infusion of errors and exceptions into an application's logic to
         achieve a higher coverage and fault tolerance of the system.
-        Different implementations of this idea are available at this day.
+        Different implementations of this idea are available today.
        Hadoop's FI framework is built on top of the Aspect-Oriented Paradigm
        (AOP), implemented by the AspectJ toolkit.
       </p>
     </section>
     <section>
       <title>Assumptions</title>
-      <p>The current implementation of the framework assumes that the faults it
-        will be emulating are of non-deterministic nature. i.e. the moment
-        of a fault's happening isn't known in advance and is a coin-flip
-        based.
+      <p>The current implementation of the FI framework assumes that the faults it
+        will be emulating are of a non-deterministic nature. That is, the moment
+        of a fault's happening isn't known in advance and is coin-flip based.
       </p>
     </section>
+    
     <section>
       <title>Architecture of the Fault Injection Framework</title>
       <figure src="images/FI-framework.gif" alt="Components layout" />
+      
       <section>
-        <title>Configuration management</title>
-        <p>This piece of the framework allow to
-          set expectations for faults to happen. The settings could be applied
-          either statically (in advance) or in a runtime. There's two ways to
-          configure desired level of faults in the framework:
+        <title>Configuration Management</title>
+        <p>This piece of the FI framework allows you to set expectations for faults to happen. 
+        The settings can be applied either statically (in advance) or at runtime. 
+        The desired level of faults in the framework can be configured in two ways:
         </p>
         <ul>
           <li>
@@ -71,31 +70,31 @@
           </li>
         </ul>
       </section>
+      
       <section>
-        <title>Probability model</title>
-        <p>This fundamentally is a coin flipper. The methods of this class are
+        <title>Probability Model</title>
+        <p>This is fundamentally a coin flipper. The methods of this class are
           getting a random number between 0.0
-          and 1.0 and then checking if new number has happened to be in the
-          range of
-          0.0 and a configured level for the fault in question. If that
-          condition
-          is true then the fault will occur.
+          and 1.0 and then checking whether that number falls between
+          0.0 and the configured level for the fault in question. If that
+          condition is true, then the fault will occur.
         </p>
-        <p>Thus, to guarantee a happening of a fault one needs to set an
+        <p>Thus, to guarantee that a fault happens one needs to set an
           appropriate level to 1.0.
           To completely prevent a fault from happening its probability level
-          has to be set to 0.0
+          has to be set to 0.0.
         </p>
-        <p><strong>Nota bene</strong>: default probability level is set to 0
+        <p><strong>Note</strong>: The default probability level is set to 0
           (zero) unless the level is changed explicitly through the
           configuration file or in the runtime. The name of the default
           level's configuration parameter is
           <code>fi.*</code>
         </p>
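+        <p>For example, a configuration setting of
+        <code>fi.hdfs.datanode.BlockReceiver=1.0</code> (a fault name used in the
+        examples below) makes that fault fire on every pass through its join point,
+        while <code>0.0</code> disables it.</p>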
       </section>
+      
       <section>
-        <title>Fault injection mechanism: AOP and AspectJ</title>
-        <p>In the foundation of Hadoop's fault injection framework lays
+        <title>Fault Injection Mechanism: AOP and AspectJ</title>
+        <p>The foundation of Hadoop's FI framework includes a
           cross-cutting concept implemented by AspectJ. The following basic
           terms are important to remember:
         </p>
@@ -122,8 +121,9 @@
           </li>
         </ul>
       </section>
+      
       <section>
-        <title>Existing join points</title>
+        <title>Existing Join Points</title>
         <p>
           The following readily available join points are provided by AspectJ:
         </p>
@@ -154,7 +154,7 @@
       </section>
     </section>
     <section>
-      <title>Aspects examples</title>
+      <title>Aspect Example</title>
       <source>
 package org.apache.hadoop.hdfs.server.datanode;
 
@@ -191,17 +191,22 @@
     }
   }
 }
-      </source>
-      <p>
-        The aspect has two main parts: the join point
+</source>
+
+      <p>The aspect has two main parts: </p>
+       <ul>
+        <li>The join point
         <code>pointcut callReceivepacket()</code>
        which serves as an identification mark of a specific point (in control
-        and/or data flow) in the life of an application. A call to the advice -
+        and/or data flow) in the life of an application. </li>
+        
+       <li> A call to the advice -
         <code>before () throws IOException : callReceivepacket()</code>
-        - will be
-        <a href="#Putting+it+all+together">injected</a>
-        before that specific spot of the application's code.
-      </p>
+        - will be injected (see
+        <a href="#Putting+it+all+together">Putting It All Together</a>)
+        before that specific spot of the application's code.</li>
+        </ul>
+      
 
       <p>The pointcut identifies an invocation of class'
         <code>java.io.OutputStream write()</code>
@@ -210,8 +215,8 @@
         take place within the body of method
         <code>receivepacket()</code>
        from class <code>BlockReceiver</code>.
-        The method can have any parameters and any return type. possible
-        invocations of
+        The method can have any parameters and any return type. 
+        Possible invocations of
         <code>write()</code>
         method happening anywhere within the aspect
         <code>BlockReceiverAspects</code>
@@ -222,24 +227,22 @@
         class. In such a case the names of the faults have to be different
         if a developer wants to trigger them separately.
       </p>
-      <p><strong>Note 2</strong>: After
-        <a href="#Putting+it+all+together">injection step</a>
+      <p><strong>Note 2</strong>: After the injection step (see
+        <a href="#Putting+it+all+together">Putting It All Together</a>)
         you can verify that the faults were properly injected by
-        searching for
-        <code>ajc</code>
-        keywords in a disassembled class file.
+        searching for <code>ajc</code> keywords in a disassembled class file.
       </p>
 
     </section>
     
     <section>
-      <title>Fault naming convention &amp; namespaces</title>
-      <p>For the sake of unified naming
+      <title>Fault Naming Convention and Namespaces</title>
+      <p>For the sake of a unified naming
      convention, the following two types of names are recommended for
      new aspect development:</p>
       <ul>
-        <li>Activity specific notation (as
-          when we don't care about a particular location of a fault's
+        <li>Activity specific notation 
+          (when we don't care about a particular location of a fault's
           happening). In this case the name of the fault is rather abstract:
           <code>fi.hdfs.DiskError</code>
         </li>
@@ -251,14 +254,11 @@
     </section>
 
     <section>
-      <title>Development tools</title>
+      <title>Development Tools</title>
       <ul>
-        <li>Eclipse
-          <a href="http://www.eclipse.org/ajdt/">AspectJ
-            Development Toolkit
-          </a>
-          might help you in the aspects' development
-          process.
+        <li>The Eclipse
+          <a href="http://www.eclipse.org/ajdt/">AspectJ Development Toolkit</a>
+          may help you when developing aspects
         </li>
         <li>IntelliJ IDEA provides AspectJ weaver and Spring-AOP plugins
         </li>
@@ -266,60 +266,67 @@
     </section>
 
     <section>
-      <title>Putting it all together</title>
-      <p>Faults (or aspects) have to injected (or woven) together before
-        they can be used. Here's a step-by-step instruction how this can be
-        done.</p>
-      <p>Weaving aspects in place:</p>
-      <source>
+      <title>Putting It All Together</title>
+      <p>Faults (aspects) have to be injected (or woven) together before
+        they can be used. Follow these instructions:</p>
+        
+    <ul>
+      <li>To weave aspects in place use:
+<source>
 % ant injectfaults
-      </source>
-      <p>If you
-        misidentified the join point of your aspect then you'll see a
-        warning similar to this one below when 'injectfaults' target is
-        completed:</p>
-        <source>
+</source>
+      </li>
+      
+      <li>If you
+        misidentified the join point of your aspect, you will see a
+        warning (similar to the one shown here) when the 'injectfaults' target is
+        completed:
+<source>
 [iajc] warning at
 src/test/aop/org/apache/hadoop/hdfs/server/datanode/ \
           BlockReceiverAspects.aj:44::0
 advice defined in org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects
 has not been applied [Xlint:adviceDidNotMatch]
-        </source>
-      <p>It isn't an error, so the build will report the successful result.
-
-        To prepare dev.jar file with all your faults weaved in
-      place run (HDFS-475 pending)</p>
-        <source>
+</source>
+        </li>
+        
+      <li>It isn't an error, so the build will report a successful result. <br />
+     To prepare a dev.jar file with all your faults woven in place (HDFS-475 pending) use:
+<source>
 % ant jar-fault-inject
-        </source>
+</source>
+        </li>
 
-      <p>Test jars can be created by</p>
-        <source>
+     <li>To create test jars use:
+<source>
 % ant jar-test-fault-inject
-        </source>
+</source>
+      </li>
 
-      <p>To run HDFS tests with faults injected:</p>
-        <source>
+     <li>To run HDFS tests with faults injected use:
+<source>
 % ant run-test-hdfs-fault-inject
-        </source>
+</source>
+      </li>
+    </ul>
+        
       <section>
-        <title>How to use fault injection framework</title>
-        <p>Faults could be triggered by the following two meanings:
+        <title>How to Use the Fault Injection Framework</title>
+        <p>Faults can be triggered as follows:
         </p>
         <ul>
-          <li>In the runtime as:
-            <source>
+          <li>During runtime:
+<source>
 % ant run-test-hdfs -Dfi.hdfs.datanode.BlockReceiver=0.12
-            </source>
-            To set a certain level, e.g. 25%, of all injected faults one can run
+</source>
            To set all injected faults to a certain level, for example 25%, use:
             <br/>
-            <source>
+<source>
 % ant run-test-hdfs-fault-inject -Dfi.*=0.25
-            </source>
+</source>
           </li>
-          <li>or from a program as follows:
-          </li>
-        </ul>
+          <li>From a program:
+  
         <source>
 package org.apache.hadoop.fs;
 
@@ -354,23 +361,23 @@
    //Cleaning up the test environment
   }
 }
-        </source>
+</source>
+        </li>
+        </ul>
+        
         <p>
-          as you can see above these two methods do the same thing. They are
-          setting the probability level of
-          <code>hdfs.datanode.BlockReceiver</code>
-          at 12%.
-          The difference, however, is that the program provides more
-          flexibility and allows to turn a fault off when a test doesn't need
-          it anymore.
+          As you can see above, these two methods do the same thing. They are
+          setting the probability level of <code>hdfs.datanode.BlockReceiver</code>
+          at 12%. The difference, however, is that the program provides more
+          flexibility and allows you to turn a fault off when a test no longer needs it.
         </p>
       </section>
     </section>
 
     <section>
-      <title>Additional information and contacts</title>
-      <p>This two sources of information seem to be particularly
-        interesting and worth further reading:
+      <title>Additional Information and Contacts</title>
+      <p>These two sources of information are particularly
+        interesting and worth reading:
       </p>
       <ul>
         <li>
@@ -381,9 +388,8 @@
         <li>AspectJ Cookbook (ISBN-13: 978-0-596-00654-9)
         </li>
       </ul>
-      <p>Should you have any farther comments or questions to the author
-        check
-        <a href="http://issues.apache.org/jira/browse/HDFS-435">HDFS-435</a>
+      <p>If you have additional comments or questions for the author, check
+        <a href="http://issues.apache.org/jira/browse/HDFS-435">HDFS-435</a>.
       </p>
     </section>
   </body>


