hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From s..@apache.org
Subject svn commit: r612699 - in /lucene/hadoop/trunk: CHANGES.txt src/java/org/apache/hadoop/dfs/ClientProtocol.java src/java/org/apache/hadoop/dfs/DFSClient.java src/java/org/apache/hadoop/dfs/FSNamesystem.java
Date Thu, 17 Jan 2008 03:45:29 GMT
Author: shv
Date: Wed Jan 16 19:45:28 2008
New Revision: 612699

URL: http://svn.apache.org/viewvc?rev=612699&view=rev
Log:
HADOOP-1742. Improve JavaDoc documentation for HDFS classes. Contributed by Konstantin Shvachko.

Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/ClientProtocol.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?rev=612699&r1=612698&r2=612699&view=diff
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Wed Jan 16 19:45:28 2008
@@ -251,6 +251,9 @@
     HADOOP-1989. Remove 'datanodecluster' command from bin/hadoop.
     (Sanjay Radia via shv)
 
+    HADOOP-1742. Improve JavaDoc documentation for ClientProtocol, DFSClient,
+    and FSNamesystem. (Konstantin Shvachko)
+
   OPTIMIZATIONS
 
     HADOOP-1898.  Release the lock protecting the last time of the last stack

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/ClientProtocol.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/ClientProtocol.java?rev=612699&r1=612698&r2=612699&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/ClientProtocol.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/ClientProtocol.java Wed Jan 16 19:45:28
2008
@@ -23,7 +23,8 @@
 import org.apache.hadoop.fs.permission.*;
 
 /**********************************************************************
- * ClientProtocol is used by a piece of DFS user code to communicate 
+ * ClientProtocol is used by user code via 
+ * {@link DistributedFileSystem} class to communicate 
  * with the NameNode.  User code can manipulate the directory namespace, 
  * as well as open/close file streams, etc.
  *
@@ -32,13 +33,8 @@
 
   /**
    * Compared to the previous version the following changes have been introduced:
-   * 16 : removed deprecated obtainLock() and releaseLock(). 
-   * 17 : getBlockSize replaced by getPreferredBlockSize
-   * 18 : datanodereport returns dead, live or all nodes.
-   * 19 : rollEditLog() returns a token to uniquely identify the editfile.
-   * 20 : getContentLength returns the total size in bytes of a directory subtree
-   * 21 : add lease holder as a parameter in abandonBlock(...)
-   * 22 : Serialization of FileStatus has changed.
+   * (Only the latest change is reflected.
+   * The log of historical changes can be retrieved from svn.)
    * 23 : added setOwner(...) and setPermission(...); changed create(...) and mkdir(...)
    */
   public static final long versionID = 23L;
@@ -47,12 +43,14 @@
   // File contents
   ///////////////////////////////////////
   /**
-   * Open an existing file and get block locations within the specified range. 
+   * Open an existing file for read and get block locations within 
+   * the specified range. 
+   * <p>
    * Return {@link LocatedBlocks} which contains
    * file length, blocks and their locations.
    * DataNode locations for each block are sorted by
    * the distance to the client's address.
-   * 
+   * <p>
    * The client will then have to contact
    * one of the indicated DataNodes to obtain the actual data.  There
    * is no need to call close() or any other function after
@@ -86,24 +84,33 @@
                                           long length) throws IOException;
 
   /**
-   * Create a new file.  Get back block and datanode info,
-   * which describes where the first block should be written.
-   *
-   * Successfully calling this method prevents any other 
-   * client from creating a file under the given name, but
-   * the caller must invoke complete() for the file to be
-   * added to the filesystem.
-   *
+   * Create a new file entry in the namespace.
+   * <p>
+   * This will create an empty file specified by the source path.
+   * The path should reflect a full path originated at the root.
+   * The name-node does not have a notion of "current" directory for a client.
+   * <p>
+   * Once created, the file is visible and available for read to other clients.
+   * Although, other clients cannot {@link #delete(String)}, re-create or 
+   * {@link #rename(String, String)} it until the file is completed or 
+   * abandoned implicitly by {@link #abandonFileInProgress(String, String)}
+   * or explicitly as a result of lease expiration.
+   * <p>
    * Blocks have a maximum size.  Clients that intend to
-   * create multi-block files must also use reportWrittenBlock()
-   * and addBlock().
-   *
-   * If permission denied,
-   * an {@link AccessControlException} will be thrown as an
-   * {@link org.apache.hadoop.ipc.RemoteException}.
+   * create multi-block files must also use {@link #addBlock(String, String)}.
    *
-   * @param src The path of the directory being created
-   * @param masked The masked permission
+   * @param src path of the file being created.
+   * @param masked masked permission.
+   * @param clientName name of the current client.
+   * @param overwrite indicates whether the file should be 
+   * overwritten if it already exists.
+   * @param replication block replication factor.
+   * @param blockSize maximum block size.
+   * 
+   * @throws AccessControlException if permission to create file is 
+   * denied by the system. As is usual, on the client side the exception will 
+   * be wrapped into {@link org.apache.hadoop.ipc.RemoteException}.
+   * @throws IOException if other errors occur.
    */
   public void create(String src, 
                      FsPermission masked,
@@ -115,7 +122,7 @@
 
   /**
    * Set replication for an existing file.
-   * 
+   * <p>
    * The NameNode sets replication to the new value and returns.
    * The actual block replication is not expected to be performed during  
    * this method call. The blocks will be populated or removed in the 
@@ -148,11 +155,10 @@
       ) throws IOException;
 
   /**
-   * If the client has not yet called reportWrittenBlock(), it can
-   * give up on it by calling abandonBlock().  The client can then
+   * The client can give up on a block by calling abandonBlock().
+   * The client can then
    * either obtain a new block, or complete or abandon the file.
-   *
-   * Any partial writes to the block will be garbage-collected.
+   * Any partial writes to the block will be discarded.
    */
   public void abandonBlock(Block b, String src, String holder
       ) throws IOException;
@@ -162,11 +168,10 @@
    * indicated filename (which must currently be open for writing)
    * should call addBlock().  
    *
-   * addBlock() returns block and datanode info, just like the initial
-   * call to create().  
-   *
-   * A null response means the NameNode could not allocate a block,
-   * and that the caller should try again.
+   * addBlock() allocates a new block and selects the datanodes the
+   * block data should be replicated to.
+   * 
+   * @return LocatedBlock allocated block information.
    */
   public LocatedBlock addBlock(String src, String clientName) throws IOException;
 
@@ -174,7 +179,7 @@
    * A client that wants to abandon writing to the current file
    * should call abandonFileInProgress().  After this call, any
    * client can call create() to obtain the filename.
-   *
+   * <p>
    * Any blocks that have been written for the file will be 
    * garbage-collected.
    * @param src The filename
@@ -208,17 +213,29 @@
   // Namespace management
   ///////////////////////////////////////
   /**
-   * Rename an item in the fs namespace
+   * Rename an item in the file system namespace.
+   * 
+   * @param src existing file or directory name.
+   * @param dst new name.
+   * @return true if successful, or false if the old name does not exist
+   * or if the new name already belongs to the namespace.
+   * @throws IOException if the new name is invalid.
    */
   public boolean rename(String src, String dst) throws IOException;
 
   /**
-   * Remove the given filename from the filesystem
+   * Delete the given file or directory from the file system.
+   * <p>
+   * Any blocks belonging to the deleted files will be garbage-collected.
+   * 
+   * @param src existing name.
+   * @return true only if the existing file or directory was actually removed 
+   * from the file system. 
    */
   public boolean delete(String src) throws IOException;
 
   /**
-   * Check whether the given file exists
+   * Check whether the given file exists.
    */
   public boolean exists(String src) throws IOException;
 
@@ -231,13 +248,12 @@
    * Create a directory (or hierarchy of directories) with the given
    * name and permission.
    *
-   * If permission denied,
-   * an {@link AccessControlException} will be thrown as an
-   * {@link org.apache.hadoop.ipc.RemoteException}.
-   *
    * @param src The path of the directory being created
    * @param masked The masked permission of the directory being created
    * @return True if the operation success.
+   * @throws AccessControlException if permission to create the directory is 
+   * denied by the system. As is usual, on the client side the exception will 
+   * be wrapped into {@link org.apache.hadoop.ipc.RemoteException}.
    */
   public boolean mkdirs(String src, FsPermission masked) throws IOException;
 
@@ -258,7 +274,7 @@
    * Clearly, it would be bad if a client held a bunch of locks
    * that it never gave up.  This can happen easily if the client
    * dies unexpectedly.
-   *
+   * <p>
    * So, the NameNode will revoke the locks and live file-creates
    * for clients that it thinks have died.  A client tells the
    * NameNode that it is still alive by periodically calling
@@ -270,11 +286,12 @@
 
   /**
    * Get a set of statistics about the filesystem.
-   * Right now, only two values are returned.
-   * [0] contains the total storage capacity of the system,
-   *     in bytes.
-   * [1] contains the total used space of the system, in bytes.
-   * [2] contains the available storage of the system, in bytes.
+   * Right now, only three values are returned.
+   * <ul>
+   * <li> [0] contains the total storage capacity of the system, in bytes.</li>
+   * <li> [1] contains the total used space of the system, in bytes.</li>
+   * <li> [2] contains the available storage of the system, in bytes.</li>
+   * </ul>
    */
   public long[] getStats() throws IOException;
 
@@ -405,14 +422,16 @@
    */
   public void metaSave(String filename) throws IOException;
 
-  /* Get the file info for a specific file or directory.
+  /**
+   * Get the file info for a specific file or directory.
    * @param src The string representation of the path to the file
    * @throws IOException if file does not exist
    * @return object containing information regarding the file
    */
   public DFSFileInfo getFileInfo(String src) throws IOException;
 
-  /* Get the total size of all files and directories rooted at
+  /**
+   * Get the total size of all files and directories rooted at
    * the specified directory.
    * @param src The string representation of the path
    * @return size of directory subtree in bytes

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java?rev=612699&r1=612698&r2=612699&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java Wed Jan 16 19:45:28
2008
@@ -337,7 +337,7 @@
    * Call
    * {@link #create(String,FsPermission,boolean,short,long,Progressable,int)}
    * with default permission.
-   * @see FsPermission#getDefault(Configuration)
+   * @see FsPermission#getDefault()
    */
   public OutputStream create(String src,
       boolean overwrite,
@@ -361,7 +361,7 @@
    * @param replication block replication
    * @return output stream
    * @throws IOException
-   * @see {@link ClientProtocol#create(String, FsPermission, String, boolean, short, long)}
+   * @see ClientProtocol#create(String, FsPermission, String, boolean, short, long)
    */
   public OutputStream create(String src, 
                              FsPermission permission,
@@ -399,8 +399,8 @@
   }
 
   /**
-   * Make a direct connection to namenode and manipulate structures
-   * there.
+   * Rename file or directory.
+   * See {@link ClientProtocol#rename(String, String)}. 
    */
   public boolean rename(String src, String dst) throws IOException {
     checkOpen();
@@ -408,8 +408,8 @@
   }
 
   /**
-   * Make a direct connection to namenode and manipulate structures
-   * there.
+   * Delete file or directory.
+   * See {@link ClientProtocol#delete(String)}. 
    */
   public boolean delete(String src) throws IOException {
     checkOpen();
@@ -528,7 +528,7 @@
    * @param permission The permission of the directory being created.
    * If permission == null, use {@link FsPermission#getDefault()}.
    * @return True if the operation success.
-   * @see {@link ClientProtocol#mkdirs(String, FsPermission)}
+   * @see ClientProtocol#mkdirs(String, FsPermission)
    */
   public boolean mkdirs(String src, FsPermission permission)throws IOException{
     checkOpen();
@@ -1461,7 +1461,7 @@
     private Progressable progress;
     /**
      * Create a new output stream to the given DataNode.
-     * @see {@link ClientProtocol#create(String, FsPermission, String, boolean, short, long)}
+     * @see ClientProtocol#create(String, FsPermission, String, boolean, short, long)
      */
     public DFSOutputStream(String src, FsPermission masked,
                            boolean overwrite,

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java?rev=612699&r1=612698&r2=612699&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java Wed Jan 16 19:45:28
2008
@@ -373,7 +373,8 @@
                              getDistributedUpgradeVersion());
   }
 
-  /** Close down this filesystem manager.
+  /**
+   * Close down this file system manager.
    * Causes heartbeat and lease daemons to stop; waits briefly for
    * them to finish, but a short timeout returns control back to caller.
    */
@@ -645,7 +646,8 @@
         results.toArray(new BlockWithLocations[results.size()]));
   }
   
-  /* Get all valid locations of the block & add the block to results
+  /**
+   * Get all valid locations of the block & add the block to results
    * return the length of the added block; 0 if the block is not added
    */
   private long addBlock(Block block, List<BlockWithLocations> results) {
@@ -879,6 +881,14 @@
                             text + " is less than the required minimum " + minReplication);
   }
 
+  /**
+   * Create a new file entry in the namespace.
+   * 
+   * @see ClientProtocol#create(String, FsPermission, String, boolean, short, long)
+   * 
+   * @throws IOException if file name is invalid
+   *         {@link FSDirectory#isValidToCreate(String)}.
+   */
   void startFile(String src, PermissionStatus permissions,
                  String holder, String clientMachine,
                  boolean overwrite, short replication, long blockSize
@@ -888,17 +898,6 @@
     getEditLog().logSync();
   }
 
-  /**
-   * The client would like to create a new block for the indicated
-   * filename.  Return an array that consists of the block, plus a set 
-   * of machines.  The first on this list should be where the client 
-   * writes data.  Subsequent items in the list must be provided in
-   * the connection to the first datanode.
-   * Return an array that consists of the block, plus a set
-   * of machines
-   * @throws IOException if the filename is invalid
-   *         {@link FSDirectory#isValidToCreate(String)}.
-   */
   private synchronized void startFileInternal(String src,
                                               PermissionStatus permissions,
                                               String holder, 
@@ -2974,7 +2973,7 @@
     }
   } 
 
-  /*
+  /**
    * Counts the number of nodes in the given list into active and
    * decommissioned counters.
    */
@@ -2993,13 +2992,17 @@
     return new NumberReplicas(live, count);
   }
 
-  /** return the number of nodes that are live and decommissioned. */
+  /**
+   * Return the number of nodes that are live and decommissioned.
+   */
   private NumberReplicas countNodes(Block b) {
     return countNodes(blocksMap.nodeIterator(b));
   }
 
-  /** Returns a newly allocated list of all nodes. Returns a count of
-  * live and decommissioned nodes. */
+  /**
+   * Returns a newly allocated list of all nodes. Returns a count of
+   * live and decommissioned nodes.
+   */
   ArrayList<DatanodeDescriptor> containingNodeList(Block b, NumberReplicas[] numReplicas)
{
     ArrayList<DatanodeDescriptor> nodeList = 
       new ArrayList<DatanodeDescriptor>();
@@ -3021,7 +3024,8 @@
     }
     return nodeList;
   }
-  /*
+
+  /**
    * Return true if there are any blocks on this node that have not
    * yet reached their replication factor. Otherwise returns false.
    */
@@ -3212,7 +3216,9 @@
     }
   }
   
-  // Keeps track of which datanodes are allowed to connect to the namenode.
+  /** 
+   * Keeps track of which datanodes are allowed to connect to the namenode.
+   */
   private boolean inHostsList(DatanodeID node) {
     Set<String> hostsList = hostsReader.getHosts();
     return (hostsList.isEmpty() || 
@@ -3801,7 +3807,7 @@
     return getEditLog().getFsEditName();
   }
 
-  /*
+  /**
    * This is called just before a new checkpoint is uploaded to the
    * namenode.
    */
@@ -3823,7 +3829,7 @@
     ckptState = CheckpointStates.UPLOAD_START;
   }
 
-  /*
+  /**
    * This is called when a checkpoint upload finishes successfully.
    */
   synchronized void checkpointUploadDone() {
@@ -3900,7 +3906,7 @@
   /**
    * Check whether current user have permissions to access the path.
    * For more details of the parameters, see
-   * {@link PermissionChecker#checkPermission(INodeDirectory, boolean, FsAction, FsAction,
FsAction)}.
+   * {@link PermissionChecker#checkPermission(String, INodeDirectory, boolean, FsAction,
FsAction, FsAction, FsAction)}.
    */
   private PermissionChecker checkPermission(String path, boolean doCheckOwner,
       FsAction ancestorAccess, FsAction parentAccess, FsAction access,
@@ -3915,7 +3921,7 @@
     return pc;
   }
 
-  /*
+  /**
    * Check to see if we have exceeded the limit on the number
    * of inodes.
    */



Mime
View raw message