hadoop-common-commits mailing list archives

From: cutt...@apache.org
Subject: svn commit: r540715 - in /lucene/hadoop/trunk: ./ src/java/org/apache/hadoop/dfs/ src/test/org/apache/hadoop/dfs/ src/test/org/apache/hadoop/fs/ src/webapps/datanode/
Date: Tue, 22 May 2007 19:40:12 GMT
Author: cutting
Date: Tue May 22 12:40:11 2007
New Revision: 540715

URL: http://svn.apache.org/viewvc?view=rev&rev=540715
Log:
HADOOP-894. Change HDFS so that the client only retrieves a limited number of block locations per request from the namenode.  Contributed by Konstantin.
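
In practice this means block locations are now paged: a single open() or getBlockLocations() call returns only the blocks covering the requested byte range, and the client asks again as it reads further into the file. Below is a rough, illustrative sketch of such a paging loop against the new ClientProtocol methods; the helper class and its setup are hypothetical and not part of this commit (it also assumes package access, since the LocatedBlock accessors are package-private).

    package org.apache.hadoop.dfs;   // sketch only: LocatedBlock accessors are package-private

    import java.io.IOException;

    // Illustrative sketch, not part of this commit: walk a file's block
    // locations in bounded requests using the new ClientProtocol methods.
    // "namenode" is assumed to be an existing ClientProtocol RPC proxy.
    class BlockLocationPager {
      static void listBlocks(ClientProtocol namenode, String src,
                             long fileLength, long window) throws IOException {
        long offset = 0;
        while (offset < fileLength) {
          // the namenode returns only the blocks covering [offset, offset + window)
          LocatedBlocks chunk = namenode.getBlockLocations(src, offset, window);
          int n = chunk.locatedBlockCount();
          if (n == 0) {
            break;                              // nothing more to report
          }
          for (LocatedBlock blk : chunk.getLocatedBlocks()) {
            // locations come back sorted by proximity to this client
            System.out.println(blk.getBlock() + " starts at " + blk.getStartOffset()
                               + ", " + blk.getLocations().length + " replica(s)");
          }
          LocatedBlock last = chunk.get(n - 1);
          offset = last.getStartOffset() + last.getBlockSize();   // next window
        }
      }
    }

The committed DFSClient does essentially this in DFSInputStream.getBlockRange(), caching each fetched range in a LocatedBlocks object so repeated reads do not go back to the namenode.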

Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/ClientProtocol.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSFileInfo.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DfsPath.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FileUnderConstruction.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/LocatedBlock.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDecommission.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestPread.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestDFSIO.java
    lucene/hadoop/trunk/src/webapps/datanode/browseBlock.jsp
    lucene/hadoop/trunk/src/webapps/datanode/browseDirectory.jsp
    lucene/hadoop/trunk/src/webapps/datanode/tail.jsp

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?view=diff&rev=540715&r1=540714&r2=540715
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Tue May 22 12:40:11 2007
@@ -47,6 +47,10 @@
 
  15. HADOOP-1401.  Add contrib/hbase javadoc to tree.  (stack via cutting)
 
+ 16. HADOOP-894.  Change HDFS so that the client only retrieves a limited
+     number of block locations per request from the namenode.
+     (Konstantin Shvachko via cutting)
+
 
 Branch 0.13 (unreleased changes)
 

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/ClientProtocol.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/ClientProtocol.java?view=diff&rev=540715&r1=540714&r2=540715
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/ClientProtocol.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/ClientProtocol.java Tue May 22 12:40:11 2007
@@ -29,24 +29,56 @@
  **********************************************************************/
 interface ClientProtocol extends VersionedProtocol {
 
-  /*
-   * 11: metasave() added and reportWrittenBlock() removed.
+  /**
+   * Compared to the previous version the following changes have been introduced:
+   * 12: open() prototype changed; 
+   *     getBlockLocations() added; 
+   *     DFSFileInfo format changed;
+   *     getHints() removed.
    */
-  public static final long versionID = 11L;  
+  public static final long versionID = 12L;
   
   ///////////////////////////////////////
   // File contents
   ///////////////////////////////////////
   /**
-   * Open an existing file, at the given name.  Returns block 
-   * and DataNode info.  DataNodes for each block are sorted by
+   * Open an existing file and get block locations within the specified range. 
+   * Return {@link LocatedBlocks} which contains
+   * file length, blocks and their locations.
+   * DataNode locations for each block are sorted by
    * the distance to the client's address.
+   * 
    * The client will then have to contact
-   * each indicated DataNode to obtain the actual data.  There
+   * one of the indicated DataNodes to obtain the actual data.  There
    * is no need to call close() or any other function after
    * calling open().
+   * 
+   * @param src file name
+   * @param offset range start offset
+   * @param length range length
+   * @return file length and array of blocks with their locations
+   * @throws IOException
    */
-  public LocatedBlock[] open(String src) throws IOException;
+  public LocatedBlocks open(String src, 
+                            long offset,
+                            long length) throws IOException;
+  
+  /**
+   * Get locations of the blocks of the specified file within the specified range.
+   * DataNode locations for each block are sorted by
+   * the proximity to the client.
+   * 
+   * @see #open(String, long, long)
+   * 
+   * @param src file name
+   * @param offset range start offset
+   * @param length range length
+   * @return file length and array of blocks with their locations
+   * @throws IOException
+   */
+  public LocatedBlocks  getBlockLocations(String src,
+                                          long offset,
+                                          long length) throws IOException;
 
   /**
    * Create a new file.  Get back block and datanode info,
@@ -181,17 +213,6 @@
   // System issues and management
   ///////////////////////////////////////
   /**
-   * getHints() returns a list of hostnames that store data for
-   * a specific file region.  It returns a set of hostnames for 
-   * every block within the indicated region.
-   *
-   * This function is very useful when writing code that considers
-   * data-placement when performing operations.  For example, the
-   * MapReduce system tries to schedule tasks on the same machines
-   * as the data-block the task processes. 
-   */
-  public String[][] getHints(String src, long start, long len) throws IOException;
-  /**
   * obtainLock() is used for lock management.  It returns true if
    * the lock has been seized correctly.  It returns false if the
    * lock could not be obtained, and the client should try again.
@@ -308,7 +329,6 @@
 
   /**
    * Tells the namenode to reread the hosts and exclude files. 
-   * @return True if the call was successful, false otherwise.
    * @throws IOException
    */
   public void refreshNodes() throws IOException;
@@ -325,7 +345,6 @@
    * Closes the current edit log and opens a new one. The 
    * call fails if there are already two or more edits log files or
    * if the file system is in SafeMode.
-   * @return True if the call was successful, false otherwise.
    * @throws IOException
    */
   public void rollEditLog() throws IOException;
@@ -334,7 +353,6 @@
    * Rolls the fsImage log. It removes the old fsImage, copies the
    * new image to fsImage, removes the old edits and renames edits.new 
    * to edits. The call fails if any of the four files are missing.
-   * @return True if the call was successful, false otherwise.
    * @throws IOException
    */
   public void rollFsImage() throws IOException;

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java?view=diff&rev=540715&r1=540714&r2=540715
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java Tue May 22 12:40:11 2007
@@ -47,7 +47,7 @@
  ********************************************************/
 class DFSClient implements FSConstants {
   public static final Log LOG = LogFactory.getLog("org.apache.hadoop.fs.DFSClient");
-  static int MAX_BLOCK_ACQUIRE_FAILURES = 3;
+  static final int MAX_BLOCK_ACQUIRE_FAILURES = 3;
   private static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
   private static final long DEFAULT_BLOCK_SIZE = 64 * 1024 * 1024;
   ClientProtocol namenode;
@@ -224,12 +224,36 @@
   }
     
   /**
-   * Get hints about the location of the indicated block(s).  The
-   * array returned is as long as there are blocks in the indicated
-   * range.  Each block may have one or more locations.
+   * Get hints about the location of the indicated block(s).
+   * 
+   * getHints() returns a list of hostnames that store data for
+   * a specific file region.  It returns a set of hostnames for 
+   * every block within the indicated region.
+   *
+   * This function is very useful when writing code that considers
+   * data-placement when performing operations.  For example, the
+   * MapReduce system tries to schedule tasks on the same machines
+   * as the data-block the task processes. 
    */
-  public String[][] getHints(UTF8 src, long start, long len) throws IOException {
-    return namenode.getHints(src.toString(), start, len);
+  public String[][] getHints(String src, long start, long length) 
+    throws IOException {
+    LocatedBlocks blocks = namenode.getBlockLocations(src, start, length);
+    if (blocks == null) {
+      return new String[0][];
+    }
+    int nrBlocks = blocks.locatedBlockCount();
+    String[][] hints = new String[nrBlocks][];
+    int idx = 0;
+    for (LocatedBlock blk : blocks.getLocatedBlocks()) {
+      assert idx < nrBlocks : "Incorrect index";
+      DatanodeInfo[] locations = blk.getLocations();
+      hints[idx] = new String[locations.length];
+      for (int hCnt = 0; hCnt < locations.length; hCnt++) {
+        hints[idx][hCnt] = locations[hCnt].getHostName();
+      }
+      idx++;
+    }
+    return hints;
   }
 
   /**
@@ -413,10 +437,10 @@
 
   /**
    * Dumps DFS data structures into specified file.
-   * See {@link ClientProtocol#metaSave()} 
+   * See {@link ClientProtocol#metaSave(String)} 
    * for more details.
    * 
-   * @see ClientProtocol#metaSave()
+   * @see ClientProtocol#metaSave(String)
    */
   public void metaSave(String pathname) throws IOException {
     namenode.metaSave(pathname);
@@ -526,23 +550,22 @@
       this.addr = addr;
     }
   }
-        
+
   /****************************************************************
    * DFSInputStream provides bytes from a named file.  It handles 
    * negotiation of the namenode and various datanodes as necessary.
    ****************************************************************/
   class DFSInputStream extends FSInputStream {
     private Socket s = null;
-    boolean closed = false;
+    private boolean closed = false;
 
     private String src;
+    private long prefetchSize = 10 * defaultBlockSize;
     private DataInputStream blockStream;
-    private Block blocks[] = null;
-    private DatanodeInfo nodes[][] = null;
+    private LocatedBlocks locatedBlocks = null;
     private DatanodeInfo currentNode = null;
     private Block currentBlock = null;
     private long pos = 0;
-    private long filelen = 0;
     private long blockEnd = -1;
     private TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
         
@@ -550,42 +573,33 @@
      */
     public DFSInputStream(String src) throws IOException {
       this.src = src;
+      prefetchSize = conf.getLong("dfs.read.prefetch.size", prefetchSize);
       openInfo();
       this.blockStream = null;
-      for (int i = 0; i < blocks.length; i++) {
-        this.filelen += blocks[i].getNumBytes();
-      }
     }
 
     /**
      * Grab the open-file info from namenode
      */
     synchronized void openInfo() throws IOException {
-      Block oldBlocks[] = this.blocks;
+      LocatedBlocks newInfo = namenode.open(src, 0, prefetchSize);
 
-      LocatedBlock results[] = namenode.open(src);            
-      Vector<Block> blockV = new Vector<Block>();
-      Vector<DatanodeInfo[]> nodeV = new Vector<DatanodeInfo[]>();
-      for (int i = 0; i < results.length; i++) {
-        blockV.add(results[i].getBlock());
-        nodeV.add(results[i].getLocations());
-      }
-      Block[] newBlocks = blockV.toArray(new Block[blockV.size()]);
-
-      if (oldBlocks != null) {
-        for (int i = 0; i < oldBlocks.length; i++) {
-          if (!oldBlocks[i].equals(newBlocks[i])) {
+      if (locatedBlocks != null) {
+        Iterator<LocatedBlock> oldIter = locatedBlocks.getLocatedBlocks().iterator();
+        Iterator<LocatedBlock> newIter = newInfo.getLocatedBlocks().iterator();
+        while (oldIter.hasNext() && newIter.hasNext()) {
+          if (! oldIter.next().getBlock().equals(newIter.next().getBlock())) {
             throw new IOException("Blocklist for " + src + " has changed!");
           }
         }
-        if (oldBlocks.length != newBlocks.length) {
-          throw new IOException("Blocklist for " + src + " now has different length");
-        }
       }
-      this.blocks = newBlocks;
-      this.nodes = nodeV.toArray(new DatanodeInfo[nodeV.size()][]);
+      this.locatedBlocks = newInfo;
       this.currentNode = null;
     }
+    
+    public long getFileLength() {
+      return (locatedBlocks == null) ? 0 : locatedBlocks.getFileLength();
+    }
 
     /**
      * Returns the datanode from which the stream is currently reading.
@@ -601,13 +615,79 @@
       return currentBlock;
     }
 
+    /**
+     * Return collection of blocks that have already been located.
+     */
+    synchronized List<LocatedBlock> getAllBlocks() throws IOException {
+      return getBlockRange(0, this.getFileLength());
+    }
 
     /**
-     * Used by the automatic tests to detemine blocks locations of a
-     * file
+     * Get block at the specified position.
+     * Fetch it from the namenode if not cached.
+     * 
+     * @param offset
+     * @return
+     * @throws IOException
      */
-    synchronized DatanodeInfo[][] getDataNodes() {
-      return nodes;
+    private LocatedBlock getBlockAt(long offset) throws IOException {
+      assert (locatedBlocks != null) : "locatedBlocks is null";
+      // search cached blocks first
+      int targetBlockIdx = locatedBlocks.findBlock(offset);
+      if (targetBlockIdx < 0) { // block is not cached
+        targetBlockIdx = LocatedBlocks.getInsertIndex(targetBlockIdx);
+        // fetch more blocks
+        LocatedBlocks newBlocks;
+        newBlocks = namenode.getBlockLocations(src, offset, prefetchSize);
+        assert (newBlocks != null) : "Could not find target position " + offset;
+        locatedBlocks.insertRange(targetBlockIdx, newBlocks.getLocatedBlocks());
+      }
+      LocatedBlock blk = locatedBlocks.get(targetBlockIdx);
+      // update current position
+      this.pos = offset;
+      this.blockEnd = blk.getStartOffset() + blk.getBlockSize() - 1;
+      this.currentBlock = blk.getBlock();
+      return blk;
+    }
+
+    /**
+     * Get blocks in the specified range.
+     * Fetch them from the namenode if not cached.
+     * 
+     * @param offset
+     * @param length
+     * @return
+     * @throws IOException
+     */
+    private List<LocatedBlock> getBlockRange(long offset, long length) 
+                                                        throws IOException {
+      assert (locatedBlocks != null) : "locatedBlocks is null";
+      List<LocatedBlock> blockRange = new ArrayList<LocatedBlock>();
+      // search cached blocks first
+      int blockIdx = locatedBlocks.findBlock(offset);
+      if (blockIdx < 0) { // block is not cached
+        blockIdx = LocatedBlocks.getInsertIndex(blockIdx);
+      }
+      long remaining = length;
+      long curOff = offset;
+      while(remaining > 0) {
+        LocatedBlock blk = null;
+        if(blockIdx < locatedBlocks.locatedBlockCount())
+          blk = locatedBlocks.get(blockIdx);
+        if (blk == null || curOff < blk.getStartOffset()) {
+          LocatedBlocks newBlocks;
+          newBlocks = namenode.getBlockLocations(src, curOff, remaining);
+          locatedBlocks.insertRange(blockIdx, newBlocks.getLocatedBlocks());
+          continue;
+        }
+        assert curOff >= blk.getStartOffset() : "Block not found";
+        blockRange.add(blk);
+        long bytesRead = blk.getStartOffset() + blk.getBlockSize() - curOff;
+        remaining -= bytesRead;
+        curOff += bytesRead;
+        blockIdx++;
+      }
+      return blockRange;
     }
 
     /**
@@ -615,7 +695,7 @@
      * We get block ID and the IDs of the destinations at startup, from the namenode.
      */
     private synchronized DatanodeInfo blockSeekTo(long target) throws IOException {
-      if (target >= filelen) {
+      if (target >= getFileLength()) {
         throw new IOException("Attempted to read past end of file");
       }
 
@@ -627,24 +707,9 @@
       //
       // Compute desired block
       //
-      int targetBlock = -1;
-      long targetBlockStart = 0;
-      long targetBlockEnd = 0;
-      for (int i = 0; i < blocks.length; i++) {
-        long blocklen = blocks[i].getNumBytes();
-        targetBlockEnd = targetBlockStart + blocklen - 1;
-
-        if (target >= targetBlockStart && target <= targetBlockEnd) {
-          targetBlock = i;
-          break;
-        } else {
-          targetBlockStart = targetBlockEnd + 1;                    
-        }
-      }
-      if (targetBlock < 0) {
-        throw new IOException("Impossible situation: could not find target position " + target);
-      }
-      long offsetIntoBlock = target - targetBlockStart;
+      LocatedBlock targetBlock = getBlockAt(target);
+      assert (target==this.pos) : "Wrong position " + pos + " expect " + target;
+      long offsetIntoBlock = target - targetBlock.getStartOffset();
 
       //
       // Connect to best DataNode for desired Block, with potential offset
@@ -663,9 +728,10 @@
           //
           // Xmit header info to datanode
           //
+          Block block = targetBlock.getBlock();
           DataOutputStream out = new DataOutputStream(new BufferedOutputStream(s.getOutputStream()));
           out.write(OP_READSKIP_BLOCK);
-          blocks[targetBlock].write(out);
+          block.write(out);
           out.writeLong(offsetIntoBlock);
           out.flush();
 
@@ -675,16 +741,13 @@
           DataInputStream in = new DataInputStream(new BufferedInputStream(s.getInputStream()));
           long curBlockSize = in.readLong();
           long amtSkipped = in.readLong();
-          if (curBlockSize != blocks[targetBlock].len) {
-            throw new IOException("Recorded block size is " + blocks[targetBlock].len + ", but datanode reports size of " + curBlockSize);
+          if (curBlockSize != block.getNumBytes()) {
+            throw new IOException("Recorded block size is " + block.getNumBytes() + ", but datanode reports size of " + curBlockSize);
           }
           if (amtSkipped != offsetIntoBlock) {
             throw new IOException("Asked for offset of " + offsetIntoBlock + ", but only received offset of " + amtSkipped);
           }
 
-          this.pos = target;
-          this.blockEnd = targetBlockEnd;
-          this.currentBlock = blocks[targetBlock];
           this.blockStream = in;
           return chosenNode;
         } catch (IOException ex) {
@@ -731,7 +794,7 @@
         throw new IOException("Stream closed");
       }
       int result = -1;
-      if (pos < filelen) {
+      if (pos < getFileLength()) {
         if (pos > blockEnd) {
           currentNode = blockSeekTo(pos);
         }
@@ -751,7 +814,7 @@
       if (closed) {
         throw new IOException("Stream closed");
       }
-      if (pos < filelen) {
+      if (pos < getFileLength()) {
         int retries = 2;
         while (retries > 0) {
           try {
@@ -780,24 +843,25 @@
     }
 
         
-    private DNAddrPair chooseDataNode(int blockId)
+    private DNAddrPair chooseDataNode(LocatedBlock block)
       throws IOException {
       int failures = 0;
       while (true) {
+        DatanodeInfo[] nodes = block.getLocations();
         try {
-          DatanodeInfo chosenNode = bestNode(nodes[blockId], deadNodes);
+          DatanodeInfo chosenNode = bestNode(nodes, deadNodes);
           InetSocketAddress targetAddr = DataNode.createSocketAddr(chosenNode.getName());
           return new DNAddrPair(chosenNode, targetAddr);
         } catch (IOException ie) {
-          String blockInfo =
-            blocks[blockId]+" file="+src;
+          String blockInfo = block.getBlock() + " file=" + src;
           if (failures >= MAX_BLOCK_ACQUIRE_FAILURES) {
             throw new IOException("Could not obtain block: " + blockInfo);
           }
-          if (nodes[blockId] == null || nodes[blockId].length == 0) {
+          
+          if (nodes == null || nodes.length == 0) {
             LOG.info("No node available for block: " + blockInfo);
           }
-          LOG.info("Could not obtain block " + blockId + " from any node:  " + ie);
+          LOG.info("Could not obtain block " + block.getBlock() + " from any node:  " + ie);
           try {
             Thread.sleep(3000);
           } catch (InterruptedException iex) {
@@ -810,14 +874,14 @@
       }
     } 
         
-    private void fetchBlockByteRange(int blockId, long start,
+    private void fetchBlockByteRange(LocatedBlock block, long start,
                                      long end, byte[] buf, int offset) throws IOException {
       //
       // Connect to best DataNode for desired Block, with potential offset
       //
       Socket dn = null;
       while (dn == null) {
-        DNAddrPair retval = chooseDataNode(blockId);
+        DNAddrPair retval = chooseDataNode(block);
         DatanodeInfo chosenNode = retval.info;
         InetSocketAddress targetAddr = retval.addr;
             
@@ -831,7 +895,7 @@
           //
           DataOutputStream out = new DataOutputStream(new BufferedOutputStream(dn.getOutputStream()));
           out.write(OP_READ_RANGE_BLOCK);
-          blocks[blockId].write(out);
+          block.getBlock().write(out);
           out.writeLong(start);
           out.writeLong(end);
           out.flush();
@@ -843,9 +907,10 @@
           long curBlockSize = in.readLong();
           long actualStart = in.readLong();
           long actualEnd = in.readLong();
-          if (curBlockSize != blocks[blockId].len) {
+          if (curBlockSize != block.getBlockSize()) {
             throw new IOException("Recorded block size is " +
-                                  blocks[blockId].len + ", but datanode reports size of " +
+                                  block.getBlockSize() + 
+                                  ", but datanode reports size of " +
                                   curBlockSize);
           }
           if ((actualStart != start) || (actualEnd != end)) {
@@ -854,6 +919,9 @@
                                   "-" + actualEnd);
           }
           int nread = in.read(buf, offset, (int)(end - start + 1));
+          assert nread == (int)(end - start + 1) : 
+            "Incorrect number of bytes read " + nread
+            + ". Expected " + (int)(end - start + 1);
         } catch (IOException ex) {
           // Put chosen node into dead list, continue
           LOG.debug("Failed to connect to " + targetAddr + ":" 
@@ -869,44 +937,47 @@
         }
       }
     }
-        
-    public int read(long position, byte[] buf, int off, int len)
+
+    /**
+     * Read bytes starting from the specified position.
+     * 
+     * @param position start read from this position
+     * @param buffer read buffer
+     * @param offset offset into buffer
+     * @param length number of bytes to read
+     * 
+     * @return actual number of bytes read
+     */
+    public int read(long position, byte[] buffer, int offset, int length)
       throws IOException {
       // sanity checks
       checkOpen();
       if (closed) {
         throw new IOException("Stream closed");
       }
+      long filelen = getFileLength();
       if ((position < 0) || (position > filelen)) {
         return -1;
       }
-      int realLen = len;
-      if ((position + len) > filelen) {
+      int realLen = length;
+      if ((position + length) > filelen) {
         realLen = (int)(filelen - position);
       }
+      
       // determine the block and byte range within the block
       // corresponding to position and realLen
-      int targetBlock = -1;
-      long targetStart = 0;
-      long targetEnd = 0;
-      for (int idx = 0; idx < blocks.length; idx++) {
-        long blocklen = blocks[idx].getNumBytes();
-        targetEnd = targetStart + blocklen - 1;
-        if (position >= targetStart && position <= targetEnd) {
-          targetBlock = idx;
-          targetStart = position - targetStart;
-          targetEnd = Math.min(blocklen, targetStart + realLen) - 1;
-          realLen = (int)(targetEnd - targetStart + 1);
-          break;
-        }
-        targetStart += blocklen;
-      }
-      if (targetBlock < 0) {
-        throw new IOException(
-                              "Impossible situation: could not find target position "+
-                              position);
+      List<LocatedBlock> blockRange = getBlockRange(position, realLen);
+      int remaining = realLen;
+      for (LocatedBlock blk : blockRange) {
+        long targetStart = position - blk.getStartOffset();
+        long bytesToRead = Math.min(remaining, blk.getBlockSize() - targetStart);
+        fetchBlockByteRange(blk, targetStart, 
+                            targetStart + bytesToRead - 1, buffer, offset);
+        remaining -= bytesToRead;
+        position += bytesToRead;
+        offset += bytesToRead;
       }
-      fetchBlockByteRange(targetBlock, targetStart, targetEnd, buf, off);
+      assert remaining == 0 : "Wrong number of bytes read.";
       return realLen;
     }
         
@@ -914,7 +985,7 @@
      * Seek to a new arbitrary location
      */
     public synchronized void seek(long targetPos) throws IOException {
-      if (targetPos > filelen) {
+      if (targetPos > getFileLength()) {
         throw new IOException("Cannot seek after EOF");
       }
       boolean done = false;
@@ -974,7 +1045,7 @@
       if (closed) {
         throw new IOException("Stream closed");
       }
-      return (int) (filelen - pos);
+      return (int) (getFileLength() - pos);
     }
 
     /**
@@ -1015,11 +1086,10 @@
     }
 
     /**
-     * Used by the automatic tests to detemine blocks locations of a
-     * file
+     * Return collection of blocks that have already been located.
      */
-    synchronized DatanodeInfo[][] getDataNodes() {
-      return ((DFSInputStream)inStream).getDataNodes();
+    synchronized List<LocatedBlock> getAllBlocks() throws IOException {
+      return ((DFSInputStream)inStream).getAllBlocks();
     }
 
   }

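A note on tuning: as the DFSInputStream changes above show, the size of the location window prefetched at open() now comes from dfs.read.prefetch.size and defaults to ten times the default block size. A hedged example of overriding it from client code follows (the property name is taken from the code above; the value is an arbitrary illustration).

    import org.apache.hadoop.conf.Configuration;

    // Illustrative only: shrink the block-location prefetch window so each
    // open()/getBlockLocations() call asks the namenode for fewer blocks.
    public class PrefetchSizeExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("dfs.read.prefetch.size", Long.toString(128L * 1024 * 1024));
        // ... then obtain the FileSystem / DFSClient with this conf as usual
      }
    }
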
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSFileInfo.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSFileInfo.java?view=diff&rev=540715&r1=540714&r2=540715
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSFileInfo.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSFileInfo.java Tue May 22 12:40:11 2007
@@ -24,7 +24,10 @@
 
 /******************************************************
  * DFSFileInfo tracks info about remote files, including
- * name, size, etc.  
+ * name, size, etc.
+ * 
+ * Includes partial information about its blocks.
+ * Block locations are sorted by the distance to the current client.
  * 
  * @author Mike Cafarella
  ******************************************************/
@@ -37,13 +40,12 @@
        });
   }
 
-  UTF8 path;
+  Path path;
   long len;
-  long contentsLen;
   boolean isDir;
   short blockReplication;
   long blockSize;
-
+  
   /**
    */
   public DFSFileInfo() {
@@ -53,13 +55,9 @@
    * Create DFSFileInfo by file INode 
    */
   public DFSFileInfo(FSDirectory.INode node) {
-    this.path = new UTF8(node.computeName());
+    this.path = new Path(node.computeName());
     this.isDir = node.isDir();
-    if (isDir) {
-      this.len = 0;
-      this.contentsLen = node.computeContentsLength();
-    } else 
-      this.len = this.contentsLen = node.computeFileLength();
+    this.len = isDir ? node.computeContentsLength() : node.computeFileLength();
     this.blockReplication = node.getReplication();
     blockSize = node.getBlockSize();
   }
@@ -73,13 +71,13 @@
   /**
    */
   public String getName() {
-    return new Path(path.toString()).getName();
+    return path.getName();
   }
-
+  
   /**
    */
   public String getParent() {
-    return new Path(path.toString()).getParent().toString();
+    return path.getParent().toString();
   }
 
   /**
@@ -89,9 +87,11 @@
   }
 
   /**
+   * @deprecated use {@link #getLen()} instead
    */
   public long getContentsLen() {
-    return contentsLen;
+    assert isDir() : "Must be a directory";
+    return len;
   }
 
   /**
@@ -118,22 +118,19 @@
   // Writable
   //////////////////////////////////////////////////
   public void write(DataOutput out) throws IOException {
-    path.write(out);
+    Text.writeString(out, getPath());
     out.writeLong(len);
-    out.writeLong(contentsLen);
     out.writeBoolean(isDir);
     out.writeShort(blockReplication);
     out.writeLong(blockSize);
   }
-
+  
   public void readFields(DataInput in) throws IOException {
-    this.path = new UTF8();
-    this.path.readFields(in);
+    String strPath = Text.readString(in);
+    this.path = new Path(strPath);
     this.len = in.readLong();
-    this.contentsLen = in.readLong();
     this.isDir = in.readBoolean();
     this.blockReplication = in.readShort();
     blockSize = in.readLong();
   }
 }
-

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DfsPath.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DfsPath.java?view=diff&rev=540715&r1=540714&r2=540715
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DfsPath.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DfsPath.java Tue May 22 12:40:11 2007
@@ -43,7 +43,8 @@
     return info.getLen();
   }
   public long getContentsLength() {
-    return info.getContentsLen();
+    assert isDirectory() : "Must be a directory";
+    return info.getLen();
   }
   public short getReplication() {
     return info.getReplication();

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java?view=diff&rev=540715&r1=540714&r2=540715
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java Tue May 22 12:40:11 2007
@@ -106,7 +106,14 @@
       workingDir = makeAbsolute(dir);
     }
     
+    /**
+     * @deprecated use {@link #getPathName(Path)} instead.
+     */
     private UTF8 getPath(Path file) {
+      return new UTF8(getPathName(file));
+    }
+
+    private String getPathName(Path file) {
       checkPath(file);
       String result = makeAbsolute(file).toUri().getPath();
       if (!FSNamesystem.isValidName(result)) {
@@ -114,11 +121,11 @@
                                            file +
                                            " is not a valid DFS filename.");
       }
-      return new UTF8(result);
+      return result;
     }
 
     public String[][] getFileCacheHints(Path f, long start, long len) throws IOException {
-      return dfs.getHints(getPath(f), start, len);
+      return dfs.getHints(getPathName(f), start, len);
     }
 
     public FSDataInputStream open(Path f, int bufferSize) throws IOException {
@@ -192,7 +199,7 @@
       }
 
       DFSFileInfo info[] = dfs.listPaths(getPath(f));
-      return (info == null) ? 0 : info[0].getContentsLen();
+      return (info == null) ? 0 : info[0].getLen();
     }
 
     public short getReplication(Path f) throws IOException {

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java?view=diff&rev=540715&r1=540714&r2=540715
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java Tue May 22 12:40:11 2007
@@ -49,7 +49,7 @@
     private String name;
     private INode parent;
     private TreeMap<String, INode> children = null;
-    private Block blocks[];
+    private Block blocks[] = null;
     private short blockReplication;
 
     /**
@@ -654,17 +654,27 @@
   }
 
   /**
-   * Get the blocks associated with the file
+   * Get the blocks associated with the file.
    */
-  public Block[] getFile(UTF8 src) {
+  Block[] getFileBlocks(String src) {
     waitForReady();
     synchronized (rootDir) {
-      INode targetNode = rootDir.getNode(src.toString());
+      INode targetNode = rootDir.getNode(src);
       if (targetNode == null) {
         return null;
       } else {
         return targetNode.blocks;
       }
+    }
+  }
+
+  /**
+   * Get {@link INode} associated with the file.
+   */
+  INode getFileINode(String src) {
+    waitForReady();
+    synchronized (rootDir) {
+      return rootDir.getNode(src);
     }
   }
 

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java?view=diff&rev=540715&r1=540714&r2=540715
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java Tue May 22 12:40:11 2007
@@ -24,13 +24,11 @@
 import org.apache.hadoop.util.*;
 import org.apache.hadoop.mapred.StatusHttpServer;
 import org.apache.hadoop.net.NetworkTopology;
-import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.ipc.Server;
 
 import java.io.*;
 import java.util.*;
-import java.lang.UnsupportedOperationException;
 
 /***************************************************
  * FSNamesystem does the actual bookkeeping work for the
@@ -426,41 +424,69 @@
   //
   /////////////////////////////////////////////////////////
   /**
-   * The client wants to open the given filename.  Return a
-   * list of (block,machineArray) pairs.  The sequence of unique blocks
-   * in the list indicates all the blocks that make up the filename.
-   *
-   * The client should choose one of the machines from the machineArray
-   * at random.
+   * Get block locations within the specified range.
+   * 
+   * @see ClientProtocol#open(String, long, long)
+   * @see ClientProtocol#getBlockLocations(String, long, long)
    */
-  synchronized public Object[] open(String clientMachine, UTF8 src) {
-    Object results[] = null;
-    Block blocks[] = dir.getFile(src);
-    if (blocks != null) {
-      results = new Object[2];
-      DatanodeDescriptor machineSets[][] = new DatanodeDescriptor[blocks.length][];
-      DatanodeDescriptor client = 
-        host2DataNodeMap.getDatanodeByHost(clientMachine);
-
-      for (int i = 0; i < blocks.length; i++) {
-        int numNodes = blocksMap.numNodes(blocks[i]);
-        if (numNodes <= 0) {
-          machineSets[i] = new DatanodeDescriptor[0];
-        } else {
-          machineSets[i] = new DatanodeDescriptor[ numNodes ];
-          numNodes = 0;
-          for(Iterator<DatanodeDescriptor> it = 
-                blocksMap.nodeIterator(blocks[i]); it.hasNext();) {
-            machineSets[i][ numNodes++ ] = it.next();
-          }
-          clusterMap.sortByDistance(client, machineSets[i]);
-        }
-      }
+  synchronized LocatedBlocks  getBlockLocations(String clientMachine,
+                                                String src, 
+                                                long offset, 
+                                                long length) {
+    return  getBlockLocations(clientMachine, 
+                              dir.getFileINode(src), 
+                              offset, length, Integer.MAX_VALUE);
+  }
+  
+  private LocatedBlocks getBlockLocations(String clientMachine,
+                                          FSDirectory.INode inode, 
+                                          long offset, 
+                                          long length,
+                                          int nrBlocksToReturn) {
+    if(inode == null || inode.isDir()) {
+      return null;
+    }
+    Block[] blocks = inode.getBlocks();
+    if (blocks == null) {
+      return null;
+    }
+    List<LocatedBlock> results;
+    results = new ArrayList<LocatedBlock>(blocks.length);
 
-      results[0] = blocks;
-      results[1] = machineSets;
+    int curBlk = 0;
+    long curPos = 0, blkSize = 0;
+    for (curBlk = 0; curBlk < blocks.length; curBlk++) {
+      blkSize = blocks[curBlk].getNumBytes();
+      if (curPos + blkSize > offset) {
+        break;
+      }
+      curPos += blkSize;
     }
-    return results;
+    
+    long endOff = offset + length;
+    
+    DatanodeDescriptor client;
+    client = host2DataNodeMap.getDatanodeByHost(clientMachine);
+    do {
+      // get block locations
+      int numNodes = blocksMap.numNodes(blocks[curBlk]);
+      DatanodeDescriptor[] machineSet = new DatanodeDescriptor[numNodes];
+      if (numNodes > 0) {
+        numNodes = 0;
+        for(Iterator<DatanodeDescriptor> it = 
+            blocksMap.nodeIterator(blocks[curBlk]); it.hasNext();) {
+          machineSet[numNodes++] = it.next();
+        }
+        clusterMap.sortByDistance(client, machineSet);
+      }
+      results.add(new LocatedBlock(blocks[curBlk], machineSet, curPos));
+      curPos += blocks[curBlk].getNumBytes();
+      curBlk++;
+    } while (curPos < endOff 
+          && curBlk < blocks.length 
+          && results.size() < nrBlocksToReturn);
+    
+    return new LocatedBlocks(inode, results);
   }
 
   /**
@@ -545,13 +571,13 @@
    * @throws IOException if the filename is invalid
    *         {@link FSDirectory#isValidToCreate(UTF8)}.
    */
-  public synchronized Object[] startFile(UTF8 src, 
-                                         UTF8 holder, 
-                                         UTF8 clientMachine, 
-                                         boolean overwrite,
-                                         short replication,
-                                         long blockSize
-                                         ) throws IOException {
+  public synchronized LocatedBlock startFile(UTF8 src, 
+                                             UTF8 holder, 
+                                             UTF8 clientMachine, 
+                                             boolean overwrite,
+                                             short replication,
+                                             long blockSize
+                                             ) throws IOException {
     NameNode.stateChangeLog.debug("DIR* NameSystem.startFile: file "
                                   +src+" for "+holder+" at "+clientMachine);
     if (isInSafeMode())
@@ -670,11 +696,8 @@
         lease.startedCreate(src);
       }
 
-      // Create next block
-      Object results[] = new Object[2];
-      results[0] = allocateBlock(src);
-      results[1] = targets;
-      return results;
+      // Create first block
+      return new LocatedBlock(allocateBlock(src), targets, 0L);
     } catch (IOException ie) {
       NameNode.stateChangeLog.warn("DIR* NameSystem.startFile: "
                                    +ie.getMessage());
@@ -693,9 +716,9 @@
    * are replicated.  Will return an empty 2-elt array if we want the
    * client to "try again later".
    */
-  public synchronized Object[] getAdditionalBlock(UTF8 src, 
-                                                  UTF8 clientName
-                                                  ) throws IOException {
+  public synchronized LocatedBlock getAdditionalBlock(UTF8 src, 
+                                                      UTF8 clientName
+                                                      ) throws IOException {
     NameNode.stateChangeLog.debug("BLOCK* NameSystem.getAdditionalBlock: file "
                                   +src+" for "+clientName);
     if (isInSafeMode())
@@ -732,7 +755,9 @@
     }
         
     // Create next block
-    return new Object[]{allocateBlock(src), targets};
+    return new LocatedBlock(allocateBlock(src), 
+                            targets, 
+                            pendingFile.computeFileLength());
   }
 
   /**
@@ -803,10 +828,10 @@
       throw new SafeModeException("Cannot complete file " + src, safeMode);
     FileUnderConstruction pendingFile = pendingCreates.get(src);
 
-    if (dir.getFile(src) != null || pendingFile == null) {
+    if (dir.getFileBlocks(src.toString()) != null || pendingFile == null) {
       NameNode.stateChangeLog.warn("DIR* NameSystem.completeFile: "
                                    + "failed to complete " + src
-                                   + " because dir.getFile()==" + dir.getFile(src) 
+                                    + " because dir.getFile()==" + dir.getFileBlocks(src.toString()) 
                                    + " and " + pendingFile);
       return OPERATION_FAILED;
     } else if (!checkFileProgress(pendingFile, true)) {
@@ -1054,7 +1079,7 @@
    * Return whether the given filename exists
    */
   public boolean exists(UTF8 src) {
-    if (dir.getFile(src) != null || dir.isDir(src)) {
+    if (dir.getFileBlocks(src.toString()) != null || dir.isDir(src)) {
       return true;
     } else {
       return false;
@@ -1111,65 +1136,6 @@
     return success;
   }
 
-  /**
-   * Figure out a few hosts that are likely to contain the
-   * block(s) referred to by the given (filename, start, len) tuple.
-   */
-  public String[][] getDatanodeHints(String src, long start, long len) {
-    if (start < 0 || len < 0) {
-      return new String[0][];
-    }
-
-    int startBlock = -1;
-    int endBlock = -1;
-    Block blocks[] = dir.getFile(new UTF8(src));
-
-    if (blocks == null) {                     // no blocks
-      return new String[0][];
-    }
-
-    //
-    // First, figure out where the range falls in
-    // the blocklist.
-    //
-    long startpos = start;
-    long endpos = start + len;
-    for (int i = 0; i < blocks.length; i++) {
-      if (startpos >= 0) {
-        startpos -= blocks[i].getNumBytes();
-        if (startpos <= 0) {
-          startBlock = i;
-        }
-      }
-      if (endpos >= 0) {
-        endpos -= blocks[i].getNumBytes();
-        if (endpos <= 0) {
-          endBlock = i;
-          break;
-        }
-      }
-    }
-
-    //
-    // Next, create an array of hosts where each block can
-    // be found
-    //
-    if (startBlock < 0 || endBlock < 0) {
-      return new String[0][];
-    } else {
-      String hosts[][] = new String[(endBlock - startBlock) + 1][];
-      for (int i = startBlock; i <= endBlock; i++) {
-        Collection<String> v = new ArrayList<String>();
-        for (Iterator<DatanodeDescriptor> it = 
-               blocksMap.nodeIterator(blocks[i]); it.hasNext();) {
-          v.add(it.next().getHostName());
-        }
-        hosts[i-startBlock] = v.toArray(new String[v.size()]);
-      }
-      return hosts;
-    }
-  }
-
   /************************************************************
    * A Lease governs all the locks held by a single client.
    * For each client there's a corresponding lease, whose
@@ -2884,7 +2850,7 @@
    * <em>safe blocks</em>, those that have at least the minimal number of
    * replicas, and calculates the ratio of safe blocks to the total number
    * of blocks in the system, which is the size of
-   * {@link blocksMap}. When the ratio reaches the
+   * {@link FSNamesystem#blocksMap}. When the ratio reaches the
    * {@link #threshold} it starts the {@link SafeModeMonitor} daemon in order
    * to monitor whether the safe mode extension is passed. Then it leaves safe
    * mode and destroys itself.

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FileUnderConstruction.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FileUnderConstruction.java?view=diff&rev=540715&r1=540714&r2=540715
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FileUnderConstruction.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FileUnderConstruction.java Tue May 22 12:40:11 2007
@@ -84,4 +84,12 @@
     }
     return ((ArrayList<Block>)blocks).get(blocks.size() - 2);
   }
+  
+  long computeFileLength() {
+    long total = 0;
+    for (Block blk : blocks) {
+      total += blk.getNumBytes();
+    }
+    return total;
+  }
 }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/LocatedBlock.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/LocatedBlock.java?view=diff&rev=540715&r1=540714&r2=540715
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/LocatedBlock.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/LocatedBlock.java Tue May 22 12:40:11 2007
@@ -37,20 +37,27 @@
        });
   }
 
-  Block b;
-  DatanodeInfo locs[];
+  private Block b;
+  private long offset;  // offset of the first byte of the block in the file
+  private DatanodeInfo[] locs;
 
   /**
    */
   public LocatedBlock() {
-    this.b = new Block();
-    this.locs = new DatanodeInfo[0];
+    this(new Block(), new DatanodeInfo[0], 0L);
   }
 
   /**
    */
   public LocatedBlock(Block b, DatanodeInfo[] locs) {
+    this(b, locs, -1); // startOffset is unknown
+  }
+
+  /**
+   */
+  public LocatedBlock(Block b, DatanodeInfo[] locs, long startOffset) {
     this.b = b;
+    this.offset = startOffset;
     if (locs==null) {
       this.locs = new DatanodeInfo[0];
     } else {
@@ -69,11 +76,24 @@
   DatanodeInfo[] getLocations() {
     return locs;
   }
+  
+  long getStartOffset() {
+    return offset;
+  }
+  
+  long getBlockSize() {
+    return b.getNumBytes();
+  }
+
+  void setStartOffset(long value) {
+    this.offset = value;
+  }
 
   ///////////////////////////////////////////
   // Writable
   ///////////////////////////////////////////
   public void write(DataOutput out) throws IOException {
+    out.writeLong(offset);
     b.write(out);
     out.writeInt(locs.length);
     for (int i = 0; i < locs.length; i++) {
@@ -82,6 +102,7 @@
   }
 
   public void readFields(DataInput in) throws IOException {
+    offset = in.readLong();
     this.b = new Block();
     b.readFields(in);
     int count = in.readInt();

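For readers tracing the wire format: LocatedBlock now serializes the block's start offset ahead of the block itself, followed by the location count and each DatanodeInfo. A small, illustrative round-trip sketch over that layout (not part of the commit; it assumes package access to the new accessors):

    package org.apache.hadoop.dfs;   // sketch only: the new accessors are package-private

    import java.io.*;

    // Illustrative round trip over the new LocatedBlock Writable layout:
    //   long offset, then the Block, then locs.length and each DatanodeInfo.
    class LocatedBlockWireSketch {
      public static void main(String[] args) throws IOException {
        LocatedBlock original = new LocatedBlock(new Block(), new DatanodeInfo[0], 1024L);

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));

        LocatedBlock copy = new LocatedBlock();
        copy.readFields(new DataInputStream(
            new ByteArrayInputStream(bytes.toByteArray())));
        // copy.getStartOffset() is now 1024 and copy.getLocations() is empty
      }
    }
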
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java?view=diff&rev=540715&r1=540714&r2=540715
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java Tue May 22 12:40:11 2007
@@ -253,24 +253,32 @@
     
   /**
    */
-  public LocatedBlock[] open(String src) throws IOException {
+  public LocatedBlocks open(String src,
+                            long offset,
+                            long length) throws IOException {
+    LocatedBlocks result = getBlockLocations(src, offset, length);
+    if (result == null) {
+      throw new IOException("Cannot open filename " + src);
+    }
+    myMetrics.openFile();
+    return result;
+  }
+
+  /**
+   */
+  public LocatedBlocks   getBlockLocations(String src, 
+                                          long offset, 
+                                          long length) throws IOException {
+    return namesystem.getBlockLocations(getClientMachine(), 
+                                        src, offset, length);
+  }
+  
+  private static String getClientMachine() {
     String clientMachine = Server.getRemoteAddress();
     if (clientMachine == null) {
       clientMachine = "";
     }
-    Object openResults[] = namesystem.open(clientMachine, new UTF8(src));
-    if (openResults == null) {
-      throw new IOException("Cannot open filename " + src);
-    } else {
-      myMetrics.openFile();
-      Block blocks[] = (Block[]) openResults[0];
-      DatanodeInfo sets[][] = (DatanodeInfo[][]) openResults[1];
-      LocatedBlock results[] = new LocatedBlock[blocks.length];
-      for (int i = 0; i < blocks.length; i++) {
-        results[i] = new LocatedBlock(blocks[i], sets[i]);
-      }
-      return results;
-    }
+    return clientMachine;
   }
 
   /**
@@ -281,26 +289,21 @@
                              short replication,
                              long blockSize
                              ) throws IOException {
-    String clientMachine = Server.getRemoteAddress();
-    if (clientMachine == null) {
-      clientMachine = "";
-    }
+    String clientMachine = getClientMachine();
     stateChangeLog.debug("*DIR* NameNode.create: file "
                          +src+" for "+clientName+" at "+clientMachine);
     if (!checkPathLength(src)) {
       throw new IOException("create: Pathname too long.  Limit " 
                             + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
     }
-    Object results[] = namesystem.startFile(new UTF8(src), 
-                                            new UTF8(clientName), 
-                                            new UTF8(clientMachine), 
-                                            overwrite,
-                                            replication,
-                                            blockSize);
+    LocatedBlock result =  namesystem.startFile(new UTF8(src), 
+                                                new UTF8(clientName), 
+                                                new UTF8(clientMachine), 
+                                                overwrite,
+                                                replication,
+                                                blockSize);
     myMetrics.createFile();
-    Block b = (Block) results[0];
-    DatanodeInfo targets[] = (DatanodeInfo[]) results[1];
-    return new LocatedBlock(b, targets);
+    return result;
   }
 
   public boolean setReplication(String src, 
@@ -317,10 +320,7 @@
                          +src+" for "+clientName);
     UTF8 src8 = new UTF8(src);
     UTF8 client8 = new UTF8(clientName);
-    Object[] results = namesystem.getAdditionalBlock(src8, client8);
-    Block b = (Block) results[0];
-    DatanodeInfo targets[] = (DatanodeInfo[]) results[1];
-    return new LocatedBlock(b, targets);            
+    return namesystem.getAdditionalBlock(src8, client8);
   }
 
   /**
@@ -372,12 +372,6 @@
     }
   }
 
-  /**
-   */
-  public String[][] getHints(String src, long start, long len) throws IOException {
-    return namesystem.getDatanodeHints(src, start, len);
-  }
-    
   public long getBlockSize(String filename) throws IOException {
     return namesystem.getBlockSize(filename);
   }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java?view=diff&rev=540715&r1=540714&r2=540715
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java Tue May 22 12:40:11 2007
@@ -36,7 +36,6 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.UTF8;
-import org.apache.hadoop.net.DNS;
 
 
 /**
@@ -152,11 +151,12 @@
       return;
     }
     res.totalFiles++;
-    res.totalSize += file.getLen();
-    LocatedBlock[] blocks = nn.open(file.getPath());
-    res.totalBlocks += blocks.length;
+    long fileLen = file.getLen();
+    res.totalSize += fileLen;
+    LocatedBlocks blocks = nn.getBlockLocations(file.getPath(), 0, fileLen);
+    res.totalBlocks += blocks.locatedBlockCount();
     if (showFiles) {
-      out.print(file.getPath() + " " + file.getLen() + ", " + blocks.length + " block(s): ");
+      out.print(file.getPath() + " " + fileLen + ", " + res.totalBlocks + " block(s): ");
     }  else {
       out.print('.');
       out.flush();
@@ -166,10 +166,11 @@
     long missize = 0;
     int under = 0;
     StringBuffer report = new StringBuffer();
-    for (int i = 0; i < blocks.length; i++) {
-      Block block = blocks[i].getBlock();
+    int i = 0;
+    for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
+      Block block = lBlk.getBlock();
       long id = block.getBlockId();
-      DatanodeInfo[] locs = blocks[i].getLocations();
+      DatanodeInfo[] locs = lBlk.getLocations();
       short targetFileReplication = file.getReplication();
       if (locs.length > targetFileReplication) {
         res.overReplicatedBlocks += (locs.length - targetFileReplication);
@@ -206,6 +207,7 @@
         }
       }
       report.append('\n');
+      i++;
     }
     if (missing > 0) {
       if (!showFiles) {
@@ -236,7 +238,7 @@
     }
   }
   
-  private void lostFoundMove(DFSFileInfo file, LocatedBlock[] blocks)
+  private void lostFoundMove(DFSFileInfo file, LocatedBlocks blocks)
     throws IOException {
     DFSClient dfs = new DFSClient(DataNode.createSocketAddr(
                                                             conf.get("fs.default.name", "local")), conf);
@@ -256,8 +258,8 @@
       // create chains
       int chain = 0;
       OutputStream fos = null;
-      for (int i = 0; i < blocks.length; i++) {
-        LocatedBlock lblock = blocks[i];
+      for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
+        LocatedBlock lblock = lBlk;
         DatanodeInfo[] locs = lblock.getLocations();
         if (locs == null || locs.length == 0) {
           if (fos != null) {

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java?view=diff&rev=540715&r1=540714&r2=540715
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java Tue May 22 12:40:11 2007
@@ -48,7 +48,7 @@
    * @param conf the base configuration to use in starting the servers.  This
    *          will be modified as necessary.
    * @param numDataNodes Number of DataNodes to start; may be zero
-   * @param operation the operation with which to start the servers.  If null
+   * @param nameNodeOperation the operation with which to start the servers.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
    */
   public MiniDFSCluster(Configuration conf,
@@ -207,7 +207,7 @@
    * When this method return, the NameNode should be finalized, but
    * DataNodes may not be since that occurs asynchronously.
    *
-   * @throw IllegalStateException if the Namenode is not running.
+   * @throws IllegalStateException if the Namenode is not running.
    */
   public void finalizeCluster(Configuration conf) throws Exception {
     if (nameNode == null) {

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDecommission.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDecommission.java?view=diff&rev=540715&r1=540714&r2=540715
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDecommission.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDecommission.java Tue May 22 12:40:11 2007
@@ -19,6 +19,7 @@
 
 import junit.framework.TestCase;
 import java.io.*;
+import java.util.Collection;
 import java.util.Random;
 import java.util.ArrayList;
 import java.util.Iterator;
@@ -118,11 +119,11 @@
         
     DFSClient.DFSDataInputStream dis = (DFSClient.DFSDataInputStream) 
       ((DistributedFileSystem)fileSys).getRawFileSystem().open(name);
-    DatanodeInfo[][] dinfo = dis.getDataNodes();
+    Collection<LocatedBlock> dinfo = dis.getAllBlocks();
 
-    for (int blk = 0; blk < dinfo.length; blk++) { // for each block
+    for (LocatedBlock blk : dinfo) { // for each block
       int hasdown = 0;
-      DatanodeInfo[] nodes = dinfo[blk];
+      DatanodeInfo[] nodes = blk.getLocations();
       for (int j = 0; j < nodes.length; j++) {     // for each replica
         if (nodes[j].getName().equals(downnode)) {
           hasdown++;

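TestDecommission now gets the same information from the open stream itself: DFSDataInputStream.getAllBlocks() returns a Collection<LocatedBlock>, one entry per block, and each entry lists its replica locations. A minimal sketch of the per-replica count the test performs; it assumes code living in the org.apache.hadoop.dfs package (as the test does, since DFSDataInputStream is an inner class of DFSClient), a DistributedFileSystem fs, an existing Path name, and a hypothetical datanode name downnode.

// Sketch: count, per block, how many replicas sit on one particular datanode.
DFSClient.DFSDataInputStream dis = (DFSClient.DFSDataInputStream)
    fs.getRawFileSystem().open(name);
for (LocatedBlock blk : dis.getAllBlocks()) {          // one entry per block
  int onDownNode = 0;
  for (DatanodeInfo node : blk.getLocations()) {       // one entry per replica
    if (node.getName().equals(downnode)) {
      onDownNode++;
    }
  }
  System.out.println("blk_" + blk.getBlock().getBlockId() + ": "
      + onDownNode + " replica(s) on " + downnode);
}
dis.close();
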
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestPread.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestPread.java?view=diff&rev=540715&r1=540714&r2=540715
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestPread.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestPread.java Tue May 22 12:40:11 2007
@@ -17,13 +17,11 @@
  */
 package org.apache.hadoop.dfs;
 
-import javax.swing.filechooser.FileSystemView;
 import junit.framework.TestCase;
 import java.io.*;
 import java.util.Random;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 
@@ -40,7 +38,7 @@
     // create and write a file that contains three blocks of data
     DataOutputStream stm = fileSys.create(name, true, 4096, (short)1,
                                           (long)blockSize);
-    byte[] buffer = new byte[(int)(3*blockSize)];
+    byte[] buffer = new byte[(int)(12*blockSize)];
     Random rand = new Random(seed);
     rand.nextBytes(buffer);
     stm.write(buffer);
@@ -49,7 +47,7 @@
   
   private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
     for (int idx = 0; idx < actual.length; idx++) {
-      this.assertEquals(message+" byte "+(from+idx)+" differs. expected "+
+      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
                         expected[from+idx]+" actual "+actual[idx],
                         actual[idx], expected[from+idx]);
       actual[idx] = 0;
@@ -67,7 +65,7 @@
   }
   private void pReadFile(FileSystem fileSys, Path name) throws IOException {
     FSDataInputStream stm = fileSys.open(name);
-    byte[] expected = new byte[(int)(3*blockSize)];
+    byte[] expected = new byte[(int)(12*blockSize)];
     Random rand = new Random(seed);
     rand.nextBytes(expected);
     // do a sanity check. Read first 4K bytes
@@ -91,18 +89,35 @@
     actual = new byte[(int)(blockSize+4096)];
     stm.readFully(blockSize - 2048, actual);
     checkAndEraseData(actual, (int)(blockSize-2048), expected, "Pread Test 4");
+    // now see if we can cross two block boundaries that are not cached
+    // read blockSize + 4K bytes from 10*blockSize - 2K offset
+    actual = new byte[(int)(blockSize+4096)];
+    stm.readFully(10*blockSize - 2048, actual);
+    checkAndEraseData(actual, (int)(10*blockSize-2048), expected, "Pread Test 5");
     // now check that even after all these preads, we can still read
     // bytes 8K-12K
     actual = new byte[4096];
     stm.readFully(actual);
-    checkAndEraseData(actual, 8192, expected, "Pread Test 5");
-    // all done
+    checkAndEraseData(actual, 8192, expected, "Pread Test 6");
+    // done
+    stm.close();
+    // check block location caching
+    stm = fileSys.open(name);
+    stm.readFully(1, actual, 0, 4096);
+    stm.readFully(4*blockSize, actual, 0, 4096);
+    stm.readFully(7*blockSize, actual, 0, 4096);
+    actual = new byte[3*4096];
+    stm.readFully(0*blockSize, actual, 0, 3*4096);
+    checkAndEraseData(actual, 0, expected, "Pread Test 7");
+    actual = new byte[8*4096];
+    stm.readFully(3*blockSize, actual, 0, 8*4096);
+    checkAndEraseData(actual, 3*blockSize, expected, "Pread Test 8");
     stm.close();
   }
   
   private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
     assertTrue(fileSys.exists(name));
-    fileSys.delete(name);
+    assertTrue(fileSys.delete(name));
     assertTrue(!fileSys.exists(name));
   }
   
@@ -111,6 +126,8 @@
    */
   public void testPreadDFS() throws IOException {
     Configuration conf = new Configuration();
+    conf.setLong("dfs.block.size", 4096);
+    conf.setLong("dfs.read.prefetch.size", 4096);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
     FileSystem fileSys = cluster.getFileSystem();
     try {
@@ -138,5 +155,9 @@
     } finally {
       fileSys.close();
     }
+  }
+
+  public static void main(String[] args) throws Exception {
+    new TestPread().testPreadDFS();
   }
 }

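The new constants in TestPread are what exercise this change: with a 4 KB block size and a 4 KB dfs.read.prefetch.size, the client caches only a small window of block locations and has to go back to the namenode whenever a positional read lands outside it. A minimal sketch of that setup with a hypothetical file name; the configuration keys and the MiniDFSCluster/create() calls mirror the test above, and an enclosing method declared to throw IOException is assumed.

// Sketch: tiny blocks plus a tiny prefetch window force extra
// getBlockLocations() round trips on positional reads.
Configuration conf = new Configuration();
conf.setLong("dfs.block.size", 4096);            // 4 KB blocks
conf.setLong("dfs.read.prefetch.size", 4096);    // locations fetched per request
MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
FileSystem fs = cluster.getFileSystem();
try {
  Path name = new Path("/pread-demo.dat");       // hypothetical test file
  DataOutputStream stm = fs.create(name, true, 4096, (short)1, 4096L);
  stm.write(new byte[12 * 4096]);                // twelve blocks
  stm.close();
  FSDataInputStream in = fs.open(name);
  byte[] buf = new byte[2 * 4096];
  in.readFully(10 * 4096 - 2048, buf);           // crosses a boundary that is not cached
  in.close();
} finally {
  fs.close();
  cluster.shutdown();
}
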
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java?view=diff&rev=540715&r1=540714&r2=540715
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java Tue May 22 12:40:11 2007
@@ -29,7 +29,6 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.DNS;
 
 /**
  * This class tests the replication of a DFS file.
@@ -69,13 +68,13 @@
                                                             DataNode.createSocketAddr(conf.get("fs.default.name")), 
                                                             conf);
       
-    LocatedBlock[] locations;
+    LocatedBlocks locations;
     boolean isReplicationDone;
     do {
-      locations = namenode.open(name.toString());
+      locations = namenode.getBlockLocations(name.toString(),0,Long.MAX_VALUE);
       isReplicationDone = true;
-      for (int idx = 0; idx < locations.length; idx++) {
-        DatanodeInfo[] datanodes = locations[idx].getLocations();
+      for (LocatedBlock blk : locations.getLocatedBlocks()) {
+        DatanodeInfo[] datanodes = blk.getLocations();
         if (Math.min(numDatanodes, repl) != datanodes.length) {
           isReplicationDone=false;
           LOG.warn("File has "+datanodes.length+" replicas, expecting "
@@ -91,8 +90,8 @@
     } while(!isReplicationDone);
       
     boolean isOnSameRack = true, isNotOnSameRack = true;
-    for (int idx = 0; idx < locations.length; idx++) {
-      DatanodeInfo[] datanodes = locations[idx].getLocations();
+    for (LocatedBlock blk : locations.getLocatedBlocks()) {
+      DatanodeInfo[] datanodes = blk.getLocations();
       if (datanodes.length <= 1) break;
       if (datanodes.length == 2) {
         isNotOnSameRack = !(datanodes[0].getNetworkLocation().equals(

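TestReplication's loop reduces to: ask the namenode for the file's block locations and require every block to report the expected number of replicas, retrying until it does. A minimal sketch of that check, not code from the patch; namenode is assumed to be a ClientProtocol handle, repl the target replication factor, and the enclosing method to declare throws IOException, InterruptedException.

// Sketch: wait until every block of a file has the expected replica count.
boolean replicated;
do {
  LocatedBlocks locations =
      namenode.getBlockLocations("/user/demo/file.txt", 0, Long.MAX_VALUE);
  replicated = true;
  for (LocatedBlock blk : locations.getLocatedBlocks()) {
    if (blk.getLocations().length < repl) {     // fewer replicas than requested
      replicated = false;
      break;
    }
  }
  if (!replicated) {
    Thread.sleep(500);                          // give re-replication time to run
  }
} while (!replicated);
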
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java?view=diff&rev=540715&r1=540714&r2=540715
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java Tue May 22 12:40:11 2007
@@ -25,7 +25,6 @@
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.RandomAccessFile;
-import java.net.InetSocketAddress;
 import java.util.Random;
 import java.util.zip.CRC32;
 import org.apache.hadoop.conf.Configuration;
@@ -191,8 +190,8 @@
    *    the Datanode is started.
    * @param dir must be a directory. Subdirectories are ignored.
    *
-   * @throw IllegalArgumentException if specified directory is not a directory
-   * @throw IOException if an IOException occurs while reading the files
+   * @throws IllegalArgumentException if specified directory is not a directory
+   * @throws IOException if an IOException occurs while reading the files
    * @return the computed checksum value
    */
   public static long checksumContents(NodeType nodeType, File dir) throws IOException {
@@ -304,8 +303,8 @@
    * Corrupt the specified file.  Some random bytes within the file
    * will be changed to some random values.
    *
-   * @throw IllegalArgumentException if the given file is not a file
-   * @throw IOException if an IOException occurs while reading or writing the file
+   * @throws IllegalArgumentException if the given file is not a file
+   * @throws IOException if an IOException occurs while reading or writing the file
    */
   public static void corruptFile(File file) throws IOException {
     if (!file.isFile()) {

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestDFSIO.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestDFSIO.java?view=diff&rev=540715&r1=540714&r2=540715
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestDFSIO.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestDFSIO.java Tue May 22 12:40:11 2007
@@ -27,6 +27,7 @@
 import org.apache.commons.logging.*;
 
 import org.apache.hadoop.mapred.*;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.io.*;
 import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.conf.*;
@@ -361,7 +362,7 @@
     
       analyzeResult(fs, testType, execTime, resFileName);
     } catch(Exception e) {
-      System.err.print(e.getLocalizedMessage());
+      System.err.print(StringUtils.stringifyException(e));
       System.exit(-1);
     }
   }

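The TestDFSIO change is only about error reporting, but the difference matters when a benchmark dies inside the MapReduce machinery: getLocalizedMessage() prints a single line, while StringUtils.stringifyException() keeps the full stack trace. A small sketch with a synthetic exception; it assumes imports of java.io.IOException and org.apache.hadoop.util.StringUtils.

// Sketch: the stringified form preserves the stack trace, the message does not.
try {
  throw new IOException("simulated failure");   // synthetic, for illustration only
} catch (Exception e) {
  System.err.println(e.getLocalizedMessage());             // one line: "simulated failure"
  System.err.print(StringUtils.stringifyException(e));     // message plus full stack trace
}
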
Modified: lucene/hadoop/trunk/src/webapps/datanode/browseBlock.jsp
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/webapps/datanode/browseBlock.jsp?view=diff&rev=540715&r1=540714&r2=540715
==============================================================================
--- lucene/hadoop/trunk/src/webapps/datanode/browseBlock.jsp (original)
+++ lucene/hadoop/trunk/src/webapps/datanode/browseBlock.jsp Tue May 22 12:40:11 2007
@@ -68,7 +68,8 @@
     blockSize = Long.parseLong(blockSizeStr);
 
     DFSClient dfs = new DFSClient(jspHelper.nameNodeAddr, jspHelper.conf);
-    LocatedBlock[] blocks = dfs.namenode.open(filename);
+    List<LocatedBlock> blocks = 
+      dfs.namenode.getBlockLocations(filename, 0, Long.MAX_VALUE).getLocatedBlocks();
     //Add the various links for looking at the file contents
     //URL for downloading the full file
     String downloadUrl = "http://" + req.getServerName() + ":" +
@@ -79,7 +80,7 @@
     
     DatanodeInfo chosenNode;
     //URL for TAIL 
-    LocatedBlock lastBlk = blocks[blocks.length - 1];
+    LocatedBlock lastBlk = blocks.get(blocks.size() - 1);
     long blockId = lastBlk.getBlock().getBlockId();
     try {
       chosenNode = jspHelper.bestNode(lastBlk);
@@ -119,16 +120,16 @@
     out.print("</form>");
     out.print("<hr>"); 
     out.print("<a name=\"blockDetails\"></a>");
-    out.print("<B>Total number of blocks: "+blocks.length+"</B><br>");
+    out.print("<B>Total number of blocks: "+blocks.size()+"</B><br>");
     //generate a table and dump the info
     out.println("\n<table>");
-    for (int i = 0; i < blocks.length; i++) {
+    for (LocatedBlock cur : blocks) {
       out.print("<tr>");
-      blockId = blocks[i].getBlock().getBlockId();
-      blockSize = blocks[i].getBlock().getNumBytes();
+      blockId = cur.getBlock().getBlockId();
+      blockSize = cur.getBlock().getNumBytes();
       String blk = "blk_" + Long.toString(blockId);
-      out.print("<td>"+blk+":</td>");
-      DatanodeInfo[] locs = blocks[i].getLocations();
+      out.print("<td>"+cur+":</td>");
+      DatanodeInfo[] locs = cur.getLocations();
       for(int j=0; j<locs.length; j++) {
         String datanodeAddr = locs[j].getName();
         datanodePort = Integer.parseInt(datanodeAddr.substring(
@@ -230,11 +231,12 @@
     //determine data for the next link
     if (startOffset + chunkSizeToView >= blockSize) {
       //we have to go to the next block from this point onwards
-      LocatedBlock[] blocks = dfs.namenode.open(filename);
-      for (int i = 0; i < blocks.length; i++) {
-        if (blocks[i].getBlock().getBlockId() == blockId) {
-          if (i != blocks.length - 1) {
-            LocatedBlock nextBlock = blocks[i+1];
+      List<LocatedBlock> blocks = 
+        dfs.namenode.getBlockLocations(filename, 0, Long.MAX_VALUE).getLocatedBlocks();
+      for (int i = 0; i < blocks.size(); i++) {
+        if (blocks.get(i).getBlock().getBlockId() == blockId) {
+          if (i != blocks.size() - 1) {
+            LocatedBlock nextBlock = blocks.get(i+1);
             nextBlockIdStr = Long.toString(nextBlock.getBlock().getBlockId());
             nextStartOffset = 0;
             nextBlockSize = nextBlock.getBlock().getNumBytes();
@@ -277,11 +279,12 @@
     int prevPort = req.getServerPort();
     int prevDatanodePort = datanodePort;
     if (startOffset == 0) {
-      LocatedBlock [] blocks = dfs.namenode.open(filename);
-      for (int i = 0; i < blocks.length; i++) {
-        if (blocks[i].getBlock().getBlockId() == blockId) {
+      List<LocatedBlock> blocks = 
+        dfs.namenode.getBlockLocations(filename, 0, Long.MAX_VALUE).getLocatedBlocks();
+      for (int i = 0; i < blocks.size(); i++) {
+        if (blocks.get(i).getBlock().getBlockId() == blockId) {
           if (i != 0) {
-            LocatedBlock prevBlock = blocks[i-1];
+            LocatedBlock prevBlock = blocks.get(i-1);
             prevBlockIdStr = Long.toString(prevBlock.getBlock().getBlockId());
             prevStartOffset = prevBlock.getBlock().getNumBytes() - chunkSizeToView;
             if (prevStartOffset < 0)

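With getBlockLocations() returning a List<LocatedBlock>, the next/previous navigation in browseBlock.jsp becomes plain index arithmetic on the list. A minimal sketch of the "block after blockId" lookup, assuming a DFSClient dfs built as in the JSP, a hypothetical file name, and a long blockId taken from the request.

// Sketch: find the block that follows a given block id within a file.
List<LocatedBlock> blocks =
    dfs.namenode.getBlockLocations("/user/demo/file.txt", 0, Long.MAX_VALUE)
        .getLocatedBlocks();
LocatedBlock nextBlock = null;
for (int i = 0; i < blocks.size(); i++) {
  if (blocks.get(i).getBlock().getBlockId() == blockId) {
    if (i != blocks.size() - 1) {
      nextBlock = blocks.get(i + 1);            // stays null if this was the last block
    }
    break;
  }
}
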
Modified: lucene/hadoop/trunk/src/webapps/datanode/browseDirectory.jsp
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/webapps/datanode/browseDirectory.jsp?view=diff&rev=540715&r1=540714&r2=540715
==============================================================================
--- lucene/hadoop/trunk/src/webapps/datanode/browseDirectory.jsp (original)
+++ lucene/hadoop/trunk/src/webapps/datanode/browseDirectory.jsp Tue May 22 12:40:11 2007
@@ -33,12 +33,15 @@
     DFSClient dfs = new DFSClient(jspHelper.nameNodeAddr, jspHelper.conf);
     UTF8 target = new UTF8(dir);
     if( !dfs.isDirectory(target) ) { // a file
-      LocatedBlock[] blocks = dfs.namenode.open(dir);
-      DatanodeInfo [] locations = blocks[0].getLocations();
+      List<LocatedBlock> blocks = 
+        dfs.namenode.getBlockLocations(dir, 0, 1).getLocatedBlocks();
+      
+      LocatedBlock firstBlock = blocks.get(0);
+      DatanodeInfo [] locations = firstBlock.getLocations();
       if (locations.length == 0) {
         out.print("Empty file");
       } else {
-        DatanodeInfo chosenNode = jspHelper.bestNode(blocks[0]);
+        DatanodeInfo chosenNode = jspHelper.bestNode(firstBlock);
         String fqdn = InetAddress.getByName(chosenNode.getHost()).
                                   getCanonicalHostName();
         String datanodeAddr = chosenNode.getName();
@@ -49,8 +52,8 @@
         String redirectLocation = "http://"+fqdn+":" +
                              chosenNode.getInfoPort() + 
                              "/browseBlock.jsp?blockId=" +
-                             blocks[0].getBlock().getBlockId() +
-                             "&blockSize=" + blocks[0].getBlock().getNumBytes() +
+                             firstBlock.getBlock().getBlockId() +
+                             "&blockSize=" + firstBlock.getBlock().getNumBytes() +
                              "&filename=" + URLEncoder.encode(dir, "UTF-8") + 
                              "&datanodePort=" + datanodePort + 
                              "&namenodeInfoPort=" + namenodeInfoPort;
@@ -87,9 +90,9 @@
       //Get the location of the first block of the file
       if (files[i].getPath().endsWith(".crc")) continue;
       if (!files[i].isDir()) {
-        LocatedBlock[] blocks = dfs.namenode.open(files[i].getPath());
-
-        DatanodeInfo [] locations = blocks[0].getLocations();
+        List<LocatedBlock> blocks = 
+          dfs.namenode.getBlockLocations(files[i].getPath(), 0, 1).getLocatedBlocks();
+        DatanodeInfo [] locations = blocks.get(0).getLocations();
         if (locations.length == 0) {
           cols[0] = files[i].getName();
         } else {

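Note the third argument in the directory listing above: getBlockLocations(path, 0, 1) asks for the byte range starting at offset 0 with length 1, so at most the first block's locations come back, which is all the listing needs, instead of the full block map of every file. A minimal sketch of that lookup with a hypothetical path:

// Sketch: fetch locations for the first block only (offset 0, length 1).
List<LocatedBlock> blocks =
    dfs.namenode.getBlockLocations("/user/demo/file.txt", 0, 1).getLocatedBlocks();
if (blocks.isEmpty() || blocks.get(0).getLocations().length == 0) {
  System.out.println("Empty file");
} else {
  LocatedBlock firstBlock = blocks.get(0);
  DatanodeInfo firstLoc = firstBlock.getLocations()[0];    // any replica would do here
  System.out.println("blk_" + firstBlock.getBlock().getBlockId()
      + " available on " + firstLoc.getName());
}
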
Modified: lucene/hadoop/trunk/src/webapps/datanode/tail.jsp
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/webapps/datanode/tail.jsp?view=diff&rev=540715&r1=540714&r2=540715
==============================================================================
--- lucene/hadoop/trunk/src/webapps/datanode/tail.jsp (original)
+++ lucene/hadoop/trunk/src/webapps/datanode/tail.jsp Tue May 22 12:40:11 2007
@@ -55,13 +55,14 @@
     //fetch the block from the datanode that has the last block for this file
     DFSClient dfs = new DFSClient(jspHelper.nameNodeAddr, 
                                          jspHelper.conf);
-    LocatedBlock blocks[] = dfs.namenode.open(filename); 
-    if (blocks == null || blocks.length == 0) {
+    List<LocatedBlock> blocks = 
+      dfs.namenode.getBlockLocations(filename, 0, Long.MAX_VALUE).getLocatedBlocks();
+    if (blocks == null || blocks.size() == 0) {
       out.print("No datanodes contain blocks of file "+filename);
       dfs.close();
       return;
     }
-    LocatedBlock lastBlk = blocks[blocks.length - 1];
+    LocatedBlock lastBlk = blocks.get(blocks.size() - 1);
     long blockSize = lastBlk.getBlock().getNumBytes();
     long blockId = lastBlk.getBlock().getBlockId();
     DatanodeInfo chosenNode;

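For tail.jsp only the last block matters, and with a List that is simply get(size() - 1). A minimal sketch of that step, mirroring the guard in the JSP above and using a hypothetical path:

// Sketch: pick the last block of a file, e.g. to read its tail from a datanode.
List<LocatedBlock> blocks =
    dfs.namenode.getBlockLocations("/user/demo/file.txt", 0, Long.MAX_VALUE)
        .getLocatedBlocks();
if (blocks == null || blocks.size() == 0) {
  System.out.println("No datanodes contain blocks of this file");
} else {
  LocatedBlock lastBlk = blocks.get(blocks.size() - 1);
  System.out.println("last block blk_" + lastBlk.getBlock().getBlockId()
      + ", " + lastBlk.getBlock().getNumBytes() + " bytes");
}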

