hadoop-hdfs-commits mailing list archives

From hair...@apache.org
Subject svn commit: r800624 [2/2] - in /hadoop/hdfs/branches/HDFS-265: ./ ivy/ src/contrib/hdfsproxy/ src/java/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/hadoop/hdfs/server/balancer/ src/java/org/apache/hado...
Date Mon, 03 Aug 2009 23:34:05 GMT
Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java Mon Aug  3 23:34:04 2009
@@ -30,259 +30,6 @@
  * the datanodes that store the block.
  */
 class BlocksMap {
-        
-  /**
-   * Internal class for block metadata.
-   */
-  static class BlockInfo extends Block {
-    private INodeFile inode;
-
-    /**
-     * This array contains triplets of references.
-     * For each i-th datanode the block belongs to
-     * triplets[3*i] is the reference to the DatanodeDescriptor
-     * and triplets[3*i+1] and triplets[3*i+2] are references 
-     * to the previous and the next blocks, respectively, in the 
-     * list of blocks belonging to this data-node.
-     */
-    private Object[] triplets;
-
-    BlockInfo(Block blk, int replication) {
-      super(blk);
-      this.triplets = new Object[3*replication];
-      this.inode = null;
-    }
-
-    INodeFile getINode() {
-      return inode;
-    }
-
-    DatanodeDescriptor getDatanode(int index) {
-      assert this.triplets != null : "BlockInfo is not initialized";
-      assert index >= 0 && index*3 < triplets.length : "Index is out of bound";
-      DatanodeDescriptor node = (DatanodeDescriptor)triplets[index*3];
-      assert node == null || 
-          DatanodeDescriptor.class.getName().equals(node.getClass().getName()) : 
-                "DatanodeDescriptor is expected at " + index*3;
-      return node;
-    }
-
-    BlockInfo getPrevious(int index) {
-      assert this.triplets != null : "BlockInfo is not initialized";
-      assert index >= 0 && index*3+1 < triplets.length : "Index is out of bound";
-      BlockInfo info = (BlockInfo)triplets[index*3+1];
-      assert info == null || 
-          BlockInfo.class.getName().equals(info.getClass().getName()) : 
-                "BlockInfo is expected at " + index*3;
-      return info;
-    }
-
-    BlockInfo getNext(int index) {
-      assert this.triplets != null : "BlockInfo is not initialized";
-      assert index >= 0 && index*3+2 < triplets.length : "Index is out of bound";
-      BlockInfo info = (BlockInfo)triplets[index*3+2];
-      assert info == null || 
-          BlockInfo.class.getName().equals(info.getClass().getName()) : 
-                "BlockInfo is expected at " + index*3;
-      return info;
-    }
-
-    void setDatanode(int index, DatanodeDescriptor node) {
-      assert this.triplets != null : "BlockInfo is not initialized";
-      assert index >= 0 && index*3 < triplets.length : "Index is out of bound";
-      triplets[index*3] = node;
-    }
-
-    void setPrevious(int index, BlockInfo to) {
-      assert this.triplets != null : "BlockInfo is not initialized";
-      assert index >= 0 && index*3+1 < triplets.length : "Index is out of bound";
-      triplets[index*3+1] = to;
-    }
-
-    void setNext(int index, BlockInfo to) {
-      assert this.triplets != null : "BlockInfo is not initialized";
-      assert index >= 0 && index*3+2 < triplets.length : "Index is out of bound";
-      triplets[index*3+2] = to;
-    }
-
-    private int getCapacity() {
-      assert this.triplets != null : "BlockInfo is not initialized";
-      assert triplets.length % 3 == 0 : "Malformed BlockInfo";
-      return triplets.length / 3;
-    }
-
-    /**
-     * Ensure that there is enough space to include num more triplets.
-     * @return first free triplet index.
-     */
-    private int ensureCapacity(int num) {
-      assert this.triplets != null : "BlockInfo is not initialized";
-      int last = numNodes();
-      if(triplets.length >= (last+num)*3)
-        return last;
-      /* Not enough space left. Create a new array. Should normally 
-       * happen only when replication is manually increased by the user. */
-      Object[] old = triplets;
-      triplets = new Object[(last+num)*3];
-      for(int i=0; i < last*3; i++) {
-        triplets[i] = old[i];
-      }
-      return last;
-    }
-
-    /**
-     * Count the number of data-nodes the block belongs to.
-     */
-    int numNodes() {
-      assert this.triplets != null : "BlockInfo is not initialized";
-      assert triplets.length % 3 == 0 : "Malformed BlockInfo";
-      for(int idx = getCapacity()-1; idx >= 0; idx--) {
-        if(getDatanode(idx) != null)
-          return idx+1;
-      }
-      return 0;
-    }
-
-    /**
-     * Add data-node this block belongs to.
-     */
-    boolean addNode(DatanodeDescriptor node) {
-      if(findDatanode(node) >= 0) // the node is already there
-        return false;
-      // find the last null node
-      int lastNode = ensureCapacity(1);
-      setDatanode(lastNode, node);
-      setNext(lastNode, null);
-      setPrevious(lastNode, null);
-      return true;
-    }
-
-    /**
-     * Remove data-node from the block.
-     */
-    boolean removeNode(DatanodeDescriptor node) {
-      int dnIndex = findDatanode(node);
-      if(dnIndex < 0) // the node is not found
-        return false;
-      assert getPrevious(dnIndex) == null && getNext(dnIndex) == null : 
-        "Block is still in the list and must be removed first.";
-      // find the last not null node
-      int lastNode = numNodes()-1; 
-      // replace current node triplet by the lastNode one 
-      setDatanode(dnIndex, getDatanode(lastNode));
-      setNext(dnIndex, getNext(lastNode)); 
-      setPrevious(dnIndex, getPrevious(lastNode)); 
-      // set the last triplet to null
-      setDatanode(lastNode, null);
-      setNext(lastNode, null); 
-      setPrevious(lastNode, null); 
-      return true;
-    }
-
-    /**
-     * Find specified DatanodeDescriptor.
-     * @param dn
-     * @return index or -1 if not found.
-     */
-    int findDatanode(DatanodeDescriptor dn) {
-      int len = getCapacity();
-      for(int idx = 0; idx < len; idx++) {
-        DatanodeDescriptor cur = getDatanode(idx);
-        if(cur == dn)
-          return idx;
-        if(cur == null)
-          break;
-      }
-      return -1;
-    }
-
-    /**
-     * Insert this block into the head of the list of blocks 
-     * related to the specified DatanodeDescriptor.
-     * If the head is null then form a new list.
-     * @return current block as the new head of the list.
-     */
-    BlockInfo listInsert(BlockInfo head, DatanodeDescriptor dn) {
-      int dnIndex = this.findDatanode(dn);
-      assert dnIndex >= 0 : "Data node is not found: current";
-      assert getPrevious(dnIndex) == null && getNext(dnIndex) == null : 
-              "Block is already in the list and cannot be inserted.";
-      this.setPrevious(dnIndex, null);
-      this.setNext(dnIndex, head);
-      if(head != null)
-        head.setPrevious(head.findDatanode(dn), this);
-      return this;
-    }
-
-    /**
-     * Remove this block from the list of blocks 
-     * related to the specified DatanodeDescriptor.
-     * If this block is the head of the list then return the next block as 
-     * the new head.
-     * @return the new head of the list or null if the list becomes
-     * empty after deletion.
-     */
-    BlockInfo listRemove(BlockInfo head, DatanodeDescriptor dn) {
-      if(head == null)
-        return null;
-      int dnIndex = this.findDatanode(dn);
-      if(dnIndex < 0) // this block is not on the data-node list
-        return head;
-
-      BlockInfo next = this.getNext(dnIndex);
-      BlockInfo prev = this.getPrevious(dnIndex);
-      this.setNext(dnIndex, null);
-      this.setPrevious(dnIndex, null);
-      if(prev != null)
-        prev.setNext(prev.findDatanode(dn), next);
-      if(next != null)
-        next.setPrevious(next.findDatanode(dn), prev);
-      if(this == head)  // removing the head
-        head = next;
-      return head;
-    }
-
-    int listCount(DatanodeDescriptor dn) {
-      int count = 0;
-      for(BlockInfo cur = this; cur != null;
-            cur = cur.getNext(cur.findDatanode(dn)))
-        count++;
-      return count;
-    }
-
-    boolean listIsConsistent(DatanodeDescriptor dn) {
-      // going forward
-      int count = 0;
-      BlockInfo next, nextPrev;
-      BlockInfo cur = this;
-      while(cur != null) {
-        next = cur.getNext(cur.findDatanode(dn));
-        if(next != null) {
-          nextPrev = next.getPrevious(next.findDatanode(dn));
-          if(cur != nextPrev) {
-            System.out.println("Inconsistent list: cur->next->prev != cur");
-            return false;
-          }
-        }
-        cur = next;
-        count++;
-      }
-      return true;
-    }
-
-    @Override
-    public int hashCode() {
-      // Super implementation is sufficient
-      return super.hashCode();
-    }
-    
-    @Override
-    public boolean equals(Object obj) {
-      // Sufficient to rely on super's implementation
-      return (this == obj) || super.equals(obj);
-    }
-  }
-
   private static class NodeIterator implements Iterator<DatanodeDescriptor> {
     private BlockInfo blockInfo;
     private int nextIdx = 0;
@@ -320,29 +67,22 @@
     this.map = new HashMap<BlockInfo, BlockInfo>(initialCapacity, loadFactor);
   }
 
-  /**
-   * Add BlockInfo if mapping does not exist.
-   */
-  private BlockInfo checkBlockInfo(Block b, int replication) {
-    BlockInfo info = map.get(b);
-    if (info == null) {
-      info = new BlockInfo(b, replication);
-      map.put(info, info);
-    }
-    return info;
-  }
-
   INodeFile getINode(Block b) {
     BlockInfo info = map.get(b);
-    return (info != null) ? info.inode : null;
+    return (info != null) ? info.getINode() : null;
   }
 
   /**
    * Add block b belonging to the specified file inode to the map.
    */
   BlockInfo addINode(Block b, INodeFile iNode) {
-    BlockInfo info = checkBlockInfo(b, iNode.getReplication());
-    info.inode = iNode;
+    int replication = iNode.getReplication();
+    BlockInfo info = map.get(b);
+    if (info == null) {
+      info = new BlockInfo(b, replication);
+      map.put(info, info);
+    }
+    info.setINode(iNode);
     return info;
   }
 
@@ -356,7 +96,7 @@
     if (blockInfo == null)
       return;
 
-    blockInfo.inode = null;
+    blockInfo.setINode(null);
     for(int idx = blockInfo.numNodes()-1; idx >= 0; idx--) {
       DatanodeDescriptor dn = blockInfo.getDatanode(idx);
       dn.removeBlock(blockInfo); // remove from the list and wipe the location
@@ -379,15 +119,6 @@
     return info == null ? 0 : info.numNodes();
   }
 
-  /** Returns true if the node does not already exist and was added;
-   * false if the node already exists. */
-  boolean addNode(Block b, DatanodeDescriptor node, int replication) {
-    // insert into the map if not there yet
-    BlockInfo info = checkBlockInfo(b, replication);
-    // add block to the data-node list and the node to the block info
-    return node.addBlock(info);
-  }
-
   /**
    * Remove data-node reference from the block.
    * Remove the block from the block map
@@ -402,7 +133,7 @@
     boolean removed = node.removeBlock(info);
 
     if (info.getDatanode(0) == null     // no datanodes left
-              && info.inode == null) {  // does not belong to a file
+              && info.getINode() == null) {  // does not belong to a file
       map.remove(b);  // remove block from the map
     }
     return removed;
@@ -437,7 +168,7 @@
   }
   
   /** Get the capacity of the HashMap that stores blocks */
-  public int getCapacity() {
+  int getCapacity() {
     // Capacity doubles every time the map size reaches the threshold
     while (map.size() > (int)(capacity * loadFactor)) {
       capacity <<= 1;
@@ -446,7 +177,7 @@
   }
   
   /** Get the load factor of the map */
-  public float getLoadFactor() {
+  float getLoadFactor() {
     return loadFactor;
   }
 }
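
For reference, the BlockInfo class removed above stores replica information in a
"triplets" layout: replica i of a block occupies three consecutive array slots,
where slot 3*i references the DatanodeDescriptor and slots 3*i+1 and 3*i+2 link
to the previous and next block in that datanode's block list, so a block can be
unlinked from a datanode's list without scanning it. The array grows beyond the
initial 3*replication slots only when replication is raised. A minimal
standalone sketch of the layout, with Object slots standing in for the real
DatanodeDescriptor and BlockInfo references:

    class TripletsSketch {
      private final Object[] triplets;                  // 3 slots per replica

      TripletsSketch(int replication) {
        this.triplets = new Object[3 * replication];
      }

      Object getDatanode(int i) { return triplets[3 * i]; }     // who stores the block
      Object getPrevious(int i) { return triplets[3 * i + 1]; } // prev block on that node
      Object getNext(int i)     { return triplets[3 * i + 2]; } // next block on that node
    }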

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java Mon Aug  3 23:34:04 2009
@@ -25,7 +25,6 @@
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.io.Text;

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Mon Aug  3 23:34:04 2009
@@ -33,7 +33,6 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
-import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
 
 /*************************************************
  * FSDirectory stores the filesystem directory state.
@@ -277,8 +276,7 @@
                   fileNode.getPreferredBlockSize()*fileNode.getReplication());
       
       // associate the new list of blocks with this file
-      getBlockManager().addINode(block, fileNode);
-      BlockInfo blockInfo = getBlockManager().getStoredBlock(block);
+      BlockInfo blockInfo = getBlockManager().addINode(block, fileNode);
       fileNode.addBlock(blockInfo);
 
       NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: "
@@ -308,8 +306,10 @@
    */
   void closeFile(String path, INodeFile file) {
     waitForReady();
+    long now = FSNamesystem.now();
     synchronized (rootDir) {
       // file is closed
+      file.setModificationTimeForce(now);
       fsImage.getEditLog().logCloseFile(path, file);
       if (NameNode.stateChangeLog.isDebugEnabled()) {
         NameNode.stateChangeLog.debug("DIR* FSDirectory.closeFile: "

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Mon Aug  3 23:34:04 2009
@@ -320,7 +320,7 @@
    * @param es - stream to remove
    * @return the matching stream
    */
-  public StorageDirectory getStorage(EditLogOutputStream es) {
+  StorageDirectory getStorage(EditLogOutputStream es) {
     String parentStorageDir = ((EditLogFileOutputStream)es).getFile()
     .getParentFile().getParentFile().getAbsolutePath();
 
@@ -339,7 +339,7 @@
    * @param sd
    * @return the matching stream
    */
-  public EditLogOutputStream getEditsStream(StorageDirectory sd) {
+  synchronized EditLogOutputStream getEditsStream(StorageDirectory sd) {
 	for (EditLogOutputStream es : editStreams) {
 	  File parentStorageDir = ((EditLogFileOutputStream)es).getFile()
 	  .getParentFile().getParentFile();
@@ -780,68 +780,76 @@
 
     // Fetch the transactionId of this thread. 
     long mytxid = myTransactionId.get().txid;
-
-    synchronized (this) {
-      assert editStreams.size() > 0 : "no editlog streams";
-      printStatistics(false);
-
-      // if somebody is already syncing, then wait
-      while (mytxid > synctxid && isSyncRunning) {
-        try {
-          wait(1000);
-        } catch (InterruptedException ie) { 
+    EditLogOutputStream streams[] = null;
+    boolean sync = false;
+    try {
+      synchronized (this) {
+        assert editStreams.size() > 0 : "no editlog streams";
+        printStatistics(false);
+  
+        // if somebody is already syncing, then wait
+        while (mytxid > synctxid && isSyncRunning) {
+          try {
+            wait(1000);
+          } catch (InterruptedException ie) { 
+          }
         }
-      }
-
-      //
-      // If this transaction was already flushed, then nothing to do
-      //
-      if (mytxid <= synctxid) {
-        numTransactionsBatchedInSync++;
-        if (metrics != null) // Metrics is non-null only when used inside name node
-          metrics.transactionsBatchedInSync.inc();
-        return;
-      }
-   
-      // now, this thread will do the sync
-      syncStart = txid;
-      isSyncRunning = true;   
-
-      // swap buffers
-      for(EditLogOutputStream eStream : editStreams) {
-        eStream.setReadyToFlush();
-      }
-    }
-
-    // do the sync
-    long start = FSNamesystem.now();
-    for (int idx = 0; idx < editStreams.size(); idx++) {
-      EditLogOutputStream eStream = editStreams.get(idx);
-      try {
-        eStream.flush();
-      } catch (IOException ie) {
+  
         //
-        // remember the streams that encountered an error.
+        // If this transaction was already flushed, then nothing to do
         //
-        if (errorStreams == null) {
-          errorStreams = new ArrayList<EditLogOutputStream>(1);
+        if (mytxid <= synctxid) {
+          numTransactionsBatchedInSync++;
+          if (metrics != null) // Metrics is non-null only when used inside name node
+            metrics.transactionsBatchedInSync.inc();
+          return;
         }
-        errorStreams.add(eStream);
-        FSNamesystem.LOG.error("Unable to sync edit log. " +
-                               "Fatal Error.");
+     
+        // now, this thread will do the sync
+        syncStart = txid;
+        isSyncRunning = true;
+        sync = true;
+  
+        // swap buffers
+        for(EditLogOutputStream eStream : editStreams) {
+          eStream.setReadyToFlush();
+        }
+        streams = 
+          editStreams.toArray(new EditLogOutputStream[editStreams.size()]);
       }
+  
+      // do the sync
+      long start = FSNamesystem.now();
+      for (int idx = 0; idx < streams.length; idx++) {
+        EditLogOutputStream eStream = streams[idx];
+        try {
+          eStream.flush();
+        } catch (IOException ie) {
+          //
+          // remember the streams that encountered an error.
+          //
+          if (errorStreams == null) {
+            errorStreams = new ArrayList<EditLogOutputStream>(1);
+          }
+          errorStreams.add(eStream);
+          FSNamesystem.LOG.error("Unable to sync edit log. " +
+                                 "Fatal Error.");
+        }
+      }
+      long elapsed = FSNamesystem.now() - start;
+      processIOError(errorStreams, true);
+  
+      if (metrics != null) // Metrics non-null only when used inside name node
+        metrics.syncs.inc(elapsed);
+    } finally {
+      synchronized (this) {
+        synctxid = syncStart;
+        if (sync) {
+          isSyncRunning = false;
+        }
+        this.notifyAll();
+     }
     }
-    long elapsed = FSNamesystem.now() - start;
-
-    synchronized (this) {
-       processIOError(errorStreams, true);
-       synctxid = syncStart;
-       isSyncRunning = false;
-       this.notifyAll();
-    }
-
-    if (metrics != null) // Metrics is non-null only when used inside name node
-      metrics.syncs.inc(elapsed);
   }
 
   //
@@ -1030,14 +1038,6 @@
     return size;
   }
   
-  public String listEditsStreams() {
-    StringBuffer buf = new StringBuffer();
-    for (EditLogOutputStream os : editStreams) {
-      buf.append(os.getName()  + ";");
-    }
-    return buf.toString();
-  }
-
   /**
    * Closes the current edit log and opens edits.new. 
    */
@@ -1272,7 +1272,7 @@
    * @param nnReg this (active) name-node registration.
    * @throws IOException
    */
-  void logJSpoolStart(NamenodeRegistration bnReg, // backup node
+  synchronized void logJSpoolStart(NamenodeRegistration bnReg, // backup node
                       NamenodeRegistration nnReg) // active name-node
   throws IOException {
     if(bnReg.isRole(NamenodeRole.CHECKPOINT))
@@ -1331,22 +1331,27 @@
     }
 
     public boolean hasNext() {
-      if(editStreams == null || 
-         editStreams.isEmpty() || nextIndex >= editStreams.size())
-        return false;
-      while(nextIndex < editStreams.size()
-            && !editStreams.get(nextIndex).getType().isOfType(type))
-        nextIndex++;
-      return nextIndex < editStreams.size();
+      synchronized(FSEditLog.this) {
+        if(editStreams == null || 
+           editStreams.isEmpty() || nextIndex >= editStreams.size())
+          return false;
+        while(nextIndex < editStreams.size()
+              && !editStreams.get(nextIndex).getType().isOfType(type))
+          nextIndex++;
+        return nextIndex < editStreams.size();
+      }
     }
 
     public EditLogOutputStream next() {
-      EditLogOutputStream stream = editStreams.get(nextIndex);
-      prevIndex = nextIndex;
-      nextIndex++;
-      while(nextIndex < editStreams.size()
-          && !editStreams.get(nextIndex).getType().isOfType(type))
-      nextIndex++;
+      EditLogOutputStream stream = null;
+      synchronized(FSEditLog.this) {
+        stream = editStreams.get(nextIndex);
+        prevIndex = nextIndex;
+        nextIndex++;
+        while(nextIndex < editStreams.size()
+            && !editStreams.get(nextIndex).getType().isOfType(type))
+        nextIndex++;
+      }
       return stream;
     }
 
@@ -1357,9 +1362,11 @@
     }
 
     void replace(EditLogOutputStream newStream) {
-      assert 0 <= prevIndex && prevIndex < editStreams.size() :
-                                                        "Index out of bound.";
-      editStreams.set(prevIndex, newStream);
+      synchronized (FSEditLog.this) {
+        assert 0 <= prevIndex && prevIndex < editStreams.size() :
+                                                          "Index out of bound.";
+        editStreams.set(prevIndex, newStream);
+      }
     }
   }
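
The logSync() rework above is a lock-splitting pattern: a thread claims the
sync and snapshots the stream list while holding the monitor, performs the slow
flush with no lock held, and restores state in a finally block so waiting
threads are always notified even if a flush throws. The stream iterator
changes above synchronize on FSEditLog.this for the same reason, since the
list can now be read while other threads mutate it. A minimal compilable
sketch of the same shape, with Runnable standing in for EditLogOutputStream
and error handling omitted:

    import java.util.ArrayList;
    import java.util.List;

    class SyncSketch {
      private final List<Runnable> streams = new ArrayList<Runnable>();
      private long txid, synctxid, syncStart;
      private boolean isSyncRunning;

      void logSync(long mytxid) {
        Runnable[] snapshot = null;
        boolean sync = false;
        try {
          synchronized (this) {
            while (mytxid > synctxid && isSyncRunning) {
              try { wait(1000); } catch (InterruptedException ignored) {}
            }
            if (mytxid <= synctxid) return;   // batched into an earlier sync
            syncStart = txid;
            isSyncRunning = true;
            sync = true;
            snapshot = streams.toArray(new Runnable[streams.size()]); // copy under lock
          }
          for (Runnable s : snapshot) s.run(); // slow I/O, no lock held
        } finally {
          synchronized (this) {
            synctxid = syncStart;
            if (sync) isSyncRunning = false;
            notifyAll();                       // always wake waiting sync threads
          }
        }
      }
    }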
 

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Mon Aug  3 23:34:04 2009
@@ -58,7 +58,6 @@
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
-import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
 import org.apache.hadoop.hdfs.server.namenode.JournalStream.JournalType;
 import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Mon Aug  3 23:34:04 2009
@@ -26,7 +26,6 @@
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
-import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics;
 import org.apache.hadoop.security.AccessControlException;
@@ -582,7 +581,7 @@
    * return the length of the added block; 0 if the block is not added
    */
   private long addBlock(Block block, List<BlockWithLocations> results) {
-    ArrayList<String> machineSet = blockManager.addBlock(block);
+    ArrayList<String> machineSet = blockManager.getValidLocations(block);
     if(machineSet.size() == 0) {
       return 0;
     } else {
@@ -1338,7 +1337,7 @@
    */
   public synchronized void markBlockAsCorrupt(Block blk, DatanodeInfo dn)
     throws IOException {
-    blockManager.markBlockAsCorrupt(blk, dn);
+    blockManager.findAndMarkBlockAsCorrupt(blk, dn);
   }
 
 

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Mon Aug  3 23:34:04 2009
@@ -24,7 +24,6 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
 
 class INodeFile extends INode {
   static final FsPermission UMASK = FsPermission.createImmutable((short)0111);

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java Mon Aug  3 23:34:04 2009
@@ -21,7 +21,6 @@
 
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
 
 
 class INodeFileUnderConstruction extends INodeFile {

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Mon Aug  3 23:34:04 2009
@@ -818,6 +818,7 @@
   /**
    * Returns the size of the current edit log.
    */
+  @Deprecated
   public long getEditLogSize() throws IOException {
     return namesystem.getEditLogSize();
   }
@@ -825,6 +826,7 @@
   /**
    * Roll the edit log.
    */
+  @Deprecated
   public CheckpointSignature rollEditLog() throws IOException {
     return namesystem.rollEditLog();
   }
@@ -832,6 +834,7 @@
   /**
    * Roll the image 
    */
+  @Deprecated
   public void rollFsImage() throws IOException {
     namesystem.rollFSImage();
   }
@@ -1150,9 +1153,11 @@
       case FORMAT:
         boolean aborted = format(conf, true);
         System.exit(aborted ? 1 : 0);
+        return null; // avoid javac warning
       case FINALIZE:
         aborted = finalize(conf, true);
         System.exit(aborted ? 1 : 0);
+        return null; // avoid javac warning
       case BACKUP:
       case CHECKPOINT:
         return new BackupNode(conf, startOpt.toNodeRole());

Modified: hadoop/hdfs/branches/HDFS-265/src/test/findbugsExcludeFile.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/findbugsExcludeFile.xml?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/findbugsExcludeFile.xml (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/findbugsExcludeFile.xml Mon Aug  3 23:34:04 2009
@@ -207,9 +207,17 @@
        <Bug pattern="OBL_UNSATISFIED_OBLIGATION" />
      </Match>
 
+     <!--
+       CreateBlockWriteStreams and getTmpInputStreams are pretty much like a stream constructor.
+       The newly created streams are not supposed to be closed in the constructor. So ignore
+       the OBL warning.
+     -->
      <Match>
-       <Class name="org.apache.hadoop.examples.ContextFactory" />
-       <Method name="setAttributes" />
+       <Class name="org.apache.hadoop.hdfs.server.datanode.FSDataset" />
+       <Or>
+         <Method name="createBlockWriteStreams" />
+         <Method name="getTmpInputStreams" />
+       </Or>
        <Bug pattern="OBL_UNSATISFIED_OBLIGATION" />
      </Match>
 

Propchange: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Aug  3 23:34:04 2009
@@ -1,2 +1,3 @@
 /hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs:713112
 /hadoop/core/trunk/src/test/hdfs:776175-785643
+/hadoop/hdfs/trunk/src/test/hdfs:796829-800617

Propchange: hadoop/hdfs/branches/HDFS-265/src/test/hdfs-with-mr/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Aug  3 23:34:04 2009
@@ -1,2 +1,3 @@
 /hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs-with-mr:713112
 /hadoop/core/trunk/src/test/hdfs-with-mr:776175-784663
+/hadoop/hdfs/trunk/src/test/hdfs-with-mr:796829-800617

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java Mon Aug  3 23:34:04 2009
@@ -50,10 +50,10 @@
   
       //try reading the block by someone
       DFSClient dfsclient = new DFSClient(CONF);
-      LocatedBlocks blocks = dfsclient.namenode.getBlockLocations(src, 0, 1);
+      LocatedBlocks blocks = dfsclient.getNamenode().getBlockLocations(src, 0, 1);
       LocatedBlock b = blocks.get(0); 
       try {
-        dfsclient.namenode.abandonBlock(b.getBlock(), src, "someone");
+        dfsclient.getNamenode().abandonBlock(b.getBlock(), src, "someone");
         //previous line should throw an exception.
         assertTrue(false);
       }

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java Mon Aug  3 23:34:04 2009
@@ -21,10 +21,18 @@
 import java.io.InputStream;
 import java.io.OutputStream;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.*;
+import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.server.common.*;
+import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
+import org.apache.hadoop.io.*;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.security.AccessControlException;
 
 import junit.framework.TestCase;
 
@@ -34,6 +42,8 @@
  * properly in case of errors.
  */
 public class TestDFSClientRetries extends TestCase {
+  public static final Log LOG =
+    LogFactory.getLog(TestDFSClientRetries.class.getName());
   
   // writes 'len' bytes of data to out.
   private static void writeData(OutputStream out, int len) throws IOException {
@@ -97,4 +107,132 @@
   }
   
   // more tests related to different failure cases can be added here.
+  
+  class TestNameNode implements ClientProtocol
+  {
+    int num_calls = 0;
+    
+    // The total number of calls that can be made to addBlock
+    // before an exception is thrown
+    int num_calls_allowed; 
+    public final String ADD_BLOCK_EXCEPTION = "Testing exception thrown from"
+                                             + "TestDFSClientRetries::"
+                                             + "TestNameNode::addBlock";
+    public final String RETRY_CONFIG
+          = "dfs.client.block.write.locateFollowingBlock.retries";
+          
+    public TestNameNode(Configuration conf) throws IOException
+    {
+      // +1 because the configuration value is the number of retries and
+      // the first call is not a retry (e.g., 2 retries == 3 total
+      // calls allowed)
+      this.num_calls_allowed = conf.getInt(RETRY_CONFIG, 5) + 1;
+    }
+
+    public long getProtocolVersion(String protocol, 
+                                     long clientVersion)
+    throws IOException
+    {
+      return versionID;
+    }
+
+    public LocatedBlock addBlock(String src, String clientName)
+    throws IOException
+    {
+      num_calls++;
+      if (num_calls > num_calls_allowed) { 
+        throw new IOException("addBlock called more times than "
+                              + RETRY_CONFIG
+                              + " allows.");
+      } else {
+          throw new RemoteException(NotReplicatedYetException.class.getName(),
+                                    ADD_BLOCK_EXCEPTION);
+      }
+    }
+    
+    
+    // The following methods are stub methods that are not needed by this mock class
+    
+    public LocatedBlocks  getBlockLocations(String src, long offset, long length) throws IOException { return null; }
+    
+    public void create(String src, FsPermission masked, String clientName, EnumSetWritable<CreateFlag> flag, short replication, long blockSize) throws IOException {}
+    
+    public LocatedBlock append(String src, String clientName) throws IOException { return null; }
+
+    public boolean setReplication(String src, short replication) throws IOException { return false; }
+
+    public void setPermission(String src, FsPermission permission) throws IOException {}
+
+    public void setOwner(String src, String username, String groupname) throws IOException {}
+
+    public void abandonBlock(Block b, String src, String holder) throws IOException {}
+
+    public boolean complete(String src, String clientName) throws IOException { return false; }
+
+    public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {}
+
+    public boolean rename(String src, String dst) throws IOException { return false; }
+
+    public boolean delete(String src) throws IOException { return false; }
+
+    public boolean delete(String src, boolean recursive) throws IOException { return false; }
+
+    public boolean mkdirs(String src, FsPermission masked) throws IOException { return false; }
+
+    public FileStatus[] getListing(String src) throws IOException { return null; }
+
+    public void renewLease(String clientName) throws IOException {}
+
+    public long[] getStats() throws IOException { return null; }
+
+    public DatanodeInfo[] getDatanodeReport(FSConstants.DatanodeReportType type) throws IOException { return null; }
+
+    public long getPreferredBlockSize(String filename) throws IOException { return 0; }
+
+    public boolean setSafeMode(FSConstants.SafeModeAction action) throws IOException { return false; }
+
+    public void saveNamespace() throws IOException {}
+
+    public boolean restoreFailedStorage(String arg) throws AccessControlException { return false; }
+
+    public void refreshNodes() throws IOException {}
+
+    public void finalizeUpgrade() throws IOException {}
+
+    public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action) throws IOException { return null; }
+
+    public void metaSave(String filename) throws IOException {}
+
+    public FileStatus getFileInfo(String src) throws IOException { return null; }
+
+    public ContentSummary getContentSummary(String path) throws IOException { return null; }
+
+    public void setQuota(String path, long namespaceQuota, long diskspaceQuota) throws IOException {}
+
+    public void fsync(String src, String client) throws IOException {}
+
+    public void setTimes(String src, long mtime, long atime) throws IOException {}
+
+  }
+  
+  public void testNotYetReplicatedErrors() throws IOException
+  {   
+    Configuration conf = new Configuration();
+    
+    // allow 1 retry (2 total calls)
+    conf.setInt("dfs.client.block.write.locateFollowingBlock.retries", 1);
+        
+    TestNameNode tnn = new TestNameNode(conf);
+    DFSClient client = new DFSClient(tnn, tnn, conf, null);
+    OutputStream os = client.create("testfile", true);
+    os.write(20); // write one random byte
+    
+    try {
+      os.close();
+    } catch (Exception e) {
+      assertTrue("Retries are not being stopped correctly",
+           e.getMessage().equals(tnn.ADD_BLOCK_EXCEPTION));
+    }
+  }
+  
 }
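
The mock above encodes the retry arithmetic in its constructor: the
configuration value counts retries rather than calls, so a setting of N permits
N + 1 total attempts because the first attempt is not a retry. A minimal sketch
of that budget (illustrative names):

    class RetryBudget {
      private int callsLeft;

      RetryBudget(int configuredRetries) {
        this.callsLeft = configuredRetries + 1; // first attempt is not a retry
      }

      boolean tryCall() {
        return callsLeft-- > 0;                 // true while budget remains
      }
    }

With "dfs.client.block.write.locateFollowingBlock.retries" set to 1, as in
testNotYetReplicatedErrors above, exactly two addBlock() calls are allowed
before the client must give up.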

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java Mon Aug  3 23:34:04 2009
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
+import java.util.EnumSet;
 import java.util.Random;
 
 import javax.security.auth.login.LoginException;
@@ -159,9 +160,9 @@
     // create the file/directory
     switch (op) {
     case CREATE:
-      FSDataOutputStream out = fs.create(name, permission, true, conf.getInt(
-          "io.file.buffer.size", 4096), fs.getDefaultReplication(), fs
-          .getDefaultBlockSize(), null);
+      FSDataOutputStream out = fs.create(name, permission, EnumSet.of(CreateFlag.OVERWRITE), 
+          conf.getInt("io.file.buffer.size", 4096),
+          fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
       out.close();
       break;
     case MKDIRS:

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java Mon Aug  3 23:34:04 2009
@@ -17,6 +17,11 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op.READ_BLOCK;
+import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op.WRITE_BLOCK;
+import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status.ERROR;
+import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status.SUCCESS;
+
 import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
@@ -168,13 +173,13 @@
     // bad ops
     sendBuf.reset();
     sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
-    sendOut.writeByte(DataTransferProtocol.OP_WRITE_BLOCK - 1);
+    sendOut.writeByte(WRITE_BLOCK.code - 1);
     sendRecvData("Wrong Op Code", true);
     
     /* Test OP_WRITE_BLOCK */
     sendBuf.reset();
     sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
-    sendOut.writeByte(DataTransferProtocol.OP_WRITE_BLOCK);
+    WRITE_BLOCK.write(sendOut);
     sendOut.writeLong(newBlockId); // block id
     sendOut.writeLong(0);          // generation stamp
     sendOut.writeInt(0);           // targets in pipeline 
@@ -188,13 +193,13 @@
     // bad bytes per checksum
     sendOut.writeInt(-1-random.nextInt(oneMil));
     recvBuf.reset();
-    recvOut.writeShort((short)DataTransferProtocol.OP_STATUS_ERROR);
+    ERROR.write(recvOut);
     sendRecvData("wrong bytesPerChecksum while writing", true);
 
     sendBuf.reset();
     recvBuf.reset();
     sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
-    sendOut.writeByte(DataTransferProtocol.OP_WRITE_BLOCK);
+    WRITE_BLOCK.write(sendOut);
     sendOut.writeLong(newBlockId);
     sendOut.writeLong(0);          // generation stamp
     sendOut.writeInt(0);           // targets in pipeline 
@@ -204,13 +209,13 @@
 
     // bad number of targets
     sendOut.writeInt(-1-random.nextInt(oneMil));
-    recvOut.writeShort((short)DataTransferProtocol.OP_STATUS_ERROR);
+    ERROR.write(recvOut);
     sendRecvData("bad targets len while writing block " + newBlockId, true);
 
     sendBuf.reset();
     recvBuf.reset();
     sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
-    sendOut.writeByte(DataTransferProtocol.OP_WRITE_BLOCK);
+    WRITE_BLOCK.write(sendOut);
     sendOut.writeLong(++newBlockId);
     sendOut.writeLong(0);          // generation stamp
     sendOut.writeInt(0);           // targets in pipeline 
@@ -228,10 +233,10 @@
     
     // bad data chunk length
     sendOut.writeInt(-1-random.nextInt(oneMil));
-    recvOut.writeShort((short)DataTransferProtocol.OP_STATUS_SUCCESS);
+    SUCCESS.write(recvOut);
     Text.writeString(recvOut, ""); // first bad node
     recvOut.writeLong(100);        // sequencenumber
-    recvOut.writeShort((short)DataTransferProtocol.OP_STATUS_ERROR);
+    ERROR.write(recvOut);
     sendRecvData("negative DATA_CHUNK len while writing block " + newBlockId, 
                  true);
 
@@ -239,7 +244,7 @@
     sendBuf.reset();
     recvBuf.reset();
     sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
-    sendOut.writeByte(DataTransferProtocol.OP_WRITE_BLOCK);
+    WRITE_BLOCK.write(sendOut);
     sendOut.writeLong(++newBlockId);
     sendOut.writeLong(0);          // generation stamp
     sendOut.writeInt(0);           // targets in pipeline 
@@ -258,10 +263,10 @@
     sendOut.writeInt(0);           // chunk length
     sendOut.writeInt(0);           // zero checksum
     //ok finally write a block with 0 len
-    recvOut.writeShort((short)DataTransferProtocol.OP_STATUS_SUCCESS);
+    SUCCESS.write(recvOut);
     Text.writeString(recvOut, ""); // first bad node
     recvOut.writeLong(100);        // sequencenumber
-    recvOut.writeShort((short)DataTransferProtocol.OP_STATUS_SUCCESS);
+    SUCCESS.write(recvOut);
     sendRecvData("Writing a zero len block blockid " + newBlockId, false);
     
     /* Test OP_READ_BLOCK */
@@ -270,13 +275,13 @@
     sendBuf.reset();
     recvBuf.reset();
     sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
-    sendOut.writeByte(DataTransferProtocol.OP_READ_BLOCK);
+    READ_BLOCK.write(sendOut);
     newBlockId = firstBlock.getBlockId()-1;
     sendOut.writeLong(newBlockId);
     sendOut.writeLong(firstBlock.getGenerationStamp());
     sendOut.writeLong(0L);
     sendOut.writeLong(fileLen);
-    recvOut.writeShort((short)DataTransferProtocol.OP_STATUS_ERROR);
+    ERROR.write(recvOut);
     Text.writeString(sendOut, "cl");
     AccessToken.DUMMY_TOKEN.write(sendOut);
     sendRecvData("Wrong block ID " + newBlockId + " for read", false); 
@@ -284,7 +289,7 @@
     // negative block start offset
     sendBuf.reset();
     sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
-    sendOut.writeByte(DataTransferProtocol.OP_READ_BLOCK);
+    READ_BLOCK.write(sendOut);
     sendOut.writeLong(firstBlock.getBlockId());
     sendOut.writeLong(firstBlock.getGenerationStamp());
     sendOut.writeLong(-1L);
@@ -297,7 +302,7 @@
     // bad block start offset
     sendBuf.reset();
     sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
-    sendOut.writeByte(DataTransferProtocol.OP_READ_BLOCK);
+    READ_BLOCK.write(sendOut);
     sendOut.writeLong(firstBlock.getBlockId());
     sendOut.writeLong(firstBlock.getGenerationStamp());
     sendOut.writeLong(fileLen);
@@ -309,10 +314,10 @@
     
     // negative length is ok. Datanode assumes we want to read the whole block.
     recvBuf.reset();
-    recvOut.writeShort((short)DataTransferProtocol.OP_STATUS_SUCCESS);    
+    SUCCESS.write(recvOut);    
     sendBuf.reset();
     sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
-    sendOut.writeByte(DataTransferProtocol.OP_READ_BLOCK);
+    READ_BLOCK.write(sendOut);
     sendOut.writeLong(firstBlock.getBlockId());
     sendOut.writeLong(firstBlock.getGenerationStamp());
     sendOut.writeLong(0);
@@ -324,10 +329,10 @@
     
     // length is more than size of block.
     recvBuf.reset();
-    recvOut.writeShort((short)DataTransferProtocol.OP_STATUS_ERROR);    
+    ERROR.write(recvOut);    
     sendBuf.reset();
     sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
-    sendOut.writeByte(DataTransferProtocol.OP_READ_BLOCK);
+    READ_BLOCK.write(sendOut);
     sendOut.writeLong(firstBlock.getBlockId());
     sendOut.writeLong(firstBlock.getGenerationStamp());
     sendOut.writeLong(0);
@@ -340,7 +345,7 @@
     //At the end of all this, read the file to make sure that succeeds finally.
     sendBuf.reset();
     sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
-    sendOut.writeByte(DataTransferProtocol.OP_READ_BLOCK);
+    READ_BLOCK.write(sendOut);
     sendOut.writeLong(firstBlock.getBlockId());
     sendOut.writeLong(firstBlock.getGenerationStamp());
     sendOut.writeLong(0);
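
The mechanical edits above follow a protocol cleanup: raw opcode and status
constants such as DataTransferProtocol.OP_WRITE_BLOCK and OP_STATUS_ERROR
become enum values (Op.WRITE_BLOCK, Status.ERROR) that carry their wire code
and serialize themselves. A minimal sketch of that shape; the byte values and
names here are illustrative, not the actual DataTransferProtocol definitions:

    import java.io.DataOutput;
    import java.io.IOException;

    enum OpSketch {
      WRITE_BLOCK((byte) 80),
      READ_BLOCK((byte) 81);

      final byte code;

      OpSketch(byte code) { this.code = code; }

      void write(DataOutput out) throws IOException {
        out.writeByte(code); // serialization lives next to the value it encodes
      }
    }

Callers then write WRITE_BLOCK.write(sendOut) instead of
sendOut.writeByte(DataTransferProtocol.OP_WRITE_BLOCK), so the wire encoding is
defined in exactly one place.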

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java Mon Aug  3 23:34:04 2009
@@ -171,7 +171,7 @@
     dfsClient = new DFSClient(new InetSocketAddress("localhost", 
                                         cluster.getNameNodePort()), conf);
     do {
-      blocks = dfsClient.namenode.
+      blocks = dfsClient.getNamenode().
                    getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
       blockCount = blocks.get(0).getLocations().length;
       try {
@@ -190,7 +190,7 @@
 
     // We have 2 good replicas and block is not corrupt
     do {
-      blocks = dfsClient.namenode.
+      blocks = dfsClient.getNamenode().
                    getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
       blockCount = blocks.get(0).getLocations().length;
       try {
@@ -218,7 +218,7 @@
     // We now have the blocks to be marked as corrupt and we get back all
     // its replicas
     do {
-      blocks = dfsClient.namenode.
+      blocks = dfsClient.getNamenode().
                    getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
       blockCount = blocks.get(0).getLocations().length;
       try {
@@ -282,7 +282,7 @@
     
     dfsClient = new DFSClient(new InetSocketAddress("localhost", 
                                         cluster.getNameNodePort()), conf);
-    blocks = dfsClient.namenode.
+    blocks = dfsClient.getNamenode().
                getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
     replicaCount = blocks.get(0).getLocations().length;
 
@@ -294,7 +294,7 @@
         Thread.sleep(1000);
       } catch (InterruptedException ignore) {
       }
-      blocks = dfsClient.namenode.
+      blocks = dfsClient.getNamenode().
                    getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
       replicaCount = blocks.get(0).getLocations().length;
     }
@@ -332,7 +332,7 @@
     }
     
     // Loop until the block recovers after replication
-    blocks = dfsClient.namenode.
+    blocks = dfsClient.getNamenode().
                getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
     replicaCount = blocks.get(0).getLocations().length;
     while (replicaCount != numReplicas) {
@@ -341,7 +341,7 @@
         Thread.sleep(1000);
       } catch (InterruptedException ignore) {
       }
-      blocks = dfsClient.namenode.
+      blocks = dfsClient.getNamenode().
                  getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
       replicaCount = blocks.get(0).getLocations().length;
     }
@@ -358,7 +358,7 @@
       }
       corruptReplicaSize = cluster.getNamesystem().
                             numCorruptReplicas(blk);
-      blocks = dfsClient.namenode.
+      blocks = dfsClient.getNamenode().
                  getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
       replicaCount = blocks.get(0).getLocations().length;
     }

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFSInputChecker.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFSInputChecker.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFSInputChecker.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFSInputChecker.java Mon Aug  3 23:34:04 2009
@@ -21,12 +21,14 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.RandomAccessFile;
+import java.util.EnumSet;
 import java.util.Random;
 
 import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
+import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -54,7 +56,7 @@
   private void writeFile(FileSystem fileSys, Path name) throws IOException {
     // create and write a file that contains three blocks of data
     FSDataOutputStream stm = fileSys.create(name, new FsPermission((short)0777),
-        true, fileSys.getConf().getInt("io.file.buffer.size", 4096),
+        EnumSet.of(CreateFlag.OVERWRITE), fileSys.getConf().getInt("io.file.buffer.size", 4096),
         NUM_OF_DATANODES, BLOCK_SIZE, null);
     stm.write(expected);
     stm.close();
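
This test and TestDFSPermission above both track a FileSystem.create()
signature change: the boolean overwrite parameter becomes an
EnumSet<CreateFlag>, which can express several creation intents through one
parameter. A minimal sketch of the old-to-new mapping; the CreateFlag values
are assumed from the diffs above, and the helper is illustrative:

    import java.util.EnumSet;

    class CreateFlagSketch {
      enum CreateFlag { CREATE, OVERWRITE }

      // Maps the old boolean "overwrite" argument onto the new flag set.
      static EnumSet<CreateFlag> fromOverwrite(boolean overwrite) {
        return overwrite
            ? EnumSet.of(CreateFlag.OVERWRITE) // old create(..., true, ...)
            : EnumSet.of(CreateFlag.CREATE);   // old create(..., false, ...)
      }
    }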

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java Mon Aug  3 23:34:04 2009
@@ -168,7 +168,7 @@
       assertTrue("There should be only one datanode but found " + dn.length,
                   dn.length == 1);
 
-      LocatedBlocks locations = client.namenode.getBlockLocations(
+      LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                   file1.toString(), 0, Long.MAX_VALUE);
       List<LocatedBlock> blocks = locations.getLocatedBlocks();
       FSDataset dataset = (FSDataset) dn[0].data;

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java Mon Aug  3 23:34:04 2009
@@ -160,7 +160,7 @@
 
     //b. Log into one datanode that has one replica of this block.
     //   Find the block file on this datanode and truncate it to zero size.
-    final LocatedBlocks locatedblocks = fs.dfs.namenode.getBlockLocations(p.toString(), 0L, len1);
+    final LocatedBlocks locatedblocks = fs.dfs.getNamenode().getBlockLocations(p.toString(), 0L, len1);
     assertEquals(1, locatedblocks.locatedBlockCount());
     final LocatedBlock lb = locatedblocks.get(0);
     final Block blk = lb.getBlock();
@@ -224,7 +224,7 @@
 
     //check block sizes 
     final long len = fs.getFileStatus(pnew).getLen();
-    final LocatedBlocks locatedblocks = fs.dfs.namenode.getBlockLocations(pnew.toString(), 0L, len);
+    final LocatedBlocks locatedblocks = fs.dfs.getNamenode().getBlockLocations(pnew.toString(), 0L, len);
     final int numblock = locatedblocks.locatedBlockCount();
     for(int i = 0; i < numblock; i++) {
       final LocatedBlock lb = locatedblocks.get(i);

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java Mon Aug  3 23:34:04 2009
@@ -372,7 +372,7 @@
 
       // verify that no blocks are associated with this file
       // bad block allocations were cleaned up earlier.
-      LocatedBlocks locations = client.namenode.getBlockLocations(
+      LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                   file1.toString(), 0, Long.MAX_VALUE);
       System.out.println("locations = " + locations.locatedBlockCount());
       assertTrue("Error blocks were not cleaned up",
@@ -411,18 +411,18 @@
       System.out.println("testFileCreationError2: "
                          + "Created file filestatus.dat with one replicas.");
 
-      LocatedBlocks locations = client.namenode.getBlockLocations(
+      LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                   file1.toString(), 0, Long.MAX_VALUE);
       System.out.println("testFileCreationError2: "
           + "The file has " + locations.locatedBlockCount() + " blocks.");
 
       // add another block to the file
-      LocatedBlock location = client.namenode.addBlock(file1.toString(), 
+      LocatedBlock location = client.getNamenode().addBlock(file1.toString(), 
           client.clientName);
       System.out.println("testFileCreationError2: "
           + "Added block " + location.getBlock());
 
-      locations = client.namenode.getBlockLocations(file1.toString(), 
+      locations = client.getNamenode().getBlockLocations(file1.toString(), 
                                                     0, Long.MAX_VALUE);
       int count = locations.locatedBlockCount();
       System.out.println("testFileCreationError2: "
@@ -439,7 +439,7 @@
       }
 
       // verify that the last block was synchronized.
-      locations = client.namenode.getBlockLocations(file1.toString(), 
+      locations = client.getNamenode().getBlockLocations(file1.toString(), 
                                                     0, Long.MAX_VALUE);
       System.out.println("testFileCreationError2: "
           + "locations = " + locations.locatedBlockCount());
@@ -567,14 +567,14 @@
 
       // verify that new block is associated with this file
       DFSClient client = ((DistributedFileSystem)fs).dfs;
-      LocatedBlocks locations = client.namenode.getBlockLocations(
+      LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                   file1.toString(), 0, Long.MAX_VALUE);
       System.out.println("locations = " + locations.locatedBlockCount());
       assertTrue("Error blocks were not cleaned up for file " + file1,
                  locations.locatedBlockCount() == 3);
 
       // verify filestatus2.dat
-      locations = client.namenode.getBlockLocations(
+      locations = client.getNamenode().getBlockLocations(
                                   file2.toString(), 0, Long.MAX_VALUE);
       System.out.println("locations = " + locations.locatedBlockCount());
       assertTrue("Error blocks were not cleaned up for file " + file2,
@@ -790,7 +790,7 @@
       // wait for the lease to expire
       try {Thread.sleep(5 * leasePeriod);} catch (InterruptedException e) {}
 
-      LocatedBlocks locations = dfs.dfs.namenode.getBlockLocations(
+      LocatedBlocks locations = dfs.dfs.getNamenode().getBlockLocations(
           f, 0, Long.MAX_VALUE);
       assertEquals(1, locations.locatedBlockCount());
       LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java Mon Aug  3 23:34:04 2009
@@ -76,7 +76,7 @@
       boolean notWritten;
       do {
         DFSClient dfsclient = new DFSClient(CONF);
-        locatedBlocks = dfsclient.namenode.
+        locatedBlocks = dfsclient.getNamenode().
           getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks();
         assertEquals(2, locatedBlocks.size());
         notWritten = false;

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java Mon Aug  3 23:34:04 2009
@@ -148,7 +148,7 @@
       writeFile(cluster.getFileSystem(), testPath, numDataNodes);
 
       
-      waitForBlockReplication(testFile, dfsClient.namenode, numDataNodes, 20);
+      waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, 20);
 
       
       Block[][] blocksList = cluster.getAllBlockReports();
@@ -188,7 +188,7 @@
                                   cluster.getNameNodePort()),
                                   conf);
       
-      waitForBlockReplication(testFile, dfsClient.namenode, numDataNodes, -1);
+      waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, -1);
       
     } finally {
       if (cluster != null) {

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java Mon Aug  3 23:34:04 2009
@@ -75,7 +75,7 @@
 
       //get block info for the last block
       LocatedBlock locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(
-          dfs.dfs.namenode, filestr);
+          dfs.dfs.getNamenode(), filestr);
       DatanodeInfo[] datanodeinfos = locatedblock.getLocations();
       assertEquals(REPLICATION_NUM, datanodeinfos.length);
 

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java Mon Aug  3 23:34:04 2009
@@ -17,26 +17,30 @@
  */
 package org.apache.hadoop.hdfs;
 
-import junit.framework.TestCase;
-import java.io.*;
+import java.io.File;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.RandomAccessFile;
+import java.net.InetSocketAddress;
 import java.util.Iterator;
 import java.util.Random;
-import java.net.*;
+
+import junit.framework.TestCase;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.BlockLocation;
 
 /**
  * This class tests the replication of a DFS file.
@@ -167,7 +171,7 @@
     fs.setReplication(file1, (short)2);
   
     // Now get block details and check if the block is corrupt
-    blocks = dfsClient.namenode.
+    blocks = dfsClient.getNamenode().
               getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
     while (blocks.get(0).isCorrupt() != true) {
       try {
@@ -175,7 +179,7 @@
         Thread.sleep(1000);
       } catch (InterruptedException ie) {
       }
-      blocks = dfsClient.namenode.
+      blocks = dfsClient.getNamenode().
                 getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
     }
     replicaCount = blocks.get(0).getLocations().length;
@@ -317,10 +321,10 @@
       out.write(buffer);
       out.close();
       
-      waitForBlockReplication(testFile, dfsClient.namenode, numDataNodes, -1);
+      waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, -1);
 
       // get first block of the file.
-      String block = dfsClient.namenode.
+      String block = dfsClient.getNamenode().
                        getBlockLocations(testFile, 0, Long.MAX_VALUE).
                        get(0).getBlock().getBlockName();
       
@@ -382,7 +386,7 @@
                                   cluster.getNameNodePort()),
                                   conf);
       
-      waitForBlockReplication(testFile, dfsClient.namenode, numDataNodes, -1);
+      waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, -1);
       
     } finally {
       if (cluster != null) {
@@ -432,19 +436,19 @@
     // block replication triggers corrupt block detection
     DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost", 
         cluster.getNameNodePort()), fs.getConf());
-    LocatedBlocks blocks = dfsClient.namenode.getBlockLocations(
+    LocatedBlocks blocks = dfsClient.getNamenode().getBlockLocations(
         fileName.toString(), 0, fileLen);
     if (lenDelta < 0) { // replica truncated
     	while (!blocks.get(0).isCorrupt() || 
     			REPLICATION_FACTOR != blocks.get(0).getLocations().length) {
     		Thread.sleep(100);
-    		blocks = dfsClient.namenode.getBlockLocations(
+    		blocks = dfsClient.getNamenode().getBlockLocations(
     				fileName.toString(), 0, fileLen);
     	}
     } else { // no corruption detected; block replicated
     	while (REPLICATION_FACTOR+1 != blocks.get(0).getLocations().length) {
     		Thread.sleep(100);
-    		blocks = dfsClient.namenode.getBlockLocations(
+    		blocks = dfsClient.getNamenode().getBlockLocations(
     				fileName.toString(), 0, fileLen);
     	}
     }

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestSetTimes.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestSetTimes.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestSetTimes.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestSetTimes.java Mon Aug  3 23:34:04 2009
@@ -183,6 +183,64 @@
     }
   }
 
+  /**
+   * Tests mod time change at close in DFS.
+   */
+  public void testTimesAtClose() throws IOException {
+    Configuration conf = new Configuration();
+    final int MAX_IDLE_TIME = 2000; // 2s
+    int replicas = 1;
+
+    // parameter initialization
+    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
+    conf.setInt("heartbeat.recheck.interval", 1000);
+    conf.setInt("dfs.heartbeat.interval", 1);
+    conf.setInt("dfs.datanode.handler.count", 50);
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
+    cluster.waitActive();
+    InetSocketAddress addr = new InetSocketAddress("localhost",
+                                                     cluster.getNameNodePort());
+    DFSClient client = new DFSClient(addr, conf);
+    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
+    assertEquals("Number of Datanodes ", numDatanodes, info.length);
+    FileSystem fileSys = cluster.getFileSystem();
+    assertTrue(fileSys instanceof DistributedFileSystem);
+
+    try {
+      // create a new file and write to it
+      Path file1 = new Path("/simple.dat");
+      FSDataOutputStream stm = writeFile(fileSys, file1, replicas);
+      System.out.println("Created and wrote file simple.dat");
+      FileStatus statBeforeClose = fileSys.getFileStatus(file1);
+      long mtimeBeforeClose = statBeforeClose.getModificationTime();
+      String mdateBeforeClose = dateForm.format(new Date(
+                                                     mtimeBeforeClose));
+      System.out.println("mtime on " + file1 + " before close is "
+                  + mdateBeforeClose + " (" + mtimeBeforeClose + ")");
+      assertTrue(mtimeBeforeClose != 0);
+
+      //close file after writing
+      stm.close();
+      System.out.println("Closed file.");
+      FileStatus statAfterClose = fileSys.getFileStatus(file1);
+      long mtimeAfterClose = statAfterClose.getModificationTime();
+      String mdateAfterClose = dateForm.format(new Date(mtimeAfterClose));
+      System.out.println("mtime on " + file1 + " after close is "
+                  + mdateAfterClose + " (" + mtimeAfterClose + ")");
+      assertTrue(mtimeAfterClose != 0);
+      assertTrue(mtimeBeforeClose != mtimeAfterClose);
+
+      cleanupFile(fileSys, file1);
+    } catch (IOException e) {
+      info = client.datanodeReport(DatanodeReportType.ALL);
+      printDatanodeReport(info);
+      throw e;
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+  }
+
   public static void main(String[] args) throws Exception {
     new TestSetTimes().testTimes();
   }

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java Mon Aug  3 23:34:04 2009
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op.REPLACE_BLOCK;
+import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status.*;
+
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
@@ -44,7 +47,6 @@
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Util;
-import org.apache.hadoop.hdfs.server.datanode.BlockTransferThrottler;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessToken;
@@ -108,7 +110,7 @@
       InetSocketAddress addr = new InetSocketAddress("localhost",
           cluster.getNameNodePort());
       DFSClient client = new DFSClient(addr, CONF);
-      List<LocatedBlock> locatedBlocks = client.namenode.
+      List<LocatedBlock> locatedBlocks = client.getNamenode().
         getBlockLocations("/tmp.txt", 0, DEFAULT_BLOCK_SIZE).getLocatedBlocks();
       assertEquals(1, locatedBlocks.size());
       LocatedBlock block = locatedBlocks.get(0);
@@ -192,7 +194,7 @@
         Thread.sleep(100);
       } catch(InterruptedException e) {
       }
-      List<LocatedBlock> blocks = client.namenode.
+      List<LocatedBlock> blocks = client.getNamenode().
       getBlockLocations(fileName, 0, fileLen).getLocatedBlocks();
       assertEquals(1, blocks.size());
       DatanodeInfo[] nodes = blocks.get(0).getLocations();
@@ -227,7 +229,7 @@
     // sendRequest
     DataOutputStream out = new DataOutputStream(sock.getOutputStream());
     out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
-    out.writeByte(DataTransferProtocol.OP_REPLACE_BLOCK);
+    REPLACE_BLOCK.write(out);
     out.writeLong(block.getBlockId());
     out.writeLong(block.getGenerationStamp());
     Text.writeString(out, source.getStorageID());
@@ -237,11 +239,7 @@
     // receiveResponse
     DataInputStream reply = new DataInputStream(sock.getInputStream());
 
-    short status = reply.readShort();
-    if(status == DataTransferProtocol.OP_STATUS_SUCCESS) {
-      return true;
-    }
-    return false;
+    return DataTransferProtocol.Status.read(reply) == SUCCESS;
   }
 
   /**
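
The replaceBlock hunks above swap raw wire constants (OP_REPLACE_BLOCK, OP_STATUS_SUCCESS) for the DataTransferProtocol.Op and Status enums, which serialize and parse themselves. A sketch of the framing under that reading, with the socket setup and the block/source/proxy payload elided and the helper name hypothetical:

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;

    import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op.REPLACE_BLOCK;
    import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status.SUCCESS;

    // Sketch: enums frame the request and parse the response, replacing
    // hand-written writeByte/readShort of magic constants.
    static boolean replaceAndCheck(DataOutputStream out, DataInputStream reply)
        throws IOException {
      out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
      REPLACE_BLOCK.write(out);  // was: out.writeByte(OP_REPLACE_BLOCK)
      out.flush();
      // was: reply.readShort() == OP_STATUS_SUCCESS
      return DataTransferProtocol.Status.read(reply) == SUCCESS;
    }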

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java Mon Aug  3 23:34:04 2009
@@ -67,7 +67,7 @@
   /** Truncate a block file */
   private long truncateBlockFile() throws IOException {
     synchronized (fds) {
-      for (Entry<Block, DatanodeBlockInfo> entry : fds.volumeMap.entrySet()) {
+      for (Entry<Block, ReplicaInfo> entry : fds.volumeMap.entrySet()) {
         Block b = entry.getKey();
         File f = entry.getValue().getFile();
         File mf = FSDataset.getMetaFile(f, b);
@@ -87,7 +87,7 @@
   /** Delete a block file */
   private long deleteBlockFile() {
     synchronized(fds) {
-      for (Entry<Block, DatanodeBlockInfo> entry : fds.volumeMap.entrySet()) {
+      for (Entry<Block, ReplicaInfo> entry : fds.volumeMap.entrySet()) {
         Block b = entry.getKey();
         File f = entry.getValue().getFile();
         File mf = FSDataset.getMetaFile(f, b);
@@ -104,7 +104,7 @@
   /** Delete block meta file */
   private long deleteMetaFile() {
     synchronized(fds) {
-      for (Entry<Block, DatanodeBlockInfo> entry : fds.volumeMap.entrySet()) {
+      for (Entry<Block, ReplicaInfo> entry : fds.volumeMap.entrySet()) {
         Block b = entry.getKey();
         String blkfile = entry.getValue().getFile().getAbsolutePath();
         long genStamp = b.getGenerationStamp();
@@ -126,7 +126,7 @@
     while (true) {
       id = rand.nextLong();
       Block b = new Block(id);
-      DatanodeBlockInfo info = null;
+      ReplicaInfo info = null;
       synchronized(fds) {
         info = fds.volumeMap.get(b);
       }
@@ -326,7 +326,7 @@
   private void verifyAddition(long blockId, long genStamp, long size) {
     Block memBlock = fds.getBlockKey(blockId);
     assertNotNull(memBlock);
-    DatanodeBlockInfo blockInfo;
+    ReplicaInfo blockInfo;
     synchronized(fds) {
       blockInfo = fds.volumeMap.get(memBlock);
     }
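
The TestDirectoryScanner hunks are a pure type rename: the datanode volumeMap now holds ReplicaInfo values where it held DatanodeBlockInfo, and the traversal code is otherwise unchanged. A sketch of the new iteration shape; the helper name is hypothetical, and FSDataset and ReplicaInfo are assumed visible from the test's own package, org.apache.hadoop.hdfs.server.datanode:

    import java.io.File;
    import java.util.Map.Entry;

    import org.apache.hadoop.hdfs.protocol.Block;

    // Sketch: identical traversal, new value type. getFile() still returns
    // the replica's on-disk block file, so callers need no other change.
    private static void walkVolumeMap(FSDataset fds) {
      synchronized (fds) {
        for (Entry<Block, ReplicaInfo> entry : fds.volumeMap.entrySet()) {
          Block b = entry.getKey();
          File blockFile = entry.getValue().getFile();
          File metaFile = FSDataset.getMetaFile(blockFile, b);
          System.out.println(b + " -> " + blockFile + ", " + metaFile);
        }
      }
    }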

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java Mon Aug  3 23:34:04 2009
@@ -17,11 +17,15 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op.WRITE_BLOCK;
+
 import java.io.DataOutputStream;
 import java.io.File;
 import java.net.InetSocketAddress;
 import java.net.Socket;
 
+import junit.framework.TestCase;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -33,8 +37,6 @@
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.AccessToken;
 
-import junit.framework.TestCase;
-
 /** Test if a datanode can correctly handle errors during block read/write*/
 public class TestDiskError extends TestCase {
   public void testShutdown() throws Exception {
@@ -112,7 +114,7 @@
           s.getOutputStream());
 
       out.writeShort( DataTransferProtocol.DATA_TRANSFER_VERSION );
-      out.write( DataTransferProtocol.OP_WRITE_BLOCK );
+      WRITE_BLOCK.write(out);
       out.writeLong( block.getBlock().getBlockId());
       out.writeLong( block.getBlock().getGenerationStamp() );
       out.writeInt(1);

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java Mon Aug  3 23:34:04 2009
@@ -83,7 +83,7 @@
       assertTrue(dfs.getClient().exists(filestr));
 
       //get block info
-      LocatedBlock locatedblock = getLastLocatedBlock(dfs.getClient().namenode, filestr);
+      LocatedBlock locatedblock = getLastLocatedBlock(dfs.getClient().getNamenode(), filestr);
       DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
       assertTrue(datanodeinfo.length > 0);
 

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java Mon Aug  3 23:34:04 2009
@@ -19,15 +19,12 @@
 
 import java.io.File;
 import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
 
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.common.Storage;
-import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
 
 /**
  * 

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Mon Aug  3 23:34:04 2009
@@ -175,7 +175,7 @@
       String[] fileNames = util.getFileNames(topDir);
       DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
                                           cluster.getNameNodePort()), conf);
-      String block = dfsClient.namenode.
+      String block = dfsClient.getNamenode().
                       getBlockLocations(fileNames[0], 0, Long.MAX_VALUE).
                       get(0).getBlock().getBlockName();
       File baseDir = new File(System.getProperty("test.build.data",
@@ -315,7 +315,7 @@
 
     dfsClient = new DFSClient(new InetSocketAddress("localhost",
                                cluster.getNameNodePort()), conf);
-    blocks = dfsClient.namenode.
+    blocks = dfsClient.getNamenode().
                getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
     replicaCount = blocks.get(0).getLocations().length;
     while (replicaCount != 3) {
@@ -323,7 +323,7 @@
         Thread.sleep(100);
       } catch (InterruptedException ignore) {
       }
-      blocks = dfsClient.namenode.
+      blocks = dfsClient.getNamenode().
                 getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
       replicaCount = blocks.get(0).getLocations().length;
     }

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/security/TestPermission.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/security/TestPermission.java?rev=800624&r1=800623&r2=800624&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/security/TestPermission.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/security/TestPermission.java Mon Aug  3 23:34:04 2009
@@ -18,6 +18,7 @@
 package org.apache.hadoop.security;
 
 import java.io.IOException;
+import java.util.EnumSet;
 import java.util.Random;
 
 import org.apache.commons.logging.Log;
@@ -86,7 +87,7 @@
 
       FsPermission filePerm = new FsPermission((short)0444);
       FSDataOutputStream out = fs.create(new Path("/b1/b2/b3.txt"), filePerm,
-          true, conf.getInt("io.file.buffer.size", 4096),
+          EnumSet.of(CreateFlag.OVERWRITE), conf.getInt("io.file.buffer.size", 4096),
           fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
       out.write(123);
       out.close();
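
This is the same signature migration as the create() call in the first hunk of this message: the boolean overwrite argument to FileSystem.create becomes an EnumSet<CreateFlag>, the extensible form. A sketch of the new call, with a hypothetical wrapper name; the argument list mirrors the hunk above:

    import java.io.IOException;
    import java.util.EnumSet;

    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    // Sketch: the boolean "overwrite" parameter is replaced by a flag set,
    // so additional create-time flags can share one entry point.
    static FSDataOutputStream createOverwrite(FileSystem fs, Path p,
        FsPermission perm) throws IOException {
      return fs.create(p, perm,
          EnumSet.of(CreateFlag.OVERWRITE),
          fs.getConf().getInt("io.file.buffer.size", 4096),
          fs.getDefaultReplication(), fs.getDefaultBlockSize(),
          null);  // no Progressable, as in the test above
    }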

Propchange: hadoop/hdfs/branches/HDFS-265/src/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Aug  3 23:34:04 2009
@@ -1,2 +1,3 @@
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/datanode:713112
 /hadoop/core/trunk/src/webapps/datanode:776175-784663
+/hadoop/hdfs/trunk/src/webapps/datanode:796829-800617

Propchange: hadoop/hdfs/branches/HDFS-265/src/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Aug  3 23:34:04 2009
@@ -1,2 +1,3 @@
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/hdfs:713112
 /hadoop/core/trunk/src/webapps/hdfs:776175-784663
+/hadoop/hdfs/trunk/src/webapps/hdfs:796829-800617

Propchange: hadoop/hdfs/branches/HDFS-265/src/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Aug  3 23:34:04 2009
@@ -1,2 +1,3 @@
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/secondary:713112
 /hadoop/core/trunk/src/webapps/secondary:776175-784663
+/hadoop/hdfs/trunk/src/webapps/secondary:796829-800617


