hadoop-hdfs-commits mailing list archives

From sur...@apache.org
Subject svn commit: r1407217 [6/7] - in /hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs: ./ src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/ src/contrib/bkjournal/src/main/proto/ src/contrib/bkjournal/src/test/j...
Date Thu, 08 Nov 2012 19:10:04 GMT
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Thu Nov  8 19:09:46 2012
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.PrintWriter;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -25,13 +27,26 @@ import java.util.List;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Directory INode class.
  */
 class INodeDirectory extends INode {
+  /** Cast INode to INodeDirectory. */
+  public static INodeDirectory valueOf(INode inode, String path
+      ) throws IOException {
+    if (inode == null) {
+      throw new IOException("Directory does not exist: " + path);
+    }
+    if (!inode.isDirectory()) {
+      throw new IOException("Path is not a directory: " + path);
+    }
+    return (INodeDirectory)inode; 
+  }
+
   protected static final int DEFAULT_FILES_PER_DIRECTORY = 5;
   final static String ROOT_NAME = "";
 
@@ -62,11 +77,9 @@ class INodeDirectory extends INode {
     this.children = other.getChildren();
   }
   
-  /**
-   * Check whether it's a directory
-   */
+  /** @return true unconditionally. */
   @Override
-  public boolean isDirectory() {
+  public final boolean isDirectory() {
     return true;
   }
 
@@ -112,14 +125,14 @@ class INodeDirectory extends INode {
   }
 
   /**
-   * Return the INode of the last component in components, or null if the last
+   * @return the INode of the last component in components, or null if the last
    * component does not exist.
    */
-  private INode getNode(byte[][] components, boolean resolveLink) 
-    throws UnresolvedLinkException {
-    INode[] inode  = new INode[1];
-    getExistingPathINodes(components, inode, resolveLink);
-    return inode[0];
+  private INode getNode(byte[][] components, boolean resolveLink
+      ) throws UnresolvedLinkException {
+    INodesInPath inodesInPath = getExistingPathINodes(components, 1,
+        resolveLink);
+    return inodesInPath.inodes[0];
   }
 
   /**
@@ -167,29 +180,31 @@ class INodeDirectory extends INode {
    * fill the array with [rootINode,c1,c2,null]
    * 
    * @param components array of path component name
-   * @param existing array to fill with existing INodes
+   * @param numOfINodes number of INodes to return
    * @param resolveLink indicates whether UnresolvedLinkException should
    *        be thrown when the path refers to a symbolic link.
-   * @return number of existing INodes in the path
+   * @return the specified number of existing INodes in the path
    */
-  int getExistingPathINodes(byte[][] components, INode[] existing, 
-      boolean resolveLink) throws UnresolvedLinkException {
+  INodesInPath getExistingPathINodes(byte[][] components, int numOfINodes,
+      boolean resolveLink)
+      throws UnresolvedLinkException {
     assert this.compareTo(components[0]) == 0 :
         "Incorrect name " + getLocalName() + " expected "
         + (components[0] == null? null: DFSUtil.bytes2String(components[0]));
 
+    INodesInPath existing = new INodesInPath(numOfINodes);
     INode curNode = this;
     int count = 0;
-    int index = existing.length - components.length;
+    int index = numOfINodes - components.length;
     if (index > 0) {
       index = 0;
     }
     while (count < components.length && curNode != null) {
       final boolean lastComp = (count == components.length - 1);      
       if (index >= 0) {
-        existing[index] = curNode;
+        existing.inodes[index] = curNode;
       }
-      if (curNode.isLink() && (!lastComp || (lastComp && resolveLink))) {
+      if (curNode.isSymlink() && (!lastComp || (lastComp && resolveLink))) {
         final String path = constructPath(components, 0, components.length);
         final String preceding = constructPath(components, 0, count);
         final String remainder =
@@ -212,7 +227,7 @@ class INodeDirectory extends INode {
       count++;
       index++;
     }
-    return count;
+    return existing;
   }
 
   /**
@@ -228,16 +243,12 @@ class INodeDirectory extends INode {
    *         components in the path, and non existing components will be
    *         filled with null
    *         
-   * @see #getExistingPathINodes(byte[][], INode[])
+   * @see #getExistingPathINodes(byte[][], int, boolean)
    */
-  INode[] getExistingPathINodes(String path, boolean resolveLink) 
+  INodesInPath getExistingPathINodes(String path, boolean resolveLink) 
     throws UnresolvedLinkException {
     byte[][] components = getPathComponents(path);
-    INode[] inodes = new INode[components.length];
-
-    this.getExistingPathINodes(components, inodes, resolveLink);
-    
-    return inodes;
+    return getExistingPathINodes(components, components.length, resolveLink);
   }
 
   /**
@@ -299,10 +310,7 @@ class INodeDirectory extends INode {
   <T extends INode> T addNode(String path, T newNode
       ) throws FileNotFoundException, UnresolvedLinkException  {
     byte[][] pathComponents = getPathComponents(path);        
-    if(addToParent(pathComponents, newNode,
-                    true) == null)
-      return null;
-    return newNode;
+    return addToParent(pathComponents, newNode, true) == null? null: newNode;
   }
 
   /**
@@ -326,15 +334,13 @@ class INodeDirectory extends INode {
     return parent;
   }
 
-  INodeDirectory getParent(byte[][] pathComponents)
-  throws FileNotFoundException, UnresolvedLinkException {
-    int pathLen = pathComponents.length;
-    if (pathLen < 2)  // add root
+  INodeDirectory getParent(byte[][] pathComponents
+      ) throws FileNotFoundException, UnresolvedLinkException {
+    if (pathComponents.length < 2)  // add root
       return null;
     // Gets the parent INode
-    INode[] inodes  = new INode[2];
-    getExistingPathINodes(pathComponents, inodes, false);
-    INode inode = inodes[0];
+    INodesInPath inodes =  getExistingPathINodes(pathComponents, 2, false);
+    INode inode = inodes.inodes[0];
     if (inode == null) {
       throw new FileNotFoundException("Parent path does not exist: "+
           DFSUtil.byteArray2String(pathComponents));
@@ -355,21 +361,15 @@ class INodeDirectory extends INode {
    * @throws  FileNotFoundException if parent does not exist or 
    *          is not a directory.
    */
-  INodeDirectory addToParent( byte[][] pathComponents,
-                              INode newNode,
-                              boolean propagateModTime
-                            ) throws FileNotFoundException, 
-                                     UnresolvedLinkException {
-              
-    int pathLen = pathComponents.length;
-    if (pathLen < 2)  // add root
+  INodeDirectory addToParent(byte[][] pathComponents, INode newNode,
+      boolean propagateModTime) throws FileNotFoundException, UnresolvedLinkException {
+    if (pathComponents.length < 2) { // add root
       return null;
-    newNode.name = pathComponents[pathLen-1];
+    }
+    newNode.name = pathComponents[pathComponents.length - 1];
     // insert into the parent children list
     INodeDirectory parent = getParent(pathComponents);
-    if(parent.addChild(newNode, propagateModTime) == null)
-      return null;
-    return parent;
+    return parent.addChild(newNode, propagateModTime) == null? null: parent;
   }
 
   @Override
@@ -415,25 +415,99 @@ class INodeDirectory extends INode {
   }
 
   /**
+   * @return an empty list if the children list is null;
+   *         otherwise, the children list.
+   *         The returned list should not be modified.
    */
-  List<INode> getChildren() {
-    return children==null ? new ArrayList<INode>() : children;
+  public List<INode> getChildrenList() {
+    return children==null ? EMPTY_LIST : children;
   }
-  List<INode> getChildrenRaw() {
+  /** @return the children list which is possibly null. */
+  public List<INode> getChildren() {
     return children;
   }
 
   @Override
-  int collectSubtreeBlocksAndClear(List<Block> v) {
+  int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info) {
     int total = 1;
     if (children == null) {
       return total;
     }
     for (INode child : children) {
-      total += child.collectSubtreeBlocksAndClear(v);
+      total += child.collectSubtreeBlocksAndClear(info);
     }
     parent = null;
     children = null;
     return total;
   }
+  
+  /**
+   * Used by
+   * {@link INodeDirectory#getExistingPathINodes(byte[][], int, boolean)}.
+   * Contains the INode information resolved from a given path.
+   */
+  static class INodesInPath {
+    private INode[] inodes;
+    
+    public INodesInPath(int number) {
+      assert (number >= 0);
+      this.inodes = new INode[number];
+    }
+    
+    INode[] getINodes() {
+      return inodes;
+    }
+    
+    void setINode(int i, INode inode) {
+      inodes[i] = inode;
+    }
+  }
+
+  /*
+   * The following code is to dump the tree recursively for testing.
+   * 
+   *      \- foo   (INodeDirectory@33dd2717)
+   *        \- sub1   (INodeDirectory@442172)
+   *          +- file1   (INodeFile@78392d4)
+   *          +- file2   (INodeFile@78392d5)
+   *          +- sub11   (INodeDirectory@8400cff)
+   *            \- file3   (INodeFile@78392d6)
+   *          \- z_file4   (INodeFile@45848712)
+   */
+  static final String DUMPTREE_EXCEPT_LAST_ITEM = "+-"; 
+  static final String DUMPTREE_LAST_ITEM = "\\-";
+  @VisibleForTesting
+  @Override
+  public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix) {
+    super.dumpTreeRecursively(out, prefix);
+    if (prefix.length() >= 2) {
+      prefix.setLength(prefix.length() - 2);
+      prefix.append("  ");
+    }
+    dumpTreeRecursively(out, prefix, children);
+  }
+
+  /**
+   * Dump the given subtrees.
+   * @param prefix The prefix string that each line should print.
+   * @param subs The subtrees.
+   */
+  @VisibleForTesting
+  protected static void dumpTreeRecursively(PrintWriter out,
+      StringBuilder prefix, List<? extends INode> subs) {
+    prefix.append(DUMPTREE_EXCEPT_LAST_ITEM);
+    if (subs != null && subs.size() != 0) {
+      int i = 0;
+      for(; i < subs.size() - 1; i++) {
+        subs.get(i).dumpTreeRecursively(out, prefix);
+        prefix.setLength(prefix.length() - 2);
+        prefix.append(DUMPTREE_EXCEPT_LAST_ITEM);
+      }
+
+      prefix.setLength(prefix.length() - 2);
+      prefix.append(DUMPTREE_LAST_ITEM);
+      subs.get(i).dumpTreeRecursively(out, prefix);
+    }
+    prefix.setLength(prefix.length() - 2);
+  }
 }
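
The INodeDirectory changes above replace the caller-allocated INode[] out-parameter with the new INodesInPath holder and centralize the null-check-and-cast idiom in valueOf. A minimal before/after sketch of a hypothetical call site (rootDir, components, and path are assumed in scope; this is not code from the commit):

    // Before: each call site allocated the array and repeated the checks.
    INode[] inodes = new INode[2];
    rootDir.getExistingPathINodes(components, inodes, false);
    if (inodes[0] == null || !inodes[0].isDirectory()) {
      throw new IOException("Directory does not exist: " + path);
    }
    INodeDirectory dir = (INodeDirectory) inodes[0];

    // After: INodesInPath carries the result and valueOf folds the
    // null check, the type check, and the cast into one call.
    INodesInPath iip = rootDir.getExistingPathINodes(components, 2, false);
    INodeDirectory dir2 = INodeDirectory.valueOf(iip.getINodes()[0], path);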

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Thu Nov  8 19:09:46 2012
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -32,6 +32,17 @@ import org.apache.hadoop.hdfs.server.blo
 /** I-node for closed file. */
 @InterfaceAudience.Private
 public class INodeFile extends INode implements BlockCollection {
+  /** Cast INode to INodeFile. */
+  public static INodeFile valueOf(INode inode, String path) throws IOException {
+    if (inode == null) {
+      throw new FileNotFoundException("File does not exist: " + path);
+    }
+    if (!(inode instanceof INodeFile)) {
+      throw new FileNotFoundException("Path is not a file: " + path);
+    }
+    return (INodeFile)inode;
+  }
+
   static final FsPermission UMASK = FsPermission.createImmutable((short)0111);
 
   //Number of bits for Block size
@@ -43,7 +54,7 @@ public class INodeFile extends INode imp
 
   private long header;
 
-  BlockInfo blocks[] = null;
+  private BlockInfo[] blocks;
 
   INodeFile(PermissionStatus permissions, BlockInfo[] blklist,
                       short replication, long modificationTime,
@@ -51,7 +62,7 @@ public class INodeFile extends INode imp
     super(permissions, modificationTime, atime);
     this.setReplication(replication);
     this.setPreferredBlockSize(preferredBlockSize);
-    blocks = blklist;
+    this.blocks = blklist;
   }
 
   /**
@@ -64,11 +75,6 @@ public class INodeFile extends INode imp
     super.setPermission(permission.applyUMask(UMASK));
   }
 
-  @Override
-  boolean isDirectory() {
-    return false;
-  }
-
   /** @return the replication factor of the file. */
   @Override
   public short getBlockReplication() {
@@ -116,7 +122,7 @@ public class INodeFile extends INode imp
     for(BlockInfo bi: newlist) {
       bi.setBlockCollection(this);
     }
-    this.blocks = newlist;
+    setBlocks(newlist);
   }
   
   /**
@@ -124,14 +130,13 @@ public class INodeFile extends INode imp
    */
   void addBlock(BlockInfo newblock) {
     if (this.blocks == null) {
-      this.blocks = new BlockInfo[1];
-      this.blocks[0] = newblock;
+      this.setBlocks(new BlockInfo[]{newblock});
     } else {
       int size = this.blocks.length;
       BlockInfo[] newlist = new BlockInfo[size + 1];
       System.arraycopy(this.blocks, 0, newlist, 0, size);
       newlist[size] = newblock;
-      this.blocks = newlist;
+      this.setBlocks(newlist);
     }
   }
 
@@ -140,16 +145,21 @@ public class INodeFile extends INode imp
     this.blocks[idx] = blk;
   }
 
+  /** Set the blocks. */
+  public void setBlocks(BlockInfo[] blocks) {
+    this.blocks = blocks;
+  }
+
   @Override
-  int collectSubtreeBlocksAndClear(List<Block> v) {
+  int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info) {
     parent = null;
-    if(blocks != null && v != null) {
+    if(blocks != null && info != null) {
       for (BlockInfo blk : blocks) {
-        v.add(blk);
+        info.addDeleteBlock(blk);
         blk.setBlockCollection(null);
       }
     }
-    blocks = null;
+    setBlocks(null);
     return 1;
   }
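
INodeFile.valueOf follows the same pattern but throws FileNotFoundException and tests with instanceof, so the INodeFileUnderConstruction subclass passes the check too. A hypothetical call site (sketch; fsdir stands in for the FSDirectory that LeaseManager uses later in this commit):

    // Sketch: resolve a path and narrow it to a file inode in one step;
    // a missing path or a directory both raise FileNotFoundException.
    INodeFile file = INodeFile.valueOf(fsdir.getINode(src), src);
    short replication = file.getBlockReplication();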
   

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java Thu Nov  8 19:09:46 2012
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
+import java.util.Arrays;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.PermissionStatus;
@@ -25,16 +26,24 @@ import org.apache.hadoop.hdfs.protocol.B
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.blockmanagement.MutableBlockCollection;
-
-import com.google.common.base.Joiner;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 
 /**
  * I-node for file being written.
  */
 @InterfaceAudience.Private
 class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollection {
+  /** Cast INode to INodeFileUnderConstruction. */
+  public static INodeFileUnderConstruction valueOf(INode inode, String path
+      ) throws IOException {
+    final INodeFile file = INodeFile.valueOf(inode, path);
+    if (!file.isUnderConstruction()) {
+      throw new IOException("File is not under construction: " + path);
+    }
+    return (INodeFileUnderConstruction)file;
+  }
+
   private  String clientName;         // lease holder
   private final String clientMachine;
   private final DatanodeDescriptor clientNode; // if client is a cluster node too.
@@ -99,9 +108,9 @@ class INodeFileUnderConstruction extends
   // use the modification time as the access time
   //
   INodeFile convertToInodeFile() {
-    assert allBlocksComplete() :
-      "Can't finalize inode " + this + " since it contains " +
-      "non-complete blocks! Blocks are: " + blocksAsString();
+    assert allBlocksComplete() : "Can't finalize inode " + this
+      + " since it contains non-complete blocks! Blocks are "
+      + Arrays.asList(getBlocks());
     INodeFile obj = new INodeFile(getPermissionStatus(),
                                   getBlocks(),
                                   getBlockReplication(),
@@ -116,7 +125,7 @@ class INodeFileUnderConstruction extends
    * @return true if all of the blocks in this file are marked as completed.
    */
   private boolean allBlocksComplete() {
-    for (BlockInfo b : blocks) {
+    for (BlockInfo b : getBlocks()) {
       if (!b.isComplete()) {
         return false;
       }
@@ -129,6 +138,7 @@ class INodeFileUnderConstruction extends
    * the last one on the list.
    */
   void removeLastBlock(Block oldblock) throws IOException {
+    final BlockInfo[] blocks = getBlocks();
     if (blocks == null) {
       throw new IOException("Trying to delete non-existent block " + oldblock);
     }
@@ -140,7 +150,7 @@ class INodeFileUnderConstruction extends
     //copy to a new list
     BlockInfo[] newlist = new BlockInfo[size_1];
     System.arraycopy(blocks, 0, newlist, 0, size_1);
-    blocks = newlist;
+    setBlocks(newlist);
   }
 
   /**
@@ -149,11 +159,9 @@ class INodeFileUnderConstruction extends
    */
   @Override
   public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
-                                          DatanodeDescriptor[] targets)
-  throws IOException {
-    if (blocks == null || blocks.length == 0) {
-      throw new IOException("Trying to update non-existant block. " +
-          "File is empty.");
+      DatanodeDescriptor[] targets) throws IOException {
+    if (numBlocks() == 0) {
+      throw new IOException("Failed to set last block: File is empty.");
     }
     BlockInfoUnderConstruction ucBlock =
       lastBlock.convertToBlockUnderConstruction(
@@ -162,8 +170,4 @@ class INodeFileUnderConstruction extends
     setBlock(numBlocks()-1, ucBlock);
     return ucBlock;
   }
-  
-  private String blocksAsString() {
-    return Joiner.on(",").join(this.blocks);
-  }
 }
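
The valueOf helpers compose: INodeFileUnderConstruction.valueOf first narrows the inode to an INodeFile and then checks the under-construction flag, so lease-recovery code can obtain the pending file in one guarded step (hypothetical sketch, not code from this commit):

    // Sketch: both failure modes (path missing, file already closed)
    // surface as exceptions naming the offending path.
    INodeFileUnderConstruction pendingFile =
        INodeFileUnderConstruction.valueOf(fsdir.getINode(src), src);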

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java Thu Nov  8 19:09:46 2012
@@ -17,12 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.util.List;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.Block;
 
 /**
  * An INode representing a symbolic link.
@@ -41,7 +38,7 @@ public class INodeSymlink extends INode 
   }
 
   @Override
-  public boolean isLink() {
+  public boolean isSymlink() {
     return true;
   }
   
@@ -64,7 +61,7 @@ public class INodeSymlink extends INode 
   }
   
   @Override
-  int collectSubtreeBlocksAndClear(List<Block> v) {
+  int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info) {
     return 1;
   }
 
@@ -73,9 +70,4 @@ public class INodeSymlink extends INode 
     summary[1]++; // Increment the file count
     return summary;
   }
-
-  @Override
-  public boolean isDirectory() {
-    return false;
-  }
 }
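
With the per-class isDirectory overrides deleted here and in INodeFile, and isLink renamed to isSymlink, this refactoring only type-checks if the INode base class supplies the false defaults. INode.java is not part of this section, so the following is an assumed sketch of its shape, not code from the commit:

    // Assumed shape of the base class after this commit (not shown in the diff):
    abstract class INode {
      /** Overridden (and made final) in INodeDirectory to return true. */
      public boolean isDirectory() { return false; }
      /** Renamed from isLink(); overridden in INodeSymlink to return true. */
      public boolean isSymlink() { return false; }
    }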

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java Thu Nov  8 19:09:46 2012
@@ -253,7 +253,7 @@ public class LeaseManager {
     private String findPath(INodeFileUnderConstruction pendingFile) {
       try {
         for (String src : paths) {
-          if (fsnamesystem.dir.getFileINode(src) == pendingFile) {
+          if (fsnamesystem.dir.getINode(src) == pendingFile) {
             return src;
           }
         }
@@ -429,7 +429,7 @@ public class LeaseManager {
         return;
       }
 
-      LOG.info("Lease " + oldest + " has expired hard limit");
+      LOG.info(oldest + " has expired hard limit");
 
       final List<String> removing = new ArrayList<String>();
      // need to create a copy of the oldest lease paths, because
@@ -441,15 +441,14 @@ public class LeaseManager {
       for(String p : leasePaths) {
         try {
           if(fsnamesystem.internalReleaseLease(oldest, p, HdfsServerConstants.NAMENODE_LEASE_HOLDER)) {
-            LOG.info("Lease recovery for file " + p +
-                          " is complete. File closed.");
+            LOG.info("Lease recovery for " + p + " is complete. File closed.");
             removing.add(p);
           } else {
-            LOG.info("Started block recovery for file " + p +
-                          " lease " + oldest);
+            LOG.info("Started block recovery " + p + " lease " + oldest);
           }
         } catch (IOException e) {
-          LOG.error("Cannot release the path "+p+" in the lease "+oldest, e);
+          LOG.error("Cannot release the path " + p + " in the lease "
+              + oldest, e);
           removing.add(p);
         }
       }

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java Thu Nov  8 19:09:46 2012
@@ -102,7 +102,7 @@ public final class MetaRecoveryContext  
       "without prompting. " + 
       "(c/s/q/a)\n", "c", "s", "q", "a");
     if (answer.equals("c")) {
-      LOG.info("Continuing.");
+      LOG.info("Continuing");
       return;
     } else if (answer.equals("s")) {
       throw new RequestStopException("user requested stop");

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1401063-1407201

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c Thu Nov  8 19:09:46 2012
@@ -1388,6 +1388,32 @@ int hdfsHFlush(hdfsFS fs, hdfsFile f)
     return 0;
 }
 
+int hdfsHSync(hdfsFS fs, hdfsFile f)
+{
+    //Get the JNIEnv* corresponding to current thread
+    JNIEnv* env = getJNIEnv();
+    if (env == NULL) {
+      errno = EINTERNAL;
+      return -1;
+    }
+
+    //Sanity check
+    if (!f || f->type != OUTPUT) {
+        errno = EBADF;
+        return -1;
+    }
+
+    jobject jOutputStream = f->file;
+    jthrowable jthr = invokeMethod(env, NULL, INSTANCE, jOutputStream,
+                     HADOOP_OSTRM, "hsync", "()V");
+    if (jthr) {
+        errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+            "hdfsHSync: FSDataOutputStream#hsync");
+        return -1;
+    }
+    return 0;
+}
+
 int hdfsAvailable(hdfsFS fs, hdfsFile f)
 {
     // JAVA EQUIVALENT

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h Thu Nov  8 19:09:46 2012
@@ -394,6 +394,17 @@ extern  "C" {
 
 
     /**
+     * hdfsHSync - Similar to posix fsync, flushes out the data in a client's
+     * user buffer all the way to the disk device (but the disk may have
+     * it in its cache).
+     * @param fs configured filesystem handle
+     * @param file file handle
+     * @return 0 on success, -1 on error and sets errno
+     */
+    int hdfsHSync(hdfsFS fs, hdfsFile file);
+
+
+    /**
      * hdfsAvailable - Number of bytes that can be read from this
      * input stream without blocking.
      * @param fs The configured filesystem handle.
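
On the Java side, the JNI call in hdfsHSync dispatches to FSDataOutputStream#hsync (method name "hsync", signature "()V"), so the new C function is roughly equivalent to the Java fragment below (a sketch, in the spirit of the JAVA EQUIVALENT comments used elsewhere in hdfs.c):

    // Java equivalent of hdfsHSync (sketch):
    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataOutputStream;

    class HSyncEquivalent {
      static void hsync(FSDataOutputStream out) throws IOException {
        out.hsync(); // like posix fsync: force data out of client buffers
      }
    }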

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c Thu Nov  8 19:09:46 2012
@@ -146,6 +146,7 @@ static int hashTableInit(void)
             if (hcreate(MAX_HASH_TABLE_ELEM) == 0) {
                 fprintf(stderr, "error creating hashtable, <%d>: %s\n",
                         errno, strerror(errno));
+                UNLOCK_HASH_TABLE();
                 return 0;
             } 
             hashTableInited = 1;

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c Thu Nov  8 19:09:46 2012
@@ -24,10 +24,15 @@
 #include <jni.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <string.h>
 
 #define MINIDFS_CLUSTER_BUILDER "org/apache/hadoop/hdfs/MiniDFSCluster$Builder"
 #define MINIDFS_CLUSTER "org/apache/hadoop/hdfs/MiniDFSCluster"
 #define HADOOP_CONF     "org/apache/hadoop/conf/Configuration"
+#define HADOOP_NAMENODE "org/apache/hadoop/hdfs/server/namenode/NameNode"
+#define JAVA_INETSOCKETADDRESS "java/net/InetSocketAddress"
+
+#define DFS_WEBHDFS_ENABLED_KEY "dfs.webhdfs.enabled"
 
 struct NativeMiniDfsCluster {
     /**
@@ -39,10 +44,11 @@ struct NativeMiniDfsCluster {
 struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf)
 {
     struct NativeMiniDfsCluster* cl = NULL;
-    jobject bld = NULL, bld2 = NULL, cobj = NULL;
+    jobject bld = NULL, cobj = NULL, cluster = NULL;
     jvalue  val;
     JNIEnv *env = getJNIEnv();
     jthrowable jthr;
+    jstring jconfStr = NULL;
 
     if (!env) {
         fprintf(stderr, "nmdCreate: unable to construct JNIEnv.\n");
@@ -57,53 +63,76 @@ struct NativeMiniDfsCluster* nmdCreate(s
     if (jthr) {
         printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "nmdCreate: new Configuration");
-        goto error_free_cl;
+        goto error;
+    }
+    if (conf->webhdfsEnabled) {
+        jthr = newJavaStr(env, DFS_WEBHDFS_ENABLED_KEY, &jconfStr);
+        if (jthr) {
+            printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                                  "nmdCreate: new String");
+            goto error;
+        }
+        jthr = invokeMethod(env, NULL, INSTANCE, cobj, HADOOP_CONF,
+                            "setBoolean", "(Ljava/lang/String;Z)V",
+                            jconfStr, conf->webhdfsEnabled);
+        if (jthr) {
+            printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                                  "nmdCreate: Configuration::setBoolean");
+            goto error;
+        }
     }
     jthr = constructNewObjectOfClass(env, &bld, MINIDFS_CLUSTER_BUILDER,
                     "(L"HADOOP_CONF";)V", cobj);
     if (jthr) {
         printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "nmdCreate: NativeMiniDfsCluster#Builder#Builder");
-        goto error_dlr_cobj;
+        goto error;
     }
     jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
             "format", "(Z)L" MINIDFS_CLUSTER_BUILDER ";", conf->doFormat);
     if (jthr) {
         printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "nmdCreate: "
                               "Builder::format");
-        goto error_dlr_bld;
+        goto error;
+    }
+    (*env)->DeleteLocalRef(env, val.l);
+    if (conf->webhdfsEnabled) {
+        jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
+                        "nameNodeHttpPort", "(I)L" MINIDFS_CLUSTER_BUILDER ";",
+                        conf->namenodeHttpPort);
+        if (jthr) {
+            printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "nmdCreate: "
+                                  "Builder::nameNodeHttpPort");
+            goto error;
+        }
+        (*env)->DeleteLocalRef(env, val.l);
     }
-    bld2 = val.l;
     jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
             "build", "()L" MINIDFS_CLUSTER ";");
     if (jthr) {
         printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                               "nmdCreate: Builder#build");
-        goto error_dlr_bld2;
+        goto error;
     }
-	cl->obj = (*env)->NewGlobalRef(env, val.l);
+    cluster = val.l;
+    cl->obj = (*env)->NewGlobalRef(env, val.l);
     if (!cl->obj) {
         printPendingExceptionAndFree(env, PRINT_EXC_ALL,
             "nmdCreate: NewGlobalRef");
-        goto error_dlr_val;
+        goto error;
     }
-    (*env)->DeleteLocalRef(env, val.l);
-    (*env)->DeleteLocalRef(env, bld2);
+    (*env)->DeleteLocalRef(env, cluster);
     (*env)->DeleteLocalRef(env, bld);
     (*env)->DeleteLocalRef(env, cobj);
+    (*env)->DeleteLocalRef(env, jconfStr);
     return cl;
 
-error_dlr_val:
-    (*env)->DeleteLocalRef(env, val.l);
-error_dlr_bld2:
-    (*env)->DeleteLocalRef(env, bld2);
-error_dlr_bld:
+error:
+    (*env)->DeleteLocalRef(env, cluster);
     (*env)->DeleteLocalRef(env, bld);
-error_dlr_cobj:
     (*env)->DeleteLocalRef(env, cobj);
-error_free_cl:
+    (*env)->DeleteLocalRef(env, jconfStr);
     free(cl);
-error:
     return NULL;
 }
 
@@ -177,3 +206,69 @@ int nmdGetNameNodePort(const struct Nati
     }
     return jVal.i;
 }
+
+int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
+                               int *port, const char **hostName)
+{
+    JNIEnv *env = getJNIEnv();
+    jvalue jVal;
+    jobject jNameNode, jAddress;
+    jthrowable jthr;
+    int ret = 0;
+    const char *host;
+    
+    if (!env) {
+        fprintf(stderr, "nmdGetNameNodeHttpAddress: getJNIEnv failed\n");
+        return -EIO;
+    }
+    // First get the (first) NameNode of the cluster
+    jthr = invokeMethod(env, &jVal, INSTANCE, cl->obj, MINIDFS_CLUSTER,
+                        "getNameNode", "()L" HADOOP_NAMENODE ";");
+    if (jthr) {
+        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                              "nmdGetNameNodeHttpAddress: "
+                              "MiniDFSCluster#getNameNode");
+        return -EIO;
+    }
+    jNameNode = jVal.l;
+    
+    // Then get the http address (InetSocketAddress) of the NameNode
+    jthr = invokeMethod(env, &jVal, INSTANCE, jNameNode, HADOOP_NAMENODE,
+                        "getHttpAddress", "()L" JAVA_INETSOCKETADDRESS ";");
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                                    "nmdGetNameNodeHttpAddress: "
+                                    "NameNode#getHttpAddress");
+        goto error_dlr_nn;
+    }
+    jAddress = jVal.l;
+    
+    jthr = invokeMethod(env, &jVal, INSTANCE, jAddress,
+                        JAVA_INETSOCKETADDRESS, "getPort", "()I");
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                                    "nmdGetNameNodeHttpAddress: "
+                                    "InetSocketAddress#getPort");
+        goto error_dlr_addr;
+    }
+    *port = jVal.i;
+    
+    jthr = invokeMethod(env, &jVal, INSTANCE, jAddress, JAVA_INETSOCKETADDRESS,
+                        "getHostName", "()Ljava/lang/String;");
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                                    "nmdGetNameNodeHttpAddress: "
+                                    "InetSocketAddress#getHostName");
+        goto error_dlr_addr;
+    }
+    host = (*env)->GetStringUTFChars(env, jVal.l, NULL);
+    *hostName = strdup(host);
+    (*env)->ReleaseStringUTFChars(env, jVal.l, host);
+    
+error_dlr_addr:
+    (*env)->DeleteLocalRef(env, jAddress);
+error_dlr_nn:
+    (*env)->DeleteLocalRef(env, jNameNode);
+    
+    return ret;
+}

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.h?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.h (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.h Thu Nov  8 19:09:46 2012
@@ -31,6 +31,14 @@ struct NativeMiniDfsConf {
      * Nonzero if the cluster should be formatted prior to startup
      */
     jboolean doFormat;
+    /**
+     * Whether or not to enable webhdfs in MiniDfsCluster
+     */
+    jboolean webhdfsEnabled;
+    /**
+     * The http port of the namenode in MiniDfsCluster
+     */
+    jint namenodeHttpPort;
 };
 
 /**
@@ -76,5 +84,21 @@ void nmdFree(struct NativeMiniDfsCluster
  *
  * @return          the port, or a negative error code
  */
-int nmdGetNameNodePort(const struct NativeMiniDfsCluster *cl); 
+int nmdGetNameNodePort(const struct NativeMiniDfsCluster *cl);
+
+/**
+ * Get the http address that's in use by the given (non-HA) nativeMiniDfs
+ *
+ * @param cl        The initialized NativeMiniDfsCluster
+ * @param port      Used to capture the http port of the NameNode 
+ *                  of the NativeMiniDfsCluster
+ * @param hostName  Used to capture the http hostname of the NameNode
+ *                  of the NativeMiniDfsCluster
+ *
+ * @return          0 on success; a non-zero error code if failing to
+ *                  get the information.
+ */
+int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
+                               int *port, const char **hostName);
+
 #endif

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c Thu Nov  8 19:09:46 2012
@@ -150,6 +150,7 @@ static int doTestHdfsOperations(struct t
         return EIO;
     }
     EXPECT_ZERO(hdfsFlush(fs, file));
+    EXPECT_ZERO(hdfsHSync(fs, file));
     EXPECT_ZERO(hdfsCloseFile(fs, file));
 
     /* Let's re-open the file for reading */

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto Thu Nov  8 19:09:46 2012
@@ -23,6 +23,7 @@ option java_package = "org.apache.hadoop
 option java_outer_classname = "ClientDatanodeProtocolProtos";
 option java_generic_services = true;
 option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
 
 import "hdfs.proto";
 

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto Thu Nov  8 19:09:46 2012
@@ -20,6 +20,7 @@ option java_package = "org.apache.hadoop
 option java_outer_classname = "ClientNamenodeProtocolProtos";
 option java_generic_services = true;
 option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
 
 import "hdfs.proto";
 

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto Thu Nov  8 19:09:46 2012
@@ -23,6 +23,7 @@ option java_package = "org.apache.hadoop
 option java_outer_classname = "DatanodeProtocolProtos";
 option java_generic_services = true;
 option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
 
 import "hdfs.proto";
 

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/GetUserMappingsProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/GetUserMappingsProtocol.proto?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/GetUserMappingsProtocol.proto (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/GetUserMappingsProtocol.proto Thu Nov  8 19:09:46 2012
@@ -20,6 +20,7 @@ option java_package = "org.apache.hadoop
 option java_outer_classname = "GetUserMappingsProtocolProtos";
 option java_generic_services = true;
 option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
 
 /**
  *  Get groups for user request.

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HAZKInfo.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HAZKInfo.proto?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HAZKInfo.proto (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HAZKInfo.proto Thu Nov  8 19:09:46 2012
@@ -17,6 +17,7 @@
  */
 option java_package = "org.apache.hadoop.hdfs.server.namenode.ha.proto";
 option java_outer_classname = "HAZKInfoProtos";
+package hadoop.hdfs;
 
 message ActiveNodeInfo {
   required string nameserviceId = 1;

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto Thu Nov  8 19:09:46 2012
@@ -23,6 +23,7 @@ option java_package = "org.apache.hadoop
 option java_outer_classname = "InterDatanodeProtocolProtos";
 option java_generic_services = true;
 option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
 
 import "hdfs.proto";
 

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto Thu Nov  8 19:09:46 2012
@@ -23,6 +23,7 @@ option java_package = "org.apache.hadoop
 option java_outer_classname = "JournalProtocolProtos";
 option java_generic_services = true;
 option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
 
 import "hdfs.proto";
 

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto Thu Nov  8 19:09:46 2012
@@ -23,6 +23,7 @@ option java_package = "org.apache.hadoop
 option java_outer_classname = "NamenodeProtocolProtos";
 option java_generic_services = true;
 option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
 
 import "hdfs.proto";
 

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto Thu Nov  8 19:09:46 2012
@@ -20,6 +20,7 @@ option java_package = "org.apache.hadoop
 option java_outer_classname = "QJournalProtocolProtos";
 option java_generic_services = true;
 option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
 
 import "hdfs.proto";
 

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/RefreshAuthorizationPolicyProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/RefreshAuthorizationPolicyProtocol.proto?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/RefreshAuthorizationPolicyProtocol.proto (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/RefreshAuthorizationPolicyProtocol.proto Thu Nov  8 19:09:46 2012
@@ -20,6 +20,7 @@ option java_package = "org.apache.hadoop
 option java_outer_classname = "RefreshAuthorizationPolicyProtocolProtos";
 option java_generic_services = true;
 option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
 
 /**
  *  Refresh service acl request.

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/RefreshUserMappingsProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/RefreshUserMappingsProtocol.proto?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/RefreshUserMappingsProtocol.proto (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/RefreshUserMappingsProtocol.proto Thu Nov  8 19:09:46 2012
@@ -20,6 +20,7 @@ option java_package = "org.apache.hadoop
 option java_outer_classname = "RefreshUserMappingsProtocolProtos";
 option java_generic_services = true;
 option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
 
 /**
  *  Refresh user to group mappings request.

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto Thu Nov  8 19:09:46 2012
@@ -22,6 +22,7 @@
 option java_package = "org.apache.hadoop.hdfs.protocol.proto";
 option java_outer_classname = "DataTransferProtos";
 option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
 
 import "hdfs.proto";
 
@@ -180,5 +181,5 @@ message OpBlockChecksumResponseProto {
   required uint32 bytesPerCrc = 1;
   required uint64 crcPerBlock = 2;
   required bytes md5 = 3;
-  optional ChecksumTypeProto crcType = 4 [default = CRC32];
+  optional ChecksumTypeProto crcType = 4 [default = CHECKSUM_CRC32];
 }

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Thu Nov  8 19:09:46 2012
@@ -22,6 +22,7 @@
 option java_package = "org.apache.hadoop.hdfs.protocol.proto";
 option java_outer_classname = "HdfsProtos";
 option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
 
 /**
  * Extended block identifies a block
@@ -180,11 +181,13 @@ message HdfsFileStatusProto {
 
 /**
  * Checksum algorithms/types used in HDFS
+ * Make sure this enum's integer values match the id values of the
+ * corresponding constants in org.apache.hadoop.util.DataChecksum.Type.
  */
 enum ChecksumTypeProto {
-  NULL = 0;
-  CRC32 = 1;
-  CRC32C = 2;
+  CHECKSUM_NULL = 0;
+  CHECKSUM_CRC32 = 1;
+  CHECKSUM_CRC32C = 2;
 }
 
 /**
@@ -198,7 +201,7 @@ message FsServerDefaultsProto {
   required uint32 fileBufferSize = 5;
   optional bool encryptDataTransfer = 6 [default = false];
   optional uint64 trashInterval = 7 [default = 0];
-  optional ChecksumTypeProto checksumType = 8 [default = CRC32];
+  optional ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
 }
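
The comment added to ChecksumTypeProto pins the wire integers to the id values in org.apache.hadoop.util.DataChecksum.Type; the CHECKSUM_ prefix only renames the protobuf symbols (a bare NULL, for instance, collides with the C/C++ macro in generated code), while the integer values stay 0/1/2. A converter relying on that contract might look like the following sketch (hypothetical; the real tree keeps such converters in PBHelper, which is not part of this diff):

    // Sketch of a converter that depends on the id contract stated above
    // (hypothetical; mirrors what PBHelper-style converters do):
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto;
    import org.apache.hadoop.util.DataChecksum;

    class ChecksumTypes {
      static DataChecksum.Type fromProto(ChecksumTypeProto proto) {
        // Valid only because CHECKSUM_NULL/CRC32/CRC32C keep ids 0/1/2.
        return DataChecksum.Type.valueOf(proto.getNumber());
      }
    }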
 
 

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1401063-1407201

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1401063-1407201

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1401063-1407201

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1401063-1407201

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Thu Nov  8 19:09:46 2012
@@ -81,6 +81,8 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
+import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter;
+import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -95,6 +97,7 @@ import org.apache.hadoop.net.StaticMappi
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
@@ -145,6 +148,7 @@ public class MiniDFSCluster {
     private boolean setupHostsFile = false;
     private MiniDFSNNTopology nnTopology = null;
     private boolean checkExitOnShutdown = true;
+    private boolean checkDataNodeAddrConfig = false;
     private boolean checkDataNodeHostConfig = false;
     
     public Builder(Configuration conf) {
@@ -266,6 +270,14 @@ public class MiniDFSCluster {
     /**
      * Default: false
      */
+    public Builder checkDataNodeAddrConfig(boolean val) {
+      this.checkDataNodeAddrConfig = val;
+      return this;
+    }
+
+    /**
+     * Default: false
+     */
     public Builder checkDataNodeHostConfig(boolean val) {
       this.checkDataNodeHostConfig = val;
       return this;
@@ -336,6 +348,7 @@ public class MiniDFSCluster {
                        builder.setupHostsFile,
                        builder.nnTopology,
                        builder.checkExitOnShutdown,
+                       builder.checkDataNodeAddrConfig,
                        builder.checkDataNodeHostConfig);
   }
   
@@ -343,11 +356,14 @@ public class MiniDFSCluster {
     DataNode datanode;
     Configuration conf;
     String[] dnArgs;
+    SecureResources secureResources;
 
-    DataNodeProperties(DataNode node, Configuration conf, String[] args) {
+    DataNodeProperties(DataNode node, Configuration conf, String[] args,
+                       SecureResources secureResources) {
       this.datanode = node;
       this.conf = conf;
       this.dnArgs = args;
+      this.secureResources = secureResources;
     }
   }
 
@@ -573,7 +589,7 @@ public class MiniDFSCluster {
         manageNameDfsDirs, true, manageDataDfsDirs, manageDataDfsDirs,
         operation, racks, hosts,
         simulatedCapacities, null, true, false,
-        MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0), true, false);
+        MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0), true, false, false);
   }
 
   private void initMiniDFSCluster(
@@ -584,6 +600,7 @@ public class MiniDFSCluster {
       String[] hosts, long[] simulatedCapacities, String clusterId,
       boolean waitSafeMode, boolean setupHostsFile,
       MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown,
+      boolean checkDataNodeAddrConfig,
       boolean checkDataNodeHostConfig)
   throws IOException {
     ExitUtil.disableSystemExit();
@@ -647,7 +664,7 @@ public class MiniDFSCluster {
 
     // Start the DataNodes
     startDataNodes(conf, numDataNodes, manageDataDfsDirs, operation, racks,
-        hosts, simulatedCapacities, setupHostsFile, false, checkDataNodeHostConfig);
+        hosts, simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, checkDataNodeHostConfig);
     waitClusterUp();
     //make sure ProxyUsers uses the latest conf
     ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
@@ -1161,7 +1178,18 @@ public class MiniDFSCluster {
       if (hosts != null) {
         NetUtils.addStaticResolution(hosts[i - curDatanodesNum], "localhost");
       }
-      DataNode dn = DataNode.instantiateDataNode(dnArgs, dnConf);
+
+      SecureResources secureResources = null;
+      if (UserGroupInformation.isSecurityEnabled()) {
+        SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, dnConf);
+        try {
+          secureResources = SecureDataNodeStarter.getSecureResources(sslFactory, dnConf);
+        } catch (Exception ex) {
+          ex.printStackTrace();
+        }
+      }
+      DataNode dn = DataNode.instantiateDataNode(dnArgs, dnConf,
+                                                 secureResources);
       if(dn == null)
         throw new IOException("Cannot start DataNode in "
             + dnConf.get(DFS_DATANODE_DATA_DIR_KEY));
@@ -1176,7 +1204,7 @@ public class MiniDFSCluster {
                                   racks[i-curDatanodesNum]);
       }
       dn.runDatanodeDaemon();
-      dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs));
+      dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs, secureResources));
     }
     curDatanodesNum += numDataNodes;
     this.numDataNodes += numDataNodes;
@@ -1607,14 +1635,16 @@ public class MiniDFSCluster {
       boolean keepPort) throws IOException {
     Configuration conf = dnprop.conf;
     String[] args = dnprop.dnArgs;
+    SecureResources secureResources = dnprop.secureResources;
     Configuration newconf = new HdfsConfiguration(conf); // save cloned config
     if (keepPort) {
       InetSocketAddress addr = dnprop.datanode.getXferAddress();
       conf.set(DFS_DATANODE_ADDRESS_KEY, 
           addr.getAddress().getHostAddress() + ":" + addr.getPort());
     }
-    dataNodes.add(new DataNodeProperties(DataNode.createDataNode(args, conf),
-        newconf, args));
+    dataNodes.add(new DataNodeProperties(
+        DataNode.createDataNode(args, conf, secureResources),
+        newconf, args, secureResources));
     numDataNodes++;
     return true;
   }
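
A minimal usage sketch of the new checkDataNodeAddrConfig builder option follows. It is not part of the commit; the assumption (consistent with how the flag is threaded through startDataNodes above) is that a true value makes the cluster honor a DataNode address preset in the Configuration rather than assign its own.

  // Usage sketch under stated assumptions; not part of the commit above.
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.DFSConfigKeys;
  import org.apache.hadoop.hdfs.HdfsConfiguration;
  import org.apache.hadoop.hdfs.MiniDFSCluster;

  public class DataNodeAddrSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new HdfsConfiguration();
      // Preset a DataNode address; with checkDataNodeAddrConfig(true) the
      // cluster is expected to keep this value instead of overwriting it.
      conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
      MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
          .numDataNodes(1)
          .checkDataNodeAddrConfig(true)
          .build();
      try {
        System.out.println("DataNode xfer address: "
            + cluster.getDataNodes().get(0).getXferAddress());
      } finally {
        cluster.shutdown();
      }
    }
  }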

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java Thu Nov  8 19:09:46 2012
@@ -876,7 +876,14 @@ public class TestDFSClientRetries {
       new Random().nextBytes(bytes);
       out4.write(bytes);
       out4.write(bytes);
-      out4.hflush();
+      if (isWebHDFS) {
+        // WebHDFS does not support hflush. To keep the DataNode from
+        // communicating with the NN while we shut the NN down, call
+        // out4.close() to finish writing the data.
+        out4.close();
+      } else {
+        out4.hflush();
+      }
 
       //shutdown namenode
       assertTrue(HdfsUtils.isHealthy(uri));
@@ -889,10 +896,12 @@ public class TestDFSClientRetries {
         public void run() {
           try {
             //write some more data and then close the file
-            out4.write(bytes);
-            out4.write(bytes);
-            out4.write(bytes);
-            out4.close();
+            if (!isWebHDFS) {
+              out4.write(bytes);
+              out4.write(bytes);
+              out4.write(bytes);
+              out4.close();
+            }
           } catch (Exception e) {
             exceptions.add(e);
           }
@@ -975,7 +984,11 @@ public class TestDFSClientRetries {
           Assert.assertEquals(String.format("count=%d", count),
               bytes[count % bytes.length], (byte)r);
         }
-        Assert.assertEquals(5 * bytes.length, count);
+        if (!isWebHDFS) {
+          Assert.assertEquals(5 * bytes.length, count);
+        } else {
+          Assert.assertEquals(2 * bytes.length, count);
+        }
         in.close();
       }
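
The isWebHDFS branches above exist because WebHDFS output streams do not support hflush. A hedged helper sketch of the same decision (names hypothetical, not from this commit):

  import java.io.IOException;
  import org.apache.hadoop.fs.FSDataOutputStream;

  public class FlushSketch {
    /** Flush when supported; otherwise close to persist the bytes. */
    static void flushOrClose(FSDataOutputStream out, boolean supportsHflush)
        throws IOException {
      if (supportsHflush) {
        out.hflush();  // durable flush to the DataNode pipeline
      } else {
        out.close();   // e.g. WebHDFS: closing completes the write
      }
    }
  }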
 

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java Thu Nov  8 19:09:46 2012
@@ -1145,7 +1145,7 @@ public class TestDFSShell {
 
         args = new String[2];
         args[0] = "-touchz";
-        args[1] = "/test/mkdirs/noFileHere";
+        args[1] = "/test/mkdirs/isFileHere";
         val = -1;
         try {
           val = shell.run(args);
@@ -1157,7 +1157,7 @@ public class TestDFSShell {
 
         args = new String[2];
         args[0] = "-touchz";
-        args[1] = "/test/mkdirs/thisDirNotExists/noFileHere";
+        args[1] = "/test/mkdirs/thisDirNotExists/isFileHere";
         val = -1;
         try {
           val = shell.run(args);
@@ -1171,7 +1171,7 @@ public class TestDFSShell {
         args = new String[3];
         args[0] = "-test";
         args[1] = "-e";
-        args[2] = "/test/mkdirs/noFileHere";
+        args[2] = "/test/mkdirs/isFileHere";
         val = -1;
         try {
           val = shell.run(args);
@@ -1243,7 +1243,106 @@ public class TestDFSShell {
         }
         assertEquals(0, val);
       }
-        
+
+      // Verify -test -f negative case (missing file)
+      {
+        String[] args = new String[3];
+        args[0] = "-test";
+        args[1] = "-f";
+        args[2] = "/test/mkdirs/noFileHere";
+        int val = -1;
+        try {
+          val = shell.run(args);
+        } catch (Exception e) {
+          System.err.println("Exception raised from DFSShell.run " +
+                             e.getLocalizedMessage());
+        }
+        assertEquals(1, val);
+      }
+
+      // Verify -test -f negative case (directory rather than file)
+      {
+        String[] args = new String[3];
+        args[0] = "-test";
+        args[1] = "-f";
+        args[2] = "/test/mkdirs";
+        int val = -1;
+        try {
+          val = shell.run(args);
+        } catch (Exception e) {
+          System.err.println("Exception raised from DFSShell.run " +
+                             e.getLocalizedMessage());
+        }
+        assertEquals(1, val);
+      }
+
+      // Verify -test -f positive case
+      {
+        writeFile(fileSys, myFile);
+        assertTrue(fileSys.exists(myFile));
+
+        String[] args = new String[3];
+        args[0] = "-test";
+        args[1] = "-f";
+        args[2] = myFile.toString();
+        int val = -1;
+        try {
+          val = shell.run(args);
+        } catch (Exception e) {
+          System.err.println("Exception raised from DFSShell.run " +
+                             e.getLocalizedMessage());
+        }
+        assertEquals(0, val);
+      }
+
+      // Verify -test -s negative case (missing file)
+      {
+        String[] args = new String[3];
+        args[0] = "-test";
+        args[1] = "-s";
+        args[2] = "/test/mkdirs/noFileHere";
+        int val = -1;
+        try {
+          val = shell.run(args);
+        } catch (Exception e) {
+          System.err.println("Exception raised from DFSShell.run " +
+                             e.getLocalizedMessage());
+        }
+        assertEquals(1, val);
+      }
+
+      // Verify -test -s negative case (zero length file)
+      {
+        String[] args = new String[3];
+        args[0] = "-test";
+        args[1] = "-s";
+        args[2] = "/test/mkdirs/isFileHere";
+        int val = -1;
+        try {
+          val = shell.run(args);
+        } catch (Exception e) {
+          System.err.println("Exception raised from DFSShell.run " +
+                             e.getLocalizedMessage());
+        }
+        assertEquals(1, val);
+      }
+
+      // Verify -test -s positive case (nonzero length file)
+      {
+        String[] args = new String[3];
+        args[0] = "-test";
+        args[1] = "-s";
+        args[2] = myFile.toString();
+        int val = -1;
+        try {
+          val = shell.run(args);
+        } catch (Exception e) {
+          System.err.println("Exception raised from DFSShell.run " +
+                             e.getLocalizedMessage());
+        }
+        assertEquals(0, val);
+      }
+
     } finally {
       try {
         fileSys.close();
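
The new test blocks pin down the shell semantics: -test -f exits 0 only for an existing regular file, and -test -s exits 0 only for a file of nonzero length. A small driver sketch (the path is hypothetical):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FsShell;

  public class TestFlagSketch {
    public static void main(String[] args) throws Exception {
      FsShell shell = new FsShell(new Configuration());
      // 0 if /tmp/data exists and is a regular file, 1 otherwise
      int isFile = shell.run(new String[] {"-test", "-f", "/tmp/data"});
      // 0 if /tmp/data has nonzero length, 1 otherwise
      int nonEmpty = shell.run(new String[] {"-test", "-s", "/tmp/data"});
      System.out.println("-f=" + isFile + " -s=" + nonEmpty);
    }
  }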

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java Thu Nov  8 19:09:46 2012
@@ -119,8 +119,6 @@ public class TestDistributedFileSystem {
       DFSTestUtil.createFile(fileSys, p, 1L, (short)1, 0L);
       DFSTestUtil.readFile(fileSys, p);
       
-      DFSClient client = ((DistributedFileSystem)fileSys).dfs;
-
       fileSys.close();
       
     } finally {
@@ -476,7 +474,7 @@ public class TestDistributedFileSystem {
       fail("Expecting FileNotFoundException");
     } catch (FileNotFoundException e) {
       assertTrue("Not throwing the intended exception message", e.getMessage()
-          .contains("File does not exist: /test/TestExistingDir"));
+          .contains("Path is not a file: /test/TestExistingDir"));
     }
     
     //hftp
@@ -542,6 +540,21 @@ public class TestDistributedFileSystem {
       final FileChecksum webhdfs_qfoocs = webhdfs.getFileChecksum(webhdfsqualified);
       System.out.println("webhdfs_qfoocs=" + webhdfs_qfoocs);
 
+      //create a zero byte file
+      final Path zeroByteFile = new Path(dir, "zeroByteFile" + n);
+      {
+        final FSDataOutputStream out = hdfs.create(zeroByteFile, false, buffer_size,
+            (short)2, block_size);
+        out.close();
+      }
+
+      // verify the magic val for zero byte files
+      {
+        final FileChecksum zeroChecksum = hdfs.getFileChecksum(zeroByteFile);
+        assertEquals(zeroChecksum.toString(),
+            "MD5-of-0MD5-of-0CRC32:70bc8f4b72a86921468bf8e8441dce51");
+      }
+
       //write another file
       final Path bar = new Path(dir, "bar" + n);
       {
@@ -697,7 +710,6 @@ public class TestDistributedFileSystem {
   @Test
   public void testCreateWithCustomChecksum() throws Exception {
     Configuration conf = getTestConfiguration();
-    final long grace = 1000L;
     MiniDFSCluster cluster = null;
     Path testBasePath = new Path("/test/csum");
     // create args 

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java Thu Nov  8 19:09:46 2012
@@ -35,6 +35,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -42,18 +43,17 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.util.ServletUtil;
 import org.apache.log4j.Level;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.*;
 
 public class TestHftpFileSystem {
   private static final Random RAN = new Random();
   
   private static Configuration config = null;
   private static MiniDFSCluster cluster = null;
-  private static FileSystem hdfs = null;
-  private static HftpFileSystem hftpFs = null;
   private static String blockPoolId = null;
+  private static String hftpUri = null;
+  private FileSystem hdfs = null;
+  private HftpFileSystem hftpFs = null;
 
   private static Path[] TEST_PATHS = new Path[] {
       // URI does not encode, Request#getPathInfo returns /foo
@@ -93,26 +93,33 @@ public class TestHftpFileSystem {
 
     config = new Configuration();
     cluster = new MiniDFSCluster.Builder(config).numDataNodes(2).build();
-    hdfs = cluster.getFileSystem();
     blockPoolId = cluster.getNamesystem().getBlockPoolId();
-    final String hftpUri = 
+    hftpUri = 
       "hftp://" + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
-    hftpFs = (HftpFileSystem) new Path(hftpUri).getFileSystem(config);
   }
   
   @AfterClass
   public static void tearDown() throws IOException {
-    if (hdfs != null) {
-      hdfs.close();
-    }
-    if (hftpFs != null) {
-      hftpFs.close();
-    }
     if (cluster != null) {
       cluster.shutdown();
     }
   }
+  
+  @Before
+  public void initFileSystems() throws IOException {
+    hdfs = cluster.getFileSystem();
+    hftpFs = (HftpFileSystem) new Path(hftpUri).getFileSystem(config);
+    // clear out the namespace
+    for (FileStatus stat : hdfs.listStatus(new Path("/"))) {
+      hdfs.delete(stat.getPath(), true);
+    }
+  }
 
+  @After
+  public void resetFileSystems() throws IOException {
+    FileSystem.closeAll();
+  }
+  
   /**
    * Test file creation and access with file names that need encoding. 
    */
@@ -280,19 +287,8 @@ public class TestHftpFileSystem {
     assertEquals("Stream closed", ioe.getMessage());
   }
   
-  public void resetFileSystem() throws IOException {
-    // filesystem caching has a quirk/bug that it caches based on the user's
-    // given uri.  the result is if a filesystem is instantiated with no port,
-    // it gets the default port.  then if the default port is changed,
-    // and another filesystem is instantiated with no port, the prior fs
-    // is returned, not a new one using the changed port.  so let's flush
-    // the cache between tests...
-    FileSystem.closeAll();
-  }
-  
   @Test
   public void testHftpDefaultPorts() throws IOException {
-    resetFileSystem();
     Configuration conf = new Configuration();
     URI uri = URI.create("hftp://localhost");
     HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
@@ -309,7 +305,6 @@ public class TestHftpFileSystem {
   
   @Test
   public void testHftpCustomDefaultPorts() throws IOException {
-    resetFileSystem();
     Configuration conf = new Configuration();
     conf.setInt("dfs.http.port", 123);
     conf.setInt("dfs.https.port", 456);
@@ -329,7 +324,6 @@ public class TestHftpFileSystem {
 
   @Test
   public void testHftpCustomUriPortWithDefaultPorts() throws IOException {
-    resetFileSystem();
     Configuration conf = new Configuration();
     URI uri = URI.create("hftp://localhost:123");
     HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
@@ -346,7 +340,6 @@ public class TestHftpFileSystem {
 
   @Test
   public void testHftpCustomUriPortWithCustomDefaultPorts() throws IOException {
-    resetFileSystem();
     Configuration conf = new Configuration();
     conf.setInt("dfs.http.port", 123);
     conf.setInt("dfs.https.port", 456);
@@ -368,7 +361,6 @@ public class TestHftpFileSystem {
 
   @Test
   public void testHsftpDefaultPorts() throws IOException {
-    resetFileSystem();
     Configuration conf = new Configuration();
     URI uri = URI.create("hsftp://localhost");
     HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
@@ -385,7 +377,6 @@ public class TestHftpFileSystem {
 
   @Test
   public void testHsftpCustomDefaultPorts() throws IOException {
-    resetFileSystem();
     Configuration conf = new Configuration();
     conf.setInt("dfs.http.port", 123);
     conf.setInt("dfs.https.port", 456);
@@ -405,7 +396,6 @@ public class TestHftpFileSystem {
 
   @Test
   public void testHsftpCustomUriPortWithDefaultPorts() throws IOException {
-    resetFileSystem();
     Configuration conf = new Configuration();
     URI uri = URI.create("hsftp://localhost:123");
     HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
@@ -422,7 +412,6 @@ public class TestHftpFileSystem {
 
   @Test
   public void testHsftpCustomUriPortWithCustomDefaultPorts() throws IOException {
-    resetFileSystem();
     Configuration conf = new Configuration();
     conf.setInt("dfs.http.port", 123);
     conf.setInt("dfs.https.port", 456);
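
The per-test FileSystem.closeAll() replaces the deleted resetFileSystem() helper, whose comment described a caching quirk: FileSystem.get caches by the caller-supplied URI, so an instance created without an explicit port keeps whatever default port was in effect first. A sketch of that assumed behavior:

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;

  public class FsCacheSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      FileSystem a = FileSystem.get(URI.create("hftp://localhost"), conf);
      conf.setInt("dfs.http.port", 123);  // change the default port
      FileSystem b = FileSystem.get(URI.create("hftp://localhost"), conf);
      // Expectation per the deleted comment: a == b, still on the old port.
      System.out.println("same cached instance: " + (a == b));
      FileSystem.closeAll();  // flush the cache between tests
    }
  }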

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java Thu Nov  8 19:09:46 2012
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.bl
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
@@ -44,6 +45,10 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.util.Time;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
@@ -375,7 +380,71 @@ public class TestReplicationPolicy {
         new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 3);
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
-    assertFalse(cluster.isOnSameRack(targets[0], targets[1]));    
+    assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
+  }
+  
+  /**
+   * In this test case, try to choose more targets than there are available
+   * nodes, and check the result.
+   * @throws Exception
+   */
+  @Test
+  public void testChooseTargetWithMoreThanAvaiableNodes() throws Exception {
+    // make data nodes 0 and 1 unqualified as targets: not enough disk space
+    for(int i=0; i<2; i++) {
+      dataNodes[i].updateHeartbeat(
+          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0);
+    }
+    
+    final TestAppender appender = new TestAppender();
+    final Logger logger = Logger.getRootLogger();
+    logger.addAppender(appender);
+    
+    // try to choose NUM_OF_DATANODES which is more than actually available
+    // nodes.
+    DatanodeDescriptor[] targets = replicator.chooseTarget(filename, 
+        NUM_OF_DATANODES, dataNodes[0], new ArrayList<DatanodeDescriptor>(),
+        BLOCK_SIZE);
+    assertEquals(targets.length, NUM_OF_DATANODES - 2);
+
+    final List<LoggingEvent> log = appender.getLog();
+    assertNotNull(log);
+    assertFalse(log.size() == 0);
+    final LoggingEvent lastLogEntry = log.get(log.size() - 1);
+    
+    assertEquals(lastLogEntry.getLevel(), Level.WARN);
+    // Replicas are supposed to be placed on each node, but two data nodes
+    // are unavailable for placement, so we expect a shortfall of 2
+    assertTrue(((String)lastLogEntry.getMessage()).contains("in need of 2"));
+    
+    for(int i=0; i<2; i++) {
+      dataNodes[i].updateHeartbeat(
+          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
+    }
+  }
+  
+  class TestAppender extends AppenderSkeleton {
+    private final List<LoggingEvent> log = new ArrayList<LoggingEvent>();
+
+    @Override
+    public boolean requiresLayout() {
+      return false;
+    }
+
+    @Override
+    protected void append(final LoggingEvent loggingEvent) {
+      log.add(loggingEvent);
+    }
+
+    @Override
+    public void close() {
+    }
+
+    public List<LoggingEvent> getLog() {
+      return new ArrayList<LoggingEvent>(log);
+    }
   }
 
   private boolean containsWithinRange(DatanodeDescriptor target,
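
For reference, a minimal sketch of the log-capture pattern used above (log4j 1.x, as in these tests); detaching the appender in a finally block is an extra precaution the test itself omits:

  import java.util.ArrayList;
  import java.util.List;
  import org.apache.log4j.AppenderSkeleton;
  import org.apache.log4j.Level;
  import org.apache.log4j.Logger;
  import org.apache.log4j.spi.LoggingEvent;

  public class LogCaptureSketch {
    static class CapturingAppender extends AppenderSkeleton {
      final List<LoggingEvent> log = new ArrayList<LoggingEvent>();
      @Override protected void append(LoggingEvent e) { log.add(e); }
      @Override public boolean requiresLayout() { return false; }
      @Override public void close() { }
    }

    public static void main(String[] args) {
      CapturingAppender appender = new CapturingAppender();
      Logger root = Logger.getRootLogger();
      root.addAppender(appender);
      try {
        root.warn("Not able to place enough replicas, still in need of 2");
        LoggingEvent last = appender.log.get(appender.log.size() - 1);
        System.out.println(last.getLevel() == Level.WARN
            ? "captured WARN: " + last.getMessage() : "unexpected");
      } finally {
        root.removeAppender(appender);  // avoid leaking into other tests
      }
    }
  }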

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java Thu Nov  8 19:09:46 2012
@@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.junit.Test;
 
 public class TestUnderReplicatedBlocks {
@@ -49,6 +50,12 @@ public class TestUnderReplicatedBlocks {
       ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
       DatanodeDescriptor dn = bm.blocksMap.nodeIterator(b.getLocalBlock()).next();
       bm.addToInvalidates(b.getLocalBlock(), dn);
+      // Compute the invalidate work in NN, and trigger the heartbeat from DN
+      BlockManagerTestUtil.computeAllPendingWork(bm);
+      DataNodeTestUtils.triggerHeartbeat(cluster.getDataNode(dn.getIpcPort()));
+      // Wait to make sure the DataNode receives the deletion request 
+      Thread.sleep(1000);
+      // Remove the record from blocksMap
       bm.blocksMap.removeNode(b.getLocalBlock(), dn);
       
       // increment this file's replication factor
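
The fixed Thread.sleep(1000) gives the DataNode time to act on the deletion command. A hedged alternative sketch (helper name hypothetical) polls a condition with a timeout instead, which is less sensitive to machine speed:

  import java.util.concurrent.Callable;
  import java.util.concurrent.TimeoutException;

  public class WaitSketch {
    /** Poll check until it returns true or timeoutMs elapses. */
    static void waitFor(Callable<Boolean> check, long timeoutMs)
        throws Exception {
      long deadline = System.currentTimeMillis() + timeoutMs;
      while (!check.call()) {
        if (System.currentTimeMillis() > deadline) {
          throw new TimeoutException("condition not met in " + timeoutMs + "ms");
        }
        Thread.sleep(50);  // short poll interval
      }
    }
  }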

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java Thu Nov  8 19:09:46 2012
@@ -83,8 +83,7 @@ public class TestBlockUnderConstruction 
   private void verifyFileBlocks(String file,
                                 boolean isFileOpen) throws IOException {
     FSNamesystem ns = cluster.getNamesystem();
-    INodeFile inode = ns.dir.getFileINode(file);
-    assertTrue("File does not exist: " + inode.toString(), inode != null);
+    final INodeFile inode = INodeFile.valueOf(ns.dir.getINode(file), file);
     assertTrue("File " + inode.toString() +
         " isUnderConstruction = " + inode.isUnderConstruction() +
         " expected to be " + isFileOpen,

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java Thu Nov  8 19:09:46 2012
@@ -32,7 +32,6 @@ import org.apache.hadoop.fs.permission.F
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.FSLimitException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -60,17 +59,11 @@ public class TestFsLimits {
     return fsn;
   }
   
-  private static class TestFSDirectory extends FSDirectory {
-    public TestFSDirectory() throws IOException {
+  private static class MockFSDirectory extends FSDirectory {
+    public MockFSDirectory() throws IOException {
       super(new FSImage(conf), getMockNamesystem(), conf);
       setReady(fsIsReady);
     }
-    
-    @Override
-    public <T extends INode> void verifyFsLimits(INode[] pathComponents,
-        int pos, T child) throws FSLimitException {
-      super.verifyFsLimits(pathComponents, pos, child);
-    }
   }
 
   @Before
@@ -157,7 +150,7 @@ public class TestFsLimits {
   private void addChildWithName(String name, Class<?> expected)
   throws Exception {
     // have to create after the caller has had a chance to set conf values
-    if (fs == null) fs = new TestFSDirectory();
+    if (fs == null) fs = new MockFSDirectory();
 
     INode child = new INodeDirectory(name, perms);
     child.setLocalName(name);

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Thu Nov  8 19:09:46 2012
@@ -64,6 +64,7 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.Result;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.tools.DFSck;
@@ -678,11 +679,11 @@ public class TestFsck {
       DFSTestUtil.waitReplication(fs, filePath, (short)1);
       
       // intentionally corrupt NN data structure
-      INodeFile node = 
-        (INodeFile)cluster.getNamesystem().dir.rootDir.getNode(fileName,
-                                                               true);
-      assertEquals(node.blocks.length, 1);
-      node.blocks[0].setNumBytes(-1L);  // set the block length to be negative
+      INodeFile node = (INodeFile)cluster.getNamesystem().dir.rootDir.getNode(
+          fileName, true);
+      final BlockInfo[] blocks = node.getBlocks(); 
+      assertEquals(blocks.length, 1);
+      blocks[0].setNumBytes(-1L);  // set the block length to be negative
       
       // run fsck and expect a failure with -1 as the error code
       String outStr = runFsck(conf, -1, true, fileName);


