hadoop-common-commits mailing list archives

From: omal...@apache.org
Subject: svn commit: r673857 [4/6] - in /hadoop/core/trunk: ./ bin/ conf/ docs/ src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/ src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/ src/contrib/index/src/java/org/apache/hadoop/contri...
Date: Thu, 03 Jul 2008 22:55:18 GMT
Added: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=673857&view=auto
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (added)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Thu Jul  3 15:55:06 2008
@@ -0,0 +1,444 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.FileNotFoundException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+
+
+/**
+ * Directory INode class.
+ */
+public class INodeDirectory extends INode {
+  protected static final int DEFAULT_FILES_PER_DIRECTORY = 5;
+  final static String ROOT_NAME = "";
+
+  private List<INode> children;
+
+  INodeDirectory(String name, PermissionStatus permissions) {
+    super(name, permissions);
+    this.children = null;
+  }
+
+  public INodeDirectory(PermissionStatus permissions, long mTime) {
+    super(permissions, mTime);
+    this.children = null;
+  }
+
+  /** constructor */
+  INodeDirectory(byte[] localName, PermissionStatus permissions, long mTime) {
+    this(permissions, mTime);
+    this.name = localName;
+  }
+  
+  /** Copy constructor.
+   * 
+   * @param other the directory inode to copy
+   */
+  INodeDirectory(INodeDirectory other) {
+    super(other);
+    this.children = other.getChildren();
+  }
+  
+  /**
+   * Check whether it's a directory
+   */
+  public boolean isDirectory() {
+    return true;
+  }
+
+  INode removeChild(INode node) {
+    assert children != null;
+    int low = Collections.binarySearch(children, node.name);
+    if (low >= 0) {
+      return children.remove(low);
+    } else {
+      return null;
+    }
+  }
+
+  /** Replace the existing child that has the same name as newChild with newChild.
+   * 
+   * @param newChild the child node that replaces the existing child of the same name
+   */
+  void replaceChild(INode newChild) {
+    if ( children == null ) {
+      throw new IllegalArgumentException("The directory is empty");
+    }
+    int low = Collections.binarySearch(children, newChild.name);
+    if (low>=0) { // an old child exists so replace by the newChild
+      children.set(low, newChild);
+    } else {
+      throw new IllegalArgumentException("No child exists to be replaced");
+    }
+  }
+  
+  INode getChild(String name) {
+    return getChildINode(string2Bytes(name));
+  }
+
+  private INode getChildINode(byte[] name) {
+    if (children == null) {
+      return null;
+    }
+    int low = Collections.binarySearch(children, name);
+    if (low >= 0) {
+      return children.get(low);
+    }
+    return null;
+  }
+
+  /**
+   * Retrieve the INode at the given path, or null if the path does not fully exist.
+   */
+  private INode getNode(byte[][] components) {
+    INode[] inode  = new INode[1];
+    getExistingPathINodes(components, inode);
+    return inode[0];
+  }
+
+  /**
+   * Retrieve the INode at the given path string, or null if it does not exist.
+   * This is the external interface.
+   */
+  INode getNode(String path) {
+    return getNode(getPathComponents(path));
+  }
+
+  /**
+   * Retrieve existing INodes from a path. If existing is big enough to store
+   * all path components (existing and non-existing), then existing INodes
+   * will be stored starting from the root INode into existing[0]; if
+   * existing is not big enough to store all path components, then only the
+   * last existing and non-existing INodes will be stored so that
+   * existing[existing.length-1] refers to the target INode.
+   * 
+   * <p>
+   * Example: <br>
+   * Given the path /c1/c2/c3 where only /c1/c2 exists, resulting in the
+   * following path components: ["","c1","c2","c3"],
+   * 
+   * <p>
+   * <code>getExistingPathINodes(["","c1","c2"], [?])</code> should fill the
+   * array with [c2] <br>
+   * <code>getExistingPathINodes(["","c1","c2","c3"], [?])</code> should fill the
+   * array with [null]
+   * 
+   * <p>
+   * <code>getExistingPathINodes(["","c1","c2"], [?,?])</code> should fill the
+   * array with [c1,c2] <br>
+   * <code>getExistingPathINodes(["","c1","c2","c3"], [?,?])</code> should fill
+   * the array with [c2,null]
+   * 
+   * <p>
+   * <code>getExistingPathINodes(["","c1","c2"], [?,?,?,?])</code> should fill
+   * the array with [rootINode,c1,c2,null], <br>
+   * <code>getExistingPathINodes(["","c1","c2","c3"], [?,?,?,?])</code> should
+   * fill the array with [rootINode,c1,c2,null]
+   * @param components array of path component names
+   * @param existing INode array to fill with existing INodes
+   * @return number of existing INodes in the path
+   */
+  int getExistingPathINodes(byte[][] components, INode[] existing) {
+    assert compareBytes(this.name, components[0]) == 0 :
+      "Incorrect name " + getLocalName() + " expected " + components[0];
+
+    INode curNode = this;
+    int count = 0;
+    int index = existing.length - components.length;
+    if (index > 0)
+      index = 0;
+    while ((count < components.length) && (curNode != null)) {
+      if (index >= 0)
+        existing[index] = curNode;
+      if (!curNode.isDirectory() || (count == components.length - 1))
+        break; // no more child, stop here
+      INodeDirectory parentDir = (INodeDirectory)curNode;
+      curNode = parentDir.getChildINode(components[count + 1]);
+      count += 1;
+      index += 1;
+    }
+    return count;
+  }
+
+  /**
+   * Retrieve the existing INodes along the given path. The first INode
+   * always exists and is this INode.
+   * 
+   * @param path the path to explore
+   * @return INodes array containing the existing INodes in the order they
+   *         appear when following the path from the root INode to the
+   *         deepest INode. The array size will be the number of expected
+   *         components in the path, and non-existing components will be
+   *         filled with null
+   */
+  INode[] getExistingPathINodes(String path) {
+    byte[][] components = getPathComponents(path);
+    INode[] inodes = new INode[components.length];
+
+    this.getExistingPathINodes(components, inodes);
+    
+    return inodes;
+  }
+
+  /**
+   * Add a child inode to the directory.
+   * 
+   * @param node INode to insert
+   * @param inheritPermission inherit permission from parent?
+   * @return  null if the child with this name already exists; 
+   *          inserted INode, otherwise
+   */
+  <T extends INode> T addChild(final T node, boolean inheritPermission) {
+    if (inheritPermission) {
+      FsPermission p = getFsPermission();
+      // make sure the permission has wx (write and execute) for the user
+      if (!p.getUserAction().implies(FsAction.WRITE_EXECUTE)) {
+        p = new FsPermission(p.getUserAction().or(FsAction.WRITE_EXECUTE),
+            p.getGroupAction(), p.getOtherAction());
+      }
+      node.setPermission(p);
+    }
+
+    if (children == null) {
+      children = new ArrayList<INode>(DEFAULT_FILES_PER_DIRECTORY);
+    }
+    int low = Collections.binarySearch(children, node.name);
+    if(low >= 0)
+      return null;
+    node.parent = this;
+    children.add(-low - 1, node);
+    // update modification time of the parent directory
+    setModificationTime(node.getModificationTime());
+    if (node.getGroupName() == null) {
+      node.setGroup(getGroupName());
+    }
+    return node;
+  }
+
+  /**
+   * Equivalent to addNode(path, newNode, false).
+   * @see #addNode(String, INode, boolean)
+   */
+  <T extends INode> T addNode(String path, T newNode) throws FileNotFoundException {
+    return addNode(path, newNode, false);
+  }
+  /**
+   * Add a new INode to the file tree.
+   * Find the parent and insert the new node into it.
+   * 
+   * @param path file path
+   * @param newNode INode to be added
+   * @param inheritPermission If true, copy the parent's permission to newNode.
+   * @return null if the node already exists; inserted INode, otherwise
+   * @throws FileNotFoundException if parent does not exist or 
+   * is not a directory.
+   */
+  <T extends INode> T addNode(String path, T newNode, boolean inheritPermission
+      ) throws FileNotFoundException {
+    if(addToParent(path, newNode, null, inheritPermission) == null)
+      return null;
+    return newNode;
+  }
+
+  /**
+   * Add new inode to the parent if specified.
+   * Optimized version of addNode() if parent is not null.
+   * 
+   * @return  parent INode if new inode is inserted
+   *          or null if it already exists.
+   * @throws  FileNotFoundException if parent does not exist or 
+   *          is not a directory.
+   */
+  <T extends INode> INodeDirectory addToParent(
+                                      String path,
+                                      T newNode,
+                                      INodeDirectory parent,
+                                      boolean inheritPermission
+                                    ) throws FileNotFoundException {
+    byte[][] pathComponents = getPathComponents(path);
+    assert pathComponents != null : "Incorrect path " + path;
+    int pathLen = pathComponents.length;
+    if (pathLen < 2)  // the path is the root itself; nothing to add
+      return null;
+    if(parent == null) {
+      // Gets the parent INode
+      INode[] inodes  = new INode[2];
+      getExistingPathINodes(pathComponents, inodes);
+      INode inode = inodes[0];
+      if (inode == null) {
+        throw new FileNotFoundException("Parent path does not exist: "+path);
+      }
+      if (!inode.isDirectory()) {
+        throw new FileNotFoundException("Parent path is not a directory: "+path);
+      }
+      parent = (INodeDirectory)inode;
+    }
+    // insert into the parent children list
+    newNode.name = pathComponents[pathLen-1];
+    if(parent.addChild(newNode, inheritPermission) == null)
+      return null;
+    return parent;
+  }
+
+  /**
+   * Count the inodes in the subtree rooted at this directory, including this directory.
+   */
+  long numItemsInTree() {
+    long total = 1L;
+    if (children == null) {
+      return total;
+    }
+    for (INode child : children) {
+      total += child.numItemsInTree();
+    }
+    return total;
+  }
+
+  /** {@inheritDoc} */
+  long[] computeContentSummary(long[] summary) {
+    if (children != null) {
+      for (INode child : children) {
+        child.computeContentSummary(summary);
+      }
+    }
+    summary[2]++;
+    return summary;
+  }
+
+  /**
+   * Get the list of children, or an empty list if this directory has none.
+   */
+  List<INode> getChildren() {
+    return children==null ? new ArrayList<INode>() : children;
+  }
+  List<INode> getChildrenRaw() {
+    return children;
+  }
+
+  int collectSubtreeBlocksAndClear(List<Block> v) {
+    int total = 1;
+    if (children == null) {
+      return total;
+    }
+    for (INode child : children) {
+      total += child.collectSubtreeBlocksAndClear(v);
+    }
+    parent = null;
+    children = null;
+    return total;
+  }
+}
+
+/**
+ * Directory INode class that has a quota restriction
+ */
+class INodeDirectoryWithQuota extends INodeDirectory {
+  private long quota;
+  private long count;
+  
+  /** Convert an existing directory inode to one with the given quota
+   * 
+   * @param quota Quota to be assigned to this inode
+   * @param other The other inode from which all other properties are copied
+   */
+  INodeDirectoryWithQuota(long quota, INodeDirectory other)
+  throws QuotaExceededException {
+    super(other);
+    this.count = other.numItemsInTree();
+    setQuota(quota);
+  }
+  
+  /** constructor with no quota verification */
+  INodeDirectoryWithQuota(
+      PermissionStatus permissions, long modificationTime, long quota)
+  {
+    super(permissions, modificationTime);
+    this.quota = quota;
+  }
+  
+  /** constructor with no quota verification */
+  INodeDirectoryWithQuota(String name, PermissionStatus permissions, long quota)
+  {
+    super(name, permissions);
+    this.quota = quota;
+  }
+  
+  /** Get this directory's quota
+   * @return this directory's quota
+   */
+  long getQuota() {
+    return quota;
+  }
+  
+  /** Set this directory's quota
+   * 
+   * @param quota Quota to be set
+   * @throws QuotaExceededException if the given quota is less than 
+   *                                the size of the tree
+   */
+  void setQuota(long quota) throws QuotaExceededException {
+    verifyQuota(quota, this.count);
+    this.quota = quota;
+  }
+  
+  /** Get the number of names in the subtree rooted at this directory
+   * @return the size of the subtree rooted at this directory
+   */
+  long numItemsInTree() {
+    return count;
+  }
+  
+  /** Update the size of the tree
+   * 
+   * @param delta the change of the tree size
+   * @throws QuotaExceededException if the changed size is greater 
+   *                                than the quota
+   */
+  void updateNumItemsInTree(long delta) throws QuotaExceededException {
+    long newCount = this.count + delta;
+    if (delta>0) {
+      verifyQuota(this.quota, newCount);
+    }
+    this.count = newCount;
+  }
+  
+  /** Set the size of the tree rooted at this directory
+   * 
+   * @param count size of the directory to be set
+   * @throws QuotaExceededException if the given count is greater than quota
+   */
+  void setCount(long count) throws QuotaExceededException {
+    verifyQuota(this.quota, count);
+    this.count = count;
+  }
+  
+  /** Verify if the count satisfies the quota restriction 
+   * @throws QuotaExceededException if the given quota is less than the count
+   */
+  private static void verifyQuota(long quota, long count)
+  throws QuotaExceededException {
+    if (quota < count) {
+      throw new QuotaExceededException(quota, count);
+    }
+  }
+}
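
The new INodeDirectory keeps its children list sorted by name and locates entries with Collections.binarySearch, inserting at -low - 1 when the search misses (see addChild/removeChild above). The standalone sketch below is an illustration only, not part of this commit; the class name SortedChildrenSketch is made up here and plain Strings stand in for INodes.

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public class SortedChildrenSketch {
      public static void main(String[] args) {
        List<String> children = new ArrayList<String>();
        // Insert while keeping the list sorted: binarySearch returns
        // (-(insertionPoint) - 1) when the key is absent, so -low - 1 is the slot.
        for (String name : new String[] {"c2", "c1", "c3"}) {
          int low = Collections.binarySearch(children, name);
          if (low < 0) {
            children.add(-low - 1, name);
          }
        }
        System.out.println(children);   // [c1, c2, c3]

        // Lookup/removal, as getChildINode()/removeChild() do: a non-negative
        // result means the child exists at that index.
        int low = Collections.binarySearch(children, "c2");
        if (low >= 0) {
          children.remove(low);
        }
        System.out.println(children);   // [c1, c3]
      }
    }

Keeping the list sorted this way makes every name lookup O(log n) at the cost of an O(n) array shift on insert and remove.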

Added: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=673857&view=auto
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (added)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Thu Jul  3 15:55:06 2008
@@ -0,0 +1,165 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
+
+public class INodeFile extends INode {
+  static final FsPermission UMASK = FsPermission.createImmutable((short)0111);
+
+  protected BlockInfo blocks[] = null;
+  protected short blockReplication;
+  protected long preferredBlockSize;
+
+  INodeFile(PermissionStatus permissions,
+            int nrBlocks, short replication, long modificationTime,
+            long preferredBlockSize) {
+    this(permissions, new BlockInfo[nrBlocks], replication,
+        modificationTime, preferredBlockSize);
+  }
+
+  protected INodeFile() {
+    blocks = null;
+    blockReplication = 0;
+    preferredBlockSize = 0;
+  }
+
+  protected INodeFile(PermissionStatus permissions, BlockInfo[] blklist,
+                      short replication, long modificationTime,
+                      long preferredBlockSize) {
+    super(permissions, modificationTime);
+    this.blockReplication = replication;
+    this.preferredBlockSize = preferredBlockSize;
+    blocks = blklist;
+  }
+
+  /**
+   * Set the {@link FsPermission} of this {@link INodeFile}.
+   * Since this is a file,
+   * the {@link FsAction#EXECUTE} action, if any, is ignored.
+   */
+  protected void setPermission(FsPermission permission) {
+    super.setPermission(permission.applyUMask(UMASK));
+  }
+
+  public boolean isDirectory() {
+    return false;
+  }
+
+  /**
+   * Get block replication for the file 
+   * @return block replication
+   */
+  public short getReplication() {
+    return this.blockReplication;
+  }
+
+  void setReplication(short replication) {
+    this.blockReplication = replication;
+  }
+
+  /**
+   * Get file blocks 
+   * @return file blocks
+   */
+  BlockInfo[] getBlocks() {
+    return this.blocks;
+  }
+
+  /**
+   * Add a block to the end of the block list.
+   */
+  void addBlock(BlockInfo newblock) {
+    if (this.blocks == null) {
+      this.blocks = new BlockInfo[1];
+      this.blocks[0] = newblock;
+    } else {
+      int size = this.blocks.length;
+      BlockInfo[] newlist = new BlockInfo[size + 1];
+      for (int i = 0; i < size; i++) {
+        newlist[i] = this.blocks[i];
+      }
+      newlist[size] = newblock;
+      this.blocks = newlist;
+    }
+  }
+
+  /**
+   * Set the block at the given index.
+   */
+  void setBlock(int idx, BlockInfo blk) {
+    this.blocks[idx] = blk;
+  }
+
+  int collectSubtreeBlocksAndClear(List<Block> v) {
+    parent = null;
+    for (Block blk : blocks) {
+      v.add(blk);
+    }
+    blocks = null;
+    return 1;
+  }
+
+  /** {@inheritDoc} */
+  long[] computeContentSummary(long[] summary) {
+    long bytes = 0;
+    for(Block blk : blocks) {
+      bytes += blk.getNumBytes();
+    }
+    summary[0] += bytes;
+    summary[1]++;
+    return summary;
+  }
+
+  /**
+   * Get the preferred block size of the file.
+   * @return the number of bytes
+   */
+  public long getPreferredBlockSize() {
+    return preferredBlockSize;
+  }
+
+  /**
+   * Return the penultimate allocated block for this file.
+   */
+  Block getPenultimateBlock() {
+    if (blocks == null || blocks.length <= 1) {
+      return null;
+    }
+    return blocks[blocks.length - 2];
+  }
+
+  INodeFileUnderConstruction toINodeFileUnderConstruction(
+      String clientName, String clientMachine, DatanodeDescriptor clientNode
+      ) throws IOException {
+    if (isUnderConstruction()) {
+      return (INodeFileUnderConstruction)this;
+    }
+    return new INodeFileUnderConstruction(name,
+        blockReplication, modificationTime, preferredBlockSize,
+        blocks, getPermissionStatus(),
+        clientName, clientMachine, clientNode);
+  }
+}
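
INodeFile.addBlock() above grows the blocks array by one slot with a manual copy loop. As an illustration only (not part of this commit), the same copy-and-append pattern can be written with java.util.Arrays.copyOf; the class name BlockListAppendSketch is made up and a plain long[] stands in for BlockInfo[].

    import java.util.Arrays;

    public class BlockListAppendSketch {
      // Copy-and-append: allocate a new array one slot longer and put the new
      // entry at the end, as INodeFile.addBlock() does with its manual loop.
      static long[] addBlock(long[] blocks, long newBlock) {
        if (blocks == null) {
          return new long[] { newBlock };
        }
        long[] newList = Arrays.copyOf(blocks, blocks.length + 1);
        newList[blocks.length] = newBlock;
        return newList;
      }

      public static void main(String[] args) {
        long[] blocks = null;
        for (long id = 1; id <= 3; id++) {
          blocks = addBlock(blocks, id);
        }
        System.out.println(Arrays.toString(blocks));   // [1, 2, 3]
      }
    }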

Added: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java?rev=673857&view=auto
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java (added)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java Thu Jul  3 15:55:06 2008
@@ -0,0 +1,168 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
+
+
+public class INodeFileUnderConstruction extends INodeFile {
+  StringBytesWritable clientName = null;         // lease holder
+  StringBytesWritable clientMachine = null;
+  DatanodeDescriptor clientNode = null; // if client is a cluster node too.
+
+  private int primaryNodeIndex = -1; //the node working on lease recovery
+  private DatanodeDescriptor[] targets = null;   //locations for last block
+  
+  INodeFileUnderConstruction() {}
+
+  INodeFileUnderConstruction(PermissionStatus permissions,
+                             short replication,
+                             long preferredBlockSize,
+                             long modTime,
+                             String clientName,
+                             String clientMachine,
+                             DatanodeDescriptor clientNode) 
+                             throws IOException {
+    super(permissions.applyUMask(UMASK), 0, replication, modTime,
+        preferredBlockSize);
+    this.clientName = new StringBytesWritable(clientName);
+    this.clientMachine = new StringBytesWritable(clientMachine);
+    this.clientNode = clientNode;
+  }
+
+  public INodeFileUnderConstruction(byte[] name,
+                             short blockReplication,
+                             long modificationTime,
+                             long preferredBlockSize,
+                             BlockInfo[] blocks,
+                             PermissionStatus perm,
+                             String clientName,
+                             String clientMachine,
+                             DatanodeDescriptor clientNode)
+                             throws IOException {
+    super(perm, blocks, blockReplication, modificationTime, 
+          preferredBlockSize);
+    setLocalName(name);
+    this.clientName = new StringBytesWritable(clientName);
+    this.clientMachine = new StringBytesWritable(clientMachine);
+    this.clientNode = clientNode;
+  }
+
+  String getClientName() throws IOException {
+    return clientName.getString();
+  }
+
+  String getClientMachine() throws IOException {
+    return clientMachine.getString();
+  }
+
+  DatanodeDescriptor getClientNode() {
+    return clientNode;
+  }
+
+  /**
+   * Is this inode being constructed?
+   */
+  @Override
+  boolean isUnderConstruction() {
+    return true;
+  }
+
+  DatanodeDescriptor[] getTargets() {
+    return targets;
+  }
+
+  void setTargets(DatanodeDescriptor[] targets) {
+    this.targets = targets;
+    this.primaryNodeIndex = -1;
+  }
+
+  //
+  // converts an INodeFileUnderConstruction into an INodeFile
+  //
+  INodeFile convertToInodeFile() {
+    INodeFile obj = new INodeFile(getPermissionStatus(),
+                                  getBlocks(),
+                                  getReplication(),
+                                  getModificationTime(),
+                                  getPreferredBlockSize());
+    return obj;
+    
+  }
+
+  /**
+   * Remove a block from the block list. The block must be
+   * the last one in the list.
+   */
+  void removeBlock(Block oldblock) throws IOException {
+    if (blocks == null) {
+      throw new IOException("Trying to delete non-existant block " + oldblock);
+    }
+    int size_1 = blocks.length - 1;
+    if (!blocks[size_1].equals(oldblock)) {
+      throw new IOException("Trying to delete non-last block " + oldblock);
+    }
+
+    //copy to a new list
+    BlockInfo[] newlist = new BlockInfo[size_1];
+    System.arraycopy(blocks, 0, newlist, 0, size_1);
+    blocks = newlist;
+    
+    // Remove the block locations for the last block.
+    targets = null;
+  }
+
+  void setLastBlock(BlockInfo newblock, DatanodeDescriptor[] newtargets
+      ) throws IOException {
+    if (blocks == null) {
+      throw new IOException("Trying to update non-existant block (newblock="
+          + newblock + ")");
+    }
+    blocks[blocks.length - 1] = newblock;
+    setTargets(newtargets);
+  }
+
+  /**
+   * Initialize lease recovery for this object
+   */
+  void assignPrimaryDatanode() {
+    //assign the first alive datanode as the primary datanode
+
+    if (targets.length == 0) {
+      NameNode.stateChangeLog.warn("BLOCK*"
+        + " INodeFileUnderConstruction.initLeaseRecovery:"
+        + " No blocks found, lease removed.");
+    }
+
+    int previous = primaryNodeIndex;
+    //find an alive datanode beginning from previous
+    for(int i = 1; i <= targets.length; i++) {
+      int j = (previous + i)%targets.length;
+      if (targets[j].isAlive) {
+        DatanodeDescriptor primary = targets[primaryNodeIndex = j]; 
+        primary.addBlockToBeRecovered(blocks[blocks.length - 1], targets);
+        NameNode.stateChangeLog.info("BLOCK* " + blocks[blocks.length - 1]
+          + " recovery started.");
+      }
+    }
+  }
+}
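
assignPrimaryDatanode() above scans the targets array round-robin, starting just after the previous primary index, for a datanode whose isAlive flag is set. The standalone sketch below illustrates only that traversal order and is not part of this commit; the class name PrimaryTargetSketch is made up, a boolean[] stands in for the target descriptors, and an explicit guard is added for an empty array.

    public class PrimaryTargetSketch {
      // Return the index of the first "alive" target after 'previous',
      // wrapping around the array, or -1 if there is no alive target.
      static int nextAlive(boolean[] alive, int previous) {
        if (alive.length == 0) {
          return -1;                          // guard: nothing to choose from
        }
        for (int i = 1; i <= alive.length; i++) {
          int j = (previous + i) % alive.length;
          if (alive[j]) {
            return j;
          }
        }
        return -1;
      }

      public static void main(String[] args) {
        boolean[] alive = { false, true, true };
        System.out.println(nextAlive(alive, -1));   // 1: first alive target
        System.out.println(nextAlive(alive, 1));    // 2: next alive after index 1
        System.out.println(nextAlive(new boolean[] { false, false }, 0));  // -1
      }
    }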

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/JspHelper.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/JspHelper.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/JspHelper.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java Thu Jul  3 15:55:06 2008
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
@@ -32,7 +32,14 @@
 import javax.servlet.jsp.JspWriter;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.dfs.FSConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.net.NetUtils;
@@ -42,13 +49,13 @@
   final static public String WEB_UGI_PROPERTY_NAME = "dfs.web.ugi";
 
   static FSNamesystem fsn = null;
-  static InetSocketAddress nameNodeAddr;
+  public static InetSocketAddress nameNodeAddr;
   public static Configuration conf = new Configuration();
   public static final UnixUserGroupInformation webUGI
   = UnixUserGroupInformation.createImmutable(
       conf.getStrings(WEB_UGI_PROPERTY_NAME));
 
-  static int defaultChunkSizeToView = 
+  public static int defaultChunkSizeToView = 
     conf.getInt("dfs.default.chunk.view.size", 32 * 1024);
   static Random rand = new Random();
 

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/LeaseExpiredException.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/LeaseExpiredException.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/LeaseExpiredException.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/LeaseExpiredException.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/LeaseExpiredException.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/LeaseExpiredException.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/LeaseExpiredException.java Thu Jul  3 15:55:06 2008
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
 

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/LeaseManager.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/LeaseManager.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/LeaseManager.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
 import java.util.*;
@@ -25,6 +25,14 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 
 /**
  * LeaseManager does the lease housekeeping for writing on files.   
@@ -48,8 +56,8 @@
  *      and removes the lease once all files have been removed
  * 2.10) Namenode commit changes to edit log
  */
-class LeaseManager {
-  static final Log LOG = LogFactory.getLog(LeaseManager.class);
+public class LeaseManager {
+  public static final Log LOG = LogFactory.getLog(LeaseManager.class);
 
   private final FSNamesystem fsnamesystem;
 
@@ -79,14 +87,14 @@
   SortedSet<Lease> getSortedLeases() {return sortedLeases;}
 
   /** @return the lease containing src */
-  Lease getLeaseByPath(String src) {return sortedLeasesByPath.get(src);}
+  public Lease getLeaseByPath(String src) {return sortedLeasesByPath.get(src);}
 
   /** list of blocks being recovered */
   private static Map<Block, Block> ongoingRecovery = new HashMap<Block, Block>();
 
 
   /** @return the number of leases currently in the system */
-  synchronized int countLease() {return sortedLeases.size();}
+  public synchronized int countLease() {return sortedLeases.size();}
 
   /** @return the number of paths contained in all leases */
   synchronized int countPath() {
@@ -326,7 +334,7 @@
     return entries;
   }
 
-  void setLeasePeriod(long softLimit, long hardLimit) {
+  public void setLeasePeriod(long softLimit, long hardLimit) {
     this.softLimit = softLimit;
     this.hardLimit = hardLimit; 
   }
@@ -405,7 +413,7 @@
    * Recover a list of blocks.
    * This method is invoked by the primary datanode.
    */
-  static void recoverBlocks(Block[] blocks, DatanodeID[][] targets,
+  public static void recoverBlocks(Block[] blocks, DatanodeID[][] targets,
       DatanodeProtocol namenode, Configuration conf) {
     for(int i = 0; i < blocks.length; i++) {
       try {
@@ -417,7 +425,7 @@
   }
 
   /** Recover a block */
-  static Block recoverBlock(Block block, DatanodeID[] datanodeids,
+  public static Block recoverBlock(Block block, DatanodeID[] datanodeids,
       DatanodeProtocol namenode, Configuration conf,
       boolean closeFile) throws IOException {
 
@@ -426,7 +434,7 @@
     // file at the same time.
     synchronized (ongoingRecovery) {
       Block tmp = new Block();
-      tmp.set(block.blkid, block.len, GenerationStamp.WILDCARD_STAMP);
+      tmp.set(block.getBlockId(), block.getNumBytes(), GenerationStamp.WILDCARD_STAMP);
       if (ongoingRecovery.get(tmp) != null) {
         String msg = "Block " + block + " is already being recovered, " +
                      " ignoring this request to recover it.";
@@ -450,10 +458,10 @@
           InterDatanodeProtocol datanode
               = DataNode.createInterDataNodeProtocolProxy(id, conf);
           BlockMetaDataInfo info = datanode.getBlockMetaDataInfo(block);
-          if (info != null && info.getGenerationStamp() >= block.generationStamp) {
+          if (info != null && info.getGenerationStamp() >= block.getGenerationStamp()) {
             syncList.add(new BlockRecord(id, datanode, new Block(info)));
-            if (info.len < minlength) {
-              minlength = info.len;
+            if (info.getNumBytes() < minlength) {
+              minlength = info.getNumBytes();
             }
           }
         } catch (IOException e) {
@@ -496,7 +504,7 @@
     List<DatanodeID> successList = new ArrayList<DatanodeID>();
 
     long generationstamp = namenode.nextGenerationStamp(block);
-    Block newblock = new Block(block.blkid, minlength, generationstamp);
+    Block newblock = new Block(block.getBlockId(), minlength, generationstamp);
 
     for(BlockRecord r : syncList) {
       try {
@@ -510,7 +518,7 @@
 
     if (!successList.isEmpty()) {
       namenode.commitBlockSynchronization(block,
-          newblock.generationStamp, newblock.len, closeFile, false,
+          newblock.getGenerationStamp(), newblock.getNumBytes(), closeFile, false,
           successList.toArray(new DatanodeID[successList.size()]));
       return newblock; // success
     }

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/ListPathsServlet.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/ListPathsServlet.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/ListPathsServlet.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java Thu Jul  3 15:55:06 2008
@@ -15,8 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.DFSFileInfo;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.util.VersionInfo;
@@ -39,10 +41,10 @@
 
 /**
  * Obtain meta-information about a filesystem.
- * @see org.apache.hadoop.dfs.HftpFileSystem
+ * @see org.apache.hadoop.hdfs.HftpFileSystem
  */
 public class ListPathsServlet extends DfsServlet {
-  static final SimpleDateFormat df =
+  public static final SimpleDateFormat df =
     new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ");
   static {
     df.setTimeZone(TimeZone.getTimeZone("UTC"));

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/NameNode.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/NameNode.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/NameNode.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.commons.logging.*;
 
@@ -24,6 +24,17 @@
 import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.permission.*;
+import org.apache.hadoop.hdfs.protocol.*;
+import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
+import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
+import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
+import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 import org.apache.hadoop.ipc.*;
 import org.apache.hadoop.conf.*;
 import org.apache.hadoop.util.StringUtils;
@@ -87,10 +98,9 @@
     
   public static final int DEFAULT_PORT = 8020;
 
-  public static final Log LOG = LogFactory.getLog("org.apache.hadoop.dfs.NameNode");
-  public static final Log stateChangeLog = LogFactory.getLog("org.apache.hadoop.dfs.StateChange");
-
-  FSNamesystem namesystem;
+  public static final Log LOG = LogFactory.getLog(NameNode.class.getName());
+  public static final Log stateChangeLog = LogFactory.getLog("org.apache.hadoop.hdfs.StateChange");
+  public FSNamesystem namesystem;
   private Server server;
   private Thread emptier;
   private int handlerCount = 2;
@@ -112,15 +122,15 @@
     return myMetrics;
   }
   
-  static InetSocketAddress getAddress(String address) {
+  public static InetSocketAddress getAddress(String address) {
     return NetUtils.createSocketAddr(address, DEFAULT_PORT);
   }
 
-  static InetSocketAddress getAddress(Configuration conf) {
+  public static InetSocketAddress getAddress(Configuration conf) {
     return getAddress(FileSystem.getDefaultUri(conf).getAuthority());
   }
 
-  static URI getUri(InetSocketAddress namenode) {
+  public static URI getUri(InetSocketAddress namenode) {
     int port = namenode.getPort();
     String portString = port == DEFAULT_PORT ? "" : (":"+port);
     return URI.create("hdfs://"+ namenode.getHostName()+portString);
@@ -158,11 +168,11 @@
    * <p>
    * The name-node can be started with one of the following startup options:
    * <ul> 
-   * <li>{@link FSConstants.StartupOption#REGULAR REGULAR} - normal startup</li>
-   * <li>{@link FSConstants.StartupOption#FORMAT FORMAT} - format name node</li>
-   * <li>{@link FSConstants.StartupOption#UPGRADE UPGRADE} - start the cluster  
+   * <li>{@link org.apache.hadoop.hdfs.protocol.FSConstants.StartupOption#REGULAR REGULAR} - normal startup</li>
+   * <li>{@link org.apache.hadoop.hdfs.protocol.FSConstants.StartupOption#FORMAT FORMAT} - format name node</li>
+   * <li>{@link org.apache.hadoop.hdfs.protocol.FSConstants.StartupOption#UPGRADE UPGRADE} - start the cluster  
    * upgrade and create a snapshot of the current file system state</li> 
-   * <li>{@link FSConstants.StartupOption#ROLLBACK ROLLBACK} - roll the  
+   * <li>{@link org.apache.hadoop.hdfs.protocol.FSConstants.StartupOption#ROLLBACK ROLLBACK} - roll the  
    *            cluster back to the previous state</li>
    * </ul>
    * The option is passed via configuration field: 
@@ -289,7 +299,7 @@
   }
 
   /** Coming in a future release.... */
-  void append(String src, String clientName) throws IOException {
+  public void append(String src, String clientName) throws IOException {
     String clientMachine = getClientMachine();
     if (stateChangeLog.isDebugEnabled()) {
       stateChangeLog.debug("*DIR* NameNode.append: file "
@@ -680,7 +690,7 @@
     return getFSImage().getFsImageName();
   }
     
-  FSImage getFSImage() {
+  public FSImage getFSImage() {
     return namesystem.dir.fsImage;
   }
 
@@ -802,7 +812,7 @@
                                           StartupOption.REGULAR.toString()));
   }
 
-  static NameNode createNameNode(String argv[], 
+  public static NameNode createNameNode(String argv[], 
                                  Configuration conf) throws IOException {
     if (conf == null)
       conf = new Configuration();

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/NamenodeFsck.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/NamenodeFsck.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/NamenodeFsck.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
 import java.io.OutputStream;
@@ -33,7 +33,14 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
-import org.apache.hadoop.dfs.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DFSFileInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 
 /**
@@ -59,7 +66,7 @@
  *  factors of each file.
  */
 public class NamenodeFsck {
-  public static final Log LOG = LogFactory.getLog("org.apache.hadoop.dfs.NameNode");
+  public static final Log LOG = LogFactory.getLog(NameNode.class.getName());
   
   /** Don't attempt any fixing . */
   public static final int FIXING_NONE = 0;

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NotReplicatedYetException.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/NotReplicatedYetException.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NotReplicatedYetException.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NotReplicatedYetException.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/NotReplicatedYetException.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/NotReplicatedYetException.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NotReplicatedYetException.java Thu Jul  3 15:55:06 2008
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
 

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/PendingReplicationBlocks.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/PendingReplicationBlocks.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/PendingReplicationBlocks.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/PendingReplicationBlocks.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/PendingReplicationBlocks.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/PendingReplicationBlocks.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/PendingReplicationBlocks.java Thu Jul  3 15:55:06 2008
@@ -15,8 +15,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.util.*;
 import java.io.*;
 import java.util.*;

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/PermissionChecker.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/PermissionChecker.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/PermissionChecker.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/PermissionChecker.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/PermissionChecker.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/PermissionChecker.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/PermissionChecker.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import java.util.*;
 

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/ReplicationTargetChooser.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/ReplicationTargetChooser.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/ReplicationTargetChooser.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/ReplicationTargetChooser.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/ReplicationTargetChooser.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/ReplicationTargetChooser.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/ReplicationTargetChooser.java Thu Jul  3 15:55:06 2008
@@ -15,9 +15,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.commons.logging.*;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SafeModeException.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/SafeModeException.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SafeModeException.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SafeModeException.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/SafeModeException.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/SafeModeException.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SafeModeException.java Thu Jul  3 15:55:06 2008
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
 

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/SecondaryNameNode.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/SecondaryNameNode.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/SecondaryNameNode.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Thu Jul  3 15:55:06 2008
@@ -15,11 +15,14 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.commons.logging.*;
 
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.ipc.*;
 import org.apache.hadoop.conf.*;
 import org.apache.hadoop.util.StringUtils;
@@ -49,7 +52,7 @@
 public class SecondaryNameNode implements FSConstants, Runnable {
     
   public static final Log LOG = 
-    LogFactory.getLog("org.apache.hadoop.dfs.NameNode.Secondary");
+    LogFactory.getLog(SecondaryNameNode.class.getName());
 
   private String fsName;
   private CheckpointStorage checkpointImage;
@@ -103,7 +106,7 @@
   /**
    * Create a connection to the primary namenode.
    */
-  SecondaryNameNode(Configuration conf)  throws IOException {
+  public SecondaryNameNode(Configuration conf)  throws IOException {
     try {
       initialize(conf);
     } catch(IOException e) {
@@ -469,6 +472,7 @@
     }
 
     @Override
+    public
     boolean isConversionNeeded(StorageDirectory sd) {
       return false;
     }

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/SerialNumberManager.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/SerialNumberManager.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/SerialNumberManager.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import java.util.*;
 
@@ -69,4 +69,4 @@
       return "max=" + max + ",\n  t2i=" + t2i + ",\n  i2t=" + i2t;
     }
   }
-}
\ No newline at end of file
+}

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/StreamFile.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/StreamFile.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/StreamFile.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/StreamFile.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/StreamFile.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/StreamFile.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/StreamFile.java Thu Jul  3 15:55:06 2008
@@ -15,13 +15,15 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import javax.servlet.*;
 import javax.servlet.http.*;
 import java.io.*;
 import java.net.*;
 import org.apache.hadoop.fs.*;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.conf.*;
 

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/StringBytesWritable.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/StringBytesWritable.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/StringBytesWritable.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/StringBytesWritable.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/StringBytesWritable.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/StringBytesWritable.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/StringBytesWritable.java Thu Jul  3 15:55:06 2008
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
 import org.apache.hadoop.io.BytesWritable;

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/TransferFsImage.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/TransferFsImage.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/TransferFsImage.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.*;
 import java.net.*;
@@ -24,7 +24,8 @@
 import javax.servlet.http.HttpServletResponse;
 import javax.servlet.http.HttpServletRequest;
 
-import org.apache.hadoop.dfs.SecondaryNameNode.ErrorSimulator;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.ErrorSimulator;
 
 /**
  * This class provides for fetching a specified file from the NameNode.

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UnderReplicatedBlocks.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UnderReplicatedBlocks.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UnderReplicatedBlocks.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java Thu Jul  3 15:55:06 2008
@@ -15,10 +15,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import java.util.*;
 
+import org.apache.hadoop.hdfs.protocol.Block;
+
 /* Class for keeping track of under-replicated blocks.
  * Blocks have a replication priority, with priority 0 indicating the highest.
  * Blocks that have only one replica have the highest priority.

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UpgradeManagerNamenode.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UpgradeManagerNamenode.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UpgradeManagerNamenode.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java Thu Jul  3 15:55:06 2008
@@ -15,12 +15,19 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import java.util.SortedSet;
 import java.io.IOException;
 
-import org.apache.hadoop.dfs.FSConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
+import org.apache.hadoop.hdfs.server.common.UpgradeManager;
+import org.apache.hadoop.hdfs.server.common.UpgradeObjectCollection;
+import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
+import org.apache.hadoop.hdfs.server.common.Upgradeable;
+import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 
 /**
  * Upgrade manager for name-nodes.
@@ -44,7 +51,7 @@
    * @return true if distributed upgrade is required or false otherwise
    * @throws IOException
    */
-  synchronized boolean startUpgrade() throws IOException {
+  public synchronized boolean startUpgrade() throws IOException {
     if(!upgradeState) {
       initializeUpgrade();
       if(!upgradeState) return false;
@@ -95,7 +102,7 @@
     return reply;
   }
 
-  synchronized void completeUpgrade() throws IOException {
+  public synchronized void completeUpgrade() throws IOException {
     // set and write new upgrade state into disk
     setUpgradeState(false, FSConstants.LAYOUT_VERSION);
     FSNamesystem.getFSNamesystem().getFSImage().writeAll();

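startUpgrade() and completeUpgrade() are widened from package-private to public because code that drives a distributed upgrade no longer shares the namenode's package. A minimal sketch of such a caller, assuming only what the hunk above shows; how the UpgradeManagerNamenode instance is obtained is not part of this diff:

  // Hedged sketch: this revision only shows that the two methods became
  // public; the surrounding wiring is illustrative.
  static void runDistributedUpgrade(UpgradeManagerNamenode upgradeManager)
      throws java.io.IOException {
    if (upgradeManager.startUpgrade()) {   // initialize and start the upgrade
      // ... upgrade objects exchange UpgradeCommands with the datanodes ...
      upgradeManager.completeUpgrade();    // write the final upgrade state to disk
    }
  }
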
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UpgradeObjectNamenode.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UpgradeObjectNamenode.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UpgradeObjectNamenode.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java Thu Jul  3 15:55:06 2008
@@ -15,15 +15,19 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
 
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.server.common.UpgradeObject;
+import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
+
 /**
  * Base class for name-node upgrade objects.
  * Data-node upgrades are run in separate threads.
  */
-abstract class UpgradeObjectNamenode extends UpgradeObject {
+public abstract class UpgradeObjectNamenode extends UpgradeObject {
 
   /**
    * Process an upgrade command.
@@ -35,7 +39,7 @@
    * @param command
    * @return the reply command which is analyzed on the client side.
    */
-  abstract UpgradeCommand processUpgradeCommand(UpgradeCommand command
+  public abstract UpgradeCommand processUpgradeCommand(UpgradeCommand command
                                                ) throws IOException;
 
   public FSConstants.NodeType getType() {
@@ -50,7 +54,7 @@
                               getVersion(), (short)0);
   }
 
-  FSNamesystem getFSNamesystem() {
+  protected FSNamesystem getFSNamesystem() {
     return FSNamesystem.getFSNamesystem();
   }
 

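With UpgradeObjectNamenode and its processUpgradeCommand() hook now public, concrete name-node upgrade objects can be defined outside the namenode package. A minimal sketch of a subclass under stated assumptions: getVersion() and getDescription() are assumed to be inherited abstract members of UpgradeObject and their values are placeholders; only processUpgradeCommand(), getType() and getFSNamesystem() are visible in this diff.

  // Hedged sketch, not taken from this commit.
  public class ExampleNamenodeUpgrade extends UpgradeObjectNamenode {
    public int getVersion() { return -20; }               // placeholder layout version (assumed signature)
    public String getDescription() { return "example"; }  // placeholder description (assumed signature)

    public UpgradeCommand processUpgradeCommand(UpgradeCommand command)
        throws java.io.IOException {
      // inspect the command reported by a datanode and build a reply;
      // returning null here simply means there is nothing to send back
      return null;
    }
  }
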
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java?rev=673857&r1=673837&r2=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs.namenode.metrics;
+package org.apache.hadoop.hdfs.server.namenode.metrics;
 
 /**
  * 
@@ -26,7 +26,7 @@
  * 
  * <p>
  * Name Node runtime statistics info is reported in another MBean
- * @see org.apache.hadoop.dfs.namenode.metrics.NameNodeStatisticsMBean
+ * @see org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeStatisticsMBean
  *
  */
 public interface FSNamesystemMBean {

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSNamesystemMetrics.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSNamesystemMetrics.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSNamesystemMetrics.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java Thu Jul  3 15:55:06 2008
@@ -15,11 +15,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode.metrics;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.metrics.*;
 import org.apache.hadoop.metrics.jvm.JvmMetrics;
 import org.apache.hadoop.metrics.util.MetricsIntValue;
@@ -52,7 +53,7 @@
   public MetricsIntValue pendingReplicationBlocks = new MetricsIntValue("PendingReplicationBlocks");
   public MetricsIntValue underReplicatedBlocks = new MetricsIntValue("UnderReplicatedBlocks");
   public MetricsIntValue scheduledReplicationBlocks = new MetricsIntValue("ScheduledReplicationBlocks");
-  FSNamesystemMetrics(Configuration conf, FSNamesystem fsNameSystem) {
+  public FSNamesystemMetrics(Configuration conf, FSNamesystem fsNameSystem) {
     String sessionId = conf.get("session.id");
     this.fsNameSystem = fsNameSystem;
      

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/NameNodeMetrics.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/NameNodeMetrics.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/NameNodeMetrics.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java Thu Jul  3 15:55:06 2008
@@ -15,12 +15,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode.metrics;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.dfs.namenode.metrics.NameNodeStatistics;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeStatistics;
 import org.apache.hadoop.metrics.*;
 import org.apache.hadoop.metrics.jvm.JvmMetrics;
 import org.apache.hadoop.metrics.util.MetricsIntValue;
@@ -60,7 +61,7 @@
     public MetricsIntValue numBlocksCorrupted = new MetricsIntValue("BlocksCorrupted");
 
       
-    NameNodeMetrics(Configuration conf, NameNode nameNode) {
+    public NameNodeMetrics(Configuration conf, NameNode nameNode) {
       String sessionId = conf.get("session.id");
       // Initiate Java VM metrics
       JvmMetrics.init("NameNode", sessionId);

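The FSNamesystemMetrics and NameNodeMetrics constructors become public for the same reason: the metrics classes now live in org.apache.hadoop.hdfs.server.namenode.metrics, so FSNamesystem and NameNode can no longer reach them through package-private access. A minimal sketch of the cross-package call sites this enables; the variable names are illustrative, not taken from this diff:

  // Hedged sketch: conf, fsNamesystem and nameNode stand for the objects the
  // real call sites already hold.
  FSNamesystemMetrics fsMetrics = new FSNamesystemMetrics(conf, fsNamesystem);
  NameNodeMetrics nnMetrics = new NameNodeMetrics(conf, nameNode);
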
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeStatistics.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeStatistics.java?rev=673857&r1=673837&r2=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeStatistics.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeStatistics.java Thu Jul  3 15:55:06 2008
@@ -15,11 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs.namenode.metrics;
+package org.apache.hadoop.hdfs.server.namenode.metrics;
 
 import javax.management.ObjectName;
 
-import org.apache.hadoop.dfs.NameNodeMetrics;
+import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.metrics.util.MBeanUtil;
 
 /**

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeStatisticsMBean.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeStatisticsMBean.java?rev=673857&r1=673837&r2=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeStatisticsMBean.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeStatisticsMBean.java Thu Jul  3 15:55:06 2008
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.dfs.namenode.metrics;
+package org.apache.hadoop.hdfs.server.namenode.metrics;
 
 /**
  * 
@@ -40,7 +40,7 @@
  * The context with the update thread is used to average the data periodically.
  * <p>
  * Name Node Status info is reported in another MBean
- * @see org.apache.hadoop.dfs.namenode.metrics.FSNamesystemMBean
+ * @see org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean
  *
  */
 public interface NameNodeStatisticsMBean {
@@ -142,7 +142,7 @@
   
   /**
    * Number of
-   * {@link org.apache.hadoop.dfs.NameNode#getBlockLocations(String,long,long)}
+   * {@link org.apache.hadoop.hdfs.server.namenode.NameNode#getBlockLocations(String,long,long)}
    * @return  number of operations
    */
   int getNumGetBlockLocations();

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/BlockCommand.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/BlockCommand.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/BlockCommand.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java Thu Jul  3 15:55:06 2008
@@ -15,77 +15,16 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.protocol;
 
 import java.io.*;
 import java.util.List;
 
-import org.apache.hadoop.dfs.DatanodeDescriptor.BlockTargetPair;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair;
 import org.apache.hadoop.io.*;
 
-abstract class DatanodeCommand implements Writable {
-  static class Register extends DatanodeCommand {
-    private Register() {super(DatanodeProtocol.DNA_REGISTER);}
-    public void readFields(DataInput in) {}
-    public void write(DataOutput out) {}
-  }
-
-  static class BlockReport extends DatanodeCommand {
-    private BlockReport() {super(DatanodeProtocol.DNA_BLOCKREPORT);}
-    public void readFields(DataInput in) {}
-    public void write(DataOutput out) {}
-  }
-
-  static class Finalize extends DatanodeCommand {
-    private Finalize() {super(DatanodeProtocol.DNA_FINALIZE);}
-    public void readFields(DataInput in) {}
-    public void write(DataOutput out) {}
-  }
-
-  static {                                      // register a ctor
-    WritableFactories.setFactory(Register.class,
-        new WritableFactory() {
-          public Writable newInstance() {return new Register();}
-        });
-    WritableFactories.setFactory(BlockReport.class,
-        new WritableFactory() {
-          public Writable newInstance() {return new BlockReport();}
-        });
-    WritableFactories.setFactory(Finalize.class,
-        new WritableFactory() {
-          public Writable newInstance() {return new Finalize();}
-        });
-  }
-
-  static final DatanodeCommand REGISTER = new Register();
-  static final DatanodeCommand BLOCKREPORT = new BlockReport();
-  static final DatanodeCommand FINALIZE = new Finalize();
-
-  private int action;
-  
-  public DatanodeCommand() {
-    this(DatanodeProtocol.DNA_UNKNOWN);
-  }
-  
-  DatanodeCommand(int action) {
-    this.action = action;
-  }
-
-  int getAction() {
-    return this.action;
-  }
-  
-  ///////////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////////
-  public void write(DataOutput out) throws IOException {
-    out.writeInt(this.action);
-  }
-  
-  public void readFields(DataInput in) throws IOException {
-    this.action = in.readInt();
-  }
-}
 
 /****************************************************
  * A BlockCommand is an instruction to a datanode 
@@ -95,7 +34,7 @@
  * another DataNode.
  * 
  ****************************************************/
-class BlockCommand extends DatanodeCommand {
+public class BlockCommand extends DatanodeCommand {
   Block blocks[];
   DatanodeInfo targets[][];
 
@@ -103,9 +42,9 @@
 
   /**
    * Create BlockCommand for transferring blocks to another datanode
-   * @param blocks    blocks to be transferred 
+   * @param blocktargetlist    blocks to be transferred 
    */
-  BlockCommand(int action, List<BlockTargetPair> blocktargetlist) {
+  public BlockCommand(int action, List<BlockTargetPair> blocktargetlist) {
     super(action);
 
     blocks = new Block[blocktargetlist.size()]; 
@@ -123,17 +62,17 @@
    * Create BlockCommand for the given action
    * @param blocks blocks related to the action
    */
-  BlockCommand(int action, Block blocks[]) {
+  public BlockCommand(int action, Block blocks[]) {
     super(action);
     this.blocks = blocks;
     this.targets = EMPTY_TARGET;
   }
 
-  Block[] getBlocks() {
+  public Block[] getBlocks() {
     return blocks;
   }
 
-  DatanodeInfo[][] getTargets() {
+  public DatanodeInfo[][] getTargets() {
     return targets;
   }
 

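BlockCommand's constructors and accessors go public because the command now lives in org.apache.hadoop.hdfs.server.protocol while the namenode code that builds it stays in the namenode package. A minimal sketch of constructing and reading a command; DNA_INVALIDATE is assumed to be one of the DatanodeProtocol action codes, since only DNA_REGISTER, DNA_BLOCKREPORT, DNA_FINALIZE and DNA_UNKNOWN appear in this diff:

  // Hedged sketch: the action code is an assumption (see above); the blocks
  // array stands for whatever the namenode wants the datanode to act on.
  static BlockCommand invalidateCommand(Block[] staleBlocks) {
    return new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, staleBlocks);
  }

  static void inspect(BlockCommand cmd) {
    Block[] blocks = cmd.getBlocks();             // blocks the command refers to
    DatanodeInfo[][] targets = cmd.getTargets();  // per-block transfer targets, if any
    // the receiving datanode dispatches on cmd.getAction()
  }
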
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/BlockMetaDataInfo.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/BlockMetaDataInfo.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/BlockMetaDataInfo.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/BlockMetaDataInfo.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/BlockMetaDataInfo.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/BlockMetaDataInfo.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/BlockMetaDataInfo.java Thu Jul  3 15:55:06 2008
@@ -15,16 +15,17 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.protocol;
 
 import java.io.*;
 
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.io.*;
 
 /**
  * Meta data information for a block
  */
-class BlockMetaDataInfo extends Block {
+public class BlockMetaDataInfo extends Block {
   static final WritableFactory FACTORY = new WritableFactory() {
     public Writable newInstance() { return new BlockMetaDataInfo(); }
   };
@@ -36,12 +37,12 @@
 
   public BlockMetaDataInfo() {}
 
-  BlockMetaDataInfo(Block b, long lastScanTime) {
+  public BlockMetaDataInfo(Block b, long lastScanTime) {
     super(b);
     this.lastScanTime = lastScanTime;
   }
 
-  long getLastScanTime() {return lastScanTime;}
+  public long getLastScanTime() {return lastScanTime;}
 
   /** {@inheritDoc} */
   public void write(DataOutput out) throws IOException {

Added: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java?rev=673857&view=auto
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java (added)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java Thu Jul  3 15:55:06 2008
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocol;
+
+import java.io.*;
+
+import org.apache.hadoop.io.*;
+
+public abstract class DatanodeCommand implements Writable {
+  static class Register extends DatanodeCommand {
+    private Register() {super(DatanodeProtocol.DNA_REGISTER);}
+    public void readFields(DataInput in) {}
+    public void write(DataOutput out) {}
+  }
+
+  static class BlockReport extends DatanodeCommand {
+    private BlockReport() {super(DatanodeProtocol.DNA_BLOCKREPORT);}
+    public void readFields(DataInput in) {}
+    public void write(DataOutput out) {}
+  }
+
+  static class Finalize extends DatanodeCommand {
+    private Finalize() {super(DatanodeProtocol.DNA_FINALIZE);}
+    public void readFields(DataInput in) {}
+    public void write(DataOutput out) {}
+  }
+
+  static {                                      // register a ctor
+    WritableFactories.setFactory(Register.class,
+        new WritableFactory() {
+          public Writable newInstance() {return new Register();}
+        });
+    WritableFactories.setFactory(BlockReport.class,
+        new WritableFactory() {
+          public Writable newInstance() {return new BlockReport();}
+        });
+    WritableFactories.setFactory(Finalize.class,
+        new WritableFactory() {
+          public Writable newInstance() {return new Finalize();}
+        });
+  }
+
+  public static final DatanodeCommand REGISTER = new Register();
+  public static final DatanodeCommand BLOCKREPORT = new BlockReport();
+  public static final DatanodeCommand FINALIZE = new Finalize();
+
+  private int action;
+  
+  public DatanodeCommand() {
+    this(DatanodeProtocol.DNA_UNKNOWN);
+  }
+  
+  DatanodeCommand(int action) {
+    this.action = action;
+  }
+
+  public int getAction() {
+    return this.action;
+  }
+  
+  ///////////////////////////////////////////
+  // Writable
+  ///////////////////////////////////////////
+  public void write(DataOutput out) throws IOException {
+    out.writeInt(this.action);
+  }
+  
+  public void readFields(DataInput in) throws IOException {
+    this.action = in.readInt();
+  }
+}
\ No newline at end of file

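The DatanodeCommand class added above is the same code that the BlockCommand.java hunk earlier in this revision removes; it is extracted into its own public file so that both the namenode and datanode packages can refer to it, with the REGISTER/BLOCKREPORT/FINALIZE singletons and getAction() made public. A minimal sketch of the datanode-side dispatch this makes possible; the handling logic is illustrative, not taken from this diff:

  // Hedged sketch: only the public singletons and getAction() above are real;
  // everything else is illustrative.
  void handle(DatanodeCommand cmd) {
    if (cmd == DatanodeCommand.REGISTER) {
      // the namenode asked this datanode to re-register
    } else if (cmd == DatanodeCommand.FINALIZE) {
      // the namenode asked this datanode to finalize a previous upgrade
    } else {
      int action = cmd.getAction();   // numeric DNA_* action code
      // ... handle BlockCommand, UpgradeCommand, etc. by action ...
    }
  }
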
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DatanodeProtocol.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DatanodeProtocol.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DatanodeProtocol.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java Thu Jul  3 15:55:06 2008
@@ -16,9 +16,13 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.protocol;
 
 import java.io.*;
+
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.ipc.VersionedProtocol;
 
 /**********************************************************************
@@ -29,7 +33,7 @@
  * returning values from these functions.
  *
  **********************************************************************/
-interface DatanodeProtocol extends VersionedProtocol {
+public interface DatanodeProtocol extends VersionedProtocol {
   /**
    * 16: Block parameter added to nextGenerationStamp().
    */
@@ -56,10 +60,10 @@
   /** 
    * Register Datanode.
    *
-   * @see org.apache.hadoop.dfs.DataNode#register()
-   * @see org.apache.hadoop.dfs.FSNamesystem#registerDatanode(DatanodeRegistration)
+   * @see org.apache.hadoop.hdfs.server.datanode.DataNode#dnRegistration
+   * @see org.apache.hadoop.hdfs.server.namenode.FSNamesystem#registerDatanode(DatanodeRegistration)
    * 
-   * @return updated {@link org.apache.hadoop.dfs.DatanodeRegistration}, which contains 
+   * @return updated {@link org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration}, which contains 
    * new storageID if the datanode did not have one and
    * registration ID for further communication.
    */
@@ -130,7 +134,8 @@
   UpgradeCommand processUpgradeCommand(UpgradeCommand comm) throws IOException;
   
   /**
-   * same as {@link ClientProtocol#reportBadBlocks(LocatedBlock[] blocks)}
+   * same as {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#reportBadBlocks(LocatedBlock[])}
+   * }
    */
   public void reportBadBlocks(LocatedBlock[] blocks) throws IOException;
   

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DatanodeRegistration.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DatanodeRegistration.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DatanodeRegistration.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java Thu Jul  3 15:55:06 2008
@@ -16,12 +16,16 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.protocol;
 
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;
@@ -32,7 +36,7 @@
  * This information is sent by Datanode with each communication request.
  * 
  */
-class DatanodeRegistration extends DatanodeID implements Writable {
+public class DatanodeRegistration extends DatanodeID implements Writable {
   static {                                      // register a ctor
     WritableFactories.setFactory
       (DatanodeRegistration.class,
@@ -41,7 +45,7 @@
        });
   }
 
-  StorageInfo storageInfo;
+  public StorageInfo storageInfo;
 
   /**
    * Default constructor.
@@ -58,20 +62,20 @@
     this.storageInfo = new StorageInfo();
   }
   
-  void setInfoPort(int infoPort) {
+  public void setInfoPort(int infoPort) {
     this.infoPort = infoPort;
   }
   
-  void setIpcPort(int ipcPort) {
+  public void setIpcPort(int ipcPort) {
     this.ipcPort = ipcPort;
   }
 
-  void setStorageInfo(DataStorage storage) {
+  public void setStorageInfo(DataStorage storage) {
     this.storageInfo = new StorageInfo(storage);
     this.storageID = storage.getStorageID();
   }
   
-  void setName(String name) {
+  public void setName(String name) {
     this.name = name;
   }
 

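The DatanodeRegistration setters become public so that the datanode, now in org.apache.hadoop.hdfs.server.datanode, can still fill in its registration before contacting the namenode. A minimal sketch with illustrative values; the host name and ports are placeholders, and the DataStorage instance that would normally be passed to setStorageInfo() is not constructed here:

  // Hedged sketch: values are placeholders, not taken from this diff.
  DatanodeRegistration dnRegistration = new DatanodeRegistration();
  dnRegistration.setName("datanode.example.com:50010");   // placeholder host:port
  dnRegistration.setInfoPort(50075);                      // placeholder HTTP info port
  dnRegistration.setIpcPort(50020);                       // placeholder IPC port
  // dnRegistration.setStorageInfo(dataStorage);          // real DataStorage comes from datanode startup
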
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DisallowedDatanodeException.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DisallowedDatanodeException.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DisallowedDatanodeException.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DisallowedDatanodeException.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DisallowedDatanodeException.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DisallowedDatanodeException.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DisallowedDatanodeException.java Thu Jul  3 15:55:06 2008
@@ -16,10 +16,12 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.protocol;
 
 import java.io.IOException;
 
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+
 
 /**
  * This exception is thrown when a datanode tries to register or communicate
@@ -27,7 +29,7 @@
  * or has been specifically excluded.
  * 
  */
-class DisallowedDatanodeException extends IOException {
+public class DisallowedDatanodeException extends IOException {
 
   public DisallowedDatanodeException(DatanodeID nodeID) {
     super("Datanode denied communication with namenode: " + nodeID.getName());

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/InterDatanodeProtocol.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/InterDatanodeProtocol.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/InterDatanodeProtocol.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java Thu Jul  3 15:55:06 2008
@@ -16,17 +16,18 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.protocol;
 
 import java.io.IOException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.ipc.VersionedProtocol;
 
 /** An inter-datanode protocol for updating generation stamp
  */
-interface InterDatanodeProtocol extends VersionedProtocol {
+public interface InterDatanodeProtocol extends VersionedProtocol {
   public static final Log LOG = LogFactory.getLog(InterDatanodeProtocol.class);
 
   /**


