hadoop-common-commits mailing list archives

From: omal...@apache.org
Subject: svn commit: r673857 [3/6] - in /hadoop/core/trunk: ./ bin/ conf/ docs/ src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/ src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/ src/contrib/index/src/java/org/apache/hadoop/contri...
Date: Thu, 03 Jul 2008 22:55:18 GMT
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DatanodeBlockInfo.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DatanodeBlockInfo.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DatanodeBlockInfo.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DatanodeBlockInfo.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DatanodeBlockInfo.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DatanodeBlockInfo.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DatanodeBlockInfo.java Thu Jul  3 15:55:06 2008
@@ -15,13 +15,15 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
-import org.apache.hadoop.dfs.FSDataset.FSVolume;
+
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.FileUtil.HardLink;
 import org.apache.hadoop.io.IOUtils;

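Every file below follows the same pattern as this one: datanode-side classes move from the flat org.apache.hadoop.dfs package into org.apache.hadoop.hdfs.server.datanode, and shared types such as Block, which previously resolved without an import inside the single dfs package, are now imported from org.apache.hadoop.hdfs.protocol. A minimal sketch of what dependent code looks like after the move (the wrapper class here is illustrative, not part of the commit):

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.datanode.FSDataset;

class BlockFileLookup {
  // FSDataset and its getBlockFile(Block) are public after r673857,
  // so code outside the datanode package can resolve a block's file.
  static File locate(FSDataset dataset, Block b) throws IOException {
    return dataset.getBlockFile(b);
  }
}
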
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSDataset.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSDataset.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSDataset.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.*;
 import java.util.*;
@@ -25,19 +25,24 @@
 import javax.management.StandardMBean;
 
 import org.apache.hadoop.fs.*;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.metrics.util.MBeanUtil;
+import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.apache.hadoop.conf.*;
-import org.apache.hadoop.dfs.datanode.metrics.FSDatasetMBean;
+import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
+import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 
 /**************************************************
  * FSDataset manages a set of data blocks.  Each block
  * has a unique name and an extent on disk.
  *
  ***************************************************/
-class FSDataset implements FSConstants, FSDatasetInterface {
+public class FSDataset implements FSConstants, FSDatasetInterface {
 
 
   /**
@@ -602,8 +607,8 @@
     }
   }
 
-  File findBlockFile(Block b) {
-    assert b.generationStamp == GenerationStamp.WILDCARD_STAMP;
+  public File findBlockFile(Block b) {
+    assert b.getGenerationStamp() == GenerationStamp.WILDCARD_STAMP;
 
     File blockfile = null;
     ActiveFile activefile = ongoingCreates.get(b);
@@ -630,8 +635,8 @@
       return null;
     }
     File metafile = findMetaFile(blockfile);
-    b.generationStamp = parseGenerationStamp(blockfile, metafile);
-    b.len = blockfile.length();
+    b.setGenerationStamp(parseGenerationStamp(blockfile, metafile));
+    b.setNumBytes(blockfile.length());
     return b;
   }
 
@@ -703,7 +708,7 @@
   /**
    * Get File name for a given block.
    */
-  protected synchronized File getBlockFile(Block b) throws IOException {
+  public synchronized File getBlockFile(Block b) throws IOException {
     File f = validateBlockFile(b);
     if(f == null) {
       if (InterDatanodeProtocol.LOG.isDebugEnabled()) {
@@ -738,12 +743,12 @@
    * Make a copy of the block if this block is linked to an existing
    * snapshot. This ensures that modifying this block does not modify
    * data in any existing snapshots.
-   * @param b Block
+   * @param block Block
    * @param numLinks Detach if the number of links exceed this value
    * @throws IOException
    * @return - true if the specified block was detached
    */
-  boolean detachBlock(Block block, int numLinks) throws IOException {
+  public boolean detachBlock(Block block, int numLinks) throws IOException {
     DatanodeBlockInfo info = null;
 
     synchronized (this) {
@@ -801,19 +806,19 @@
     }
 
     //update generation stamp
-    if (oldgs > newblock.generationStamp) {
-      throw new IOException("Cannot update block (id=" + newblock.blkid
+    if (oldgs > newblock.getGenerationStamp()) {
+      throw new IOException("Cannot update block (id=" + newblock.getBlockId()
           + ") generation stamp from " + oldgs
-          + " to " + newblock.generationStamp);
+          + " to " + newblock.getGenerationStamp());
     }
     
     //update length
-    if (newblock.len > oldblock.len) {
+    if (newblock.getNumBytes() > oldblock.getNumBytes()) {
       throw new IOException("Cannot update block file (=" + blockFile
-          + ") length from " + oldblock.len + " to " + newblock.len);
+          + ") length from " + oldblock.getNumBytes() + " to " + newblock.getNumBytes());
     }
-    if (newblock.len < oldblock.len) {
-      truncateBlock(blockFile, tmpMetaFile, oldblock.len, newblock.len);
+    if (newblock.getNumBytes() < oldblock.getNumBytes()) {
+      truncateBlock(blockFile, tmpMetaFile, oldblock.getNumBytes(), newblock.getNumBytes());
     }
 
     //rename the tmp file to the new meta file (with new generation stamp)
@@ -1146,7 +1151,7 @@
   /**
    * Turn the block identifier into a filename.
    */
-  synchronized File getFile(Block b) {
+  public synchronized File getFile(Block b) {
     DatanodeBlockInfo info = volumeMap.get(b);
     if (info != null) {
       return info.getFile();

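Beyond the package move, the hunks above replace direct access to Block's fields (blkid, len, generationStamp) with accessors. A small sketch of the pattern using only methods visible in this diff (the helper class is illustrative):

import java.io.File;

import org.apache.hadoop.hdfs.protocol.Block;

class BlockAccessorExample {
  // Old-style code read and wrote the fields directly:
  //   long id = b.blkid;  b.len = blockFile.length();  b.generationStamp = gs;
  // After this commit the same updates go through getters and setters.
  static void refresh(Block b, File blockFile, long genStamp) {
    long id = b.getBlockId();
    b.setNumBytes(blockFile.length());
    b.setGenerationStamp(genStamp);
    System.out.println("block " + id + " len=" + b.getNumBytes()
        + " gs=" + b.getGenerationStamp());
  }
}
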
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSDatasetInterface.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSDatasetInterface.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSDatasetInterface.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.datanode;
 
 
 import java.io.FilterInputStream;
@@ -26,7 +26,8 @@
 
 
 
-import org.apache.hadoop.dfs.datanode.metrics.FSDatasetMBean;
+import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 
 /**

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/GenerationStampUpgradeDatanode.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/GenerationStampUpgradeDatanode.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/GenerationStampUpgradeDatanode.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/GenerationStampUpgradeDatanode.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/GenerationStampUpgradeDatanode.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/GenerationStampUpgradeDatanode.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/GenerationStampUpgradeDatanode.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.*;
 import java.util.*;
@@ -29,6 +29,12 @@
 import java.net.SocketTimeoutException;
 
 import org.apache.commons.logging.*;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.common.GenerationStampStatsUpgradeCommand;
+import org.apache.hadoop.hdfs.server.namenode.GenerationStampUpgradeNamenode;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.retry.*;
 import org.apache.hadoop.util.StringUtils;
@@ -41,10 +47,10 @@
  * generation stamp is written to each metadata file. Please see
  * HADOOP-1700 for details.
  */
-class GenerationStampUpgradeDatanode extends UpgradeObjectDatanode {
+public class GenerationStampUpgradeDatanode extends UpgradeObjectDatanode {
 
   public static final Log LOG = 
-    LogFactory.getLog("org.apache.hadoop.dfs.GenerationStampUpgrade");
+    LogFactory.getLog(GenerationStampUpgradeDatanode.class.getName());
 
   DatanodeProtocol namenode;
   InetSocketAddress namenodeAddr;
@@ -223,7 +229,7 @@
   // This method iterates through all the blocks on a datanode and
   // do the upgrade.
   //
-  void doUpgrade() throws IOException {
+  public void doUpgrade() throws IOException {
     
     if (upgradeCompleted.get()) {
       assert offlineUpgrade.get() : 

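The logger also switches from a hard-coded category string to one derived from the class, so the log category follows any future rename automatically. The pattern, side by side (the example class is illustrative):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

class LoggerNamingExample {
  // Before: a literal that silently went stale once the class left
  // org.apache.hadoop.dfs.
  static final Log OLD_STYLE =
      LogFactory.getLog("org.apache.hadoop.dfs.GenerationStampUpgrade");
  // After: the category is derived from the class itself, as this
  // commit does for GenerationStampUpgradeDatanode.
  static final Log NEW_STYLE =
      LogFactory.getLog(LoggerNamingExample.class.getName());
}
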
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UpgradeManagerDatanode.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UpgradeManagerDatanode.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UpgradeManagerDatanode.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java Thu Jul  3 15:55:06 2008
@@ -15,10 +15,14 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.IOException;
 
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.server.common.UpgradeManager;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 import org.apache.hadoop.util.Daemon;
 
 /**
@@ -63,7 +67,7 @@
    * @return true if distributed upgrade is required or false otherwise
    * @throws IOException
    */
-  synchronized boolean startUpgrade() throws IOException {
+  public synchronized boolean startUpgrade() throws IOException {
     if(upgradeState) {  // upgrade is already in progress
       assert currentUpgrades != null : 
         "UpgradeManagerDatanode.currentUpgrades is null.";
@@ -125,7 +129,7 @@
         + "The upgrade object is not defined.");
   }
 
-  synchronized void completeUpgrade() throws IOException {
+  public synchronized void completeUpgrade() throws IOException {
     assert currentUpgrades != null : 
       "UpgradeManagerDatanode.currentUpgrades is null.";
     UpgradeObjectDatanode curUO = (UpgradeObjectDatanode)currentUpgrades.first();

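startUpgrade() and completeUpgrade() become public so the datanode's other server packages can drive the distributed-upgrade state machine. A sketch of the calling side only; how a manager instance is obtained and when an upgrade counts as finished are outside this diff:

import java.io.IOException;

import org.apache.hadoop.hdfs.server.datanode.UpgradeManagerDatanode;

class UpgradeDriverSketch {
  // Returns whether a distributed upgrade was required and started.
  static boolean begin(UpgradeManagerDatanode manager) throws IOException {
    return manager.startUpgrade();
  }

  // Called once the current upgrade object reports completion.
  static void finish(UpgradeManagerDatanode manager) throws IOException {
    manager.completeUpgrade();
  }
}
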
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UpgradeObjectDatanode.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UpgradeObjectDatanode.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UpgradeObjectDatanode.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java Thu Jul  3 15:55:06 2008
@@ -15,8 +15,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.datanode;
 
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.server.common.UpgradeObject;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 import org.apache.hadoop.util.StringUtils;
 import java.io.IOException;
 import java.net.SocketTimeoutException;
@@ -25,7 +30,7 @@
  * Base class for data-node upgrade objects.
  * Data-node upgrades are run in separate threads.
  */
-abstract class UpgradeObjectDatanode extends UpgradeObject implements Runnable {
+public abstract class UpgradeObjectDatanode extends UpgradeObject implements Runnable {
   private DataNode dataNode = null;
 
   public FSConstants.NodeType getType() {
@@ -44,7 +49,7 @@
    * Specifies how the upgrade is performed. 
    * @throws IOException
    */
-  abstract void doUpgrade() throws IOException;
+  public abstract void doUpgrade() throws IOException;
 
   /**
    * Specifies what to do before the upgrade is started.

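UpgradeObjectDatanode itself becomes a public base class and doUpgrade() a public abstract hook, which is what lets GenerationStampUpgradeDatanode above expose its implementation. Calling-side sketch (the wrapper class is illustrative):

import java.io.IOException;

import org.apache.hadoop.hdfs.server.datanode.UpgradeObjectDatanode;

class UpgradeHookCaller {
  // doUpgrade() is public after this commit, so coordination code in
  // other hdfs.server packages can invoke the upgrade directly.
  static void invoke(UpgradeObjectDatanode upgrade) throws IOException {
    upgrade.doUpgrade();
  }
}
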
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java?rev=673857&r1=673837&r2=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs.datanode.metrics;
+package org.apache.hadoop.hdfs.server.datanode.metrics;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.metrics.MetricsContext;

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeStatistics.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeStatistics.java?rev=673857&r1=673837&r2=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeStatistics.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeStatistics.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs.datanode.metrics;
+package org.apache.hadoop.hdfs.server.datanode.metrics;
 
 import java.util.Random;
 

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeStatisticsMBean.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeStatisticsMBean.java?rev=673857&r1=673837&r2=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeStatisticsMBean.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeStatisticsMBean.java Thu Jul  3 15:55:06 2008
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.dfs.datanode.metrics;
+package org.apache.hadoop.hdfs.server.datanode.metrics;
 
 /**
  * 
@@ -39,7 +39,7 @@
  * The context with the update thread is used to average the data periodically.
  * <p>
  * Name Node Status info is reported in another MBean
- * @see org.apache.hadoop.dfs.datanode.metrics.FSDatasetMBean
+ * @see org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean
  *
  */
 public interface DataNodeStatisticsMBean {

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java?rev=673857&r1=673837&r2=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs.datanode.metrics;
+package org.apache.hadoop.hdfs.server.datanode.metrics;
 
 import java.io.IOException;
 
@@ -27,7 +27,7 @@
  * convention.) 
  * <p>
  * Data Node runtime statistic  info is report in another MBean
- * @see org.apache.hadoop.dfs.datanode.metrics.DataNodeStatisticsMBean
+ * @see org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeStatisticsMBean
  *
  */
 public interface FSDatasetMBean {

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/BlocksMap.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/BlocksMap.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/BlocksMap.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java Thu Jul  3 15:55:06 2008
@@ -15,21 +15,23 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import java.util.*;
 
+import org.apache.hadoop.hdfs.protocol.Block;
+
 /**
  * This class maintains the map from a block to its metadata.
  * block's metadata currently includes INode it belongs to and
  * the datanodes that store the block.
  */
-class BlocksMap {
+public class BlocksMap {
         
   /**
    * Internal class for block metadata.
    */
-  static class BlockInfo extends Block {
+  public static class BlockInfo extends Block {
     private INodeFile          inode;
 
     /**
@@ -42,7 +44,7 @@
      */
     private Object[] triplets;
 
-    BlockInfo(Block blk, int replication) {
+    public BlockInfo(Block blk, int replication) {
       super(blk);
       this.triplets = new Object[3*replication];
       this.inode = null;

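BlocksMap and its nested BlockInfo are opened up with a public (Block, int) constructor. Construction sketch (the factory method is illustrative):

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.BlocksMap;

class BlockInfoExample {
  // Per the constructor shown above, BlockInfo copies the block's
  // identity via super(blk) and allocates 3 * replication triplet slots.
  static BlocksMap.BlockInfo forReplication(Block blk, int replication) {
    return new BlocksMap.BlockInfo(blk, replication);
  }
}
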
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/CheckpointSignature.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/CheckpointSignature.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/CheckpointSignature.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java Thu Jul  3 15:55:06 2008
@@ -15,18 +15,19 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.io.WritableComparable;
 
 /**
  * A unique signature intended to identify checkpoint transactions.
  */
-class CheckpointSignature extends StorageInfo 
+public class CheckpointSignature extends StorageInfo 
                       implements WritableComparable<CheckpointSignature> {
   private static final String FIELD_SEPARATOR = ":";
   long editsTime = -1L;

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/CorruptReplicasMap.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/CorruptReplicasMap.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/CorruptReplicasMap.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java Thu Jul  3 15:55:06 2008
@@ -15,8 +15,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.ipc.Server;
 
 import java.util.*;
@@ -30,7 +31,7 @@
  * Mapping: Block -> TreeSet<DatanodeDescriptor> 
  */
 
-class CorruptReplicasMap{
+public class CorruptReplicasMap{
 
   private Map<Block, Collection<DatanodeDescriptor>> corruptReplicasMap =
     new TreeMap<Block, Collection<DatanodeDescriptor>>();
@@ -99,7 +100,7 @@
     return ((nodes != null) && (nodes.contains(node)));
   }
 
-  int numCorruptReplicas(Block blk) {
+  public int numCorruptReplicas(Block blk) {
     Collection<DatanodeDescriptor> nodes = getNodes(blk);
     return (nodes == null) ? 0 : nodes.size();
   }

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DatanodeDescriptor.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DatanodeDescriptor.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DatanodeDescriptor.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java Thu Jul  3 15:55:06 2008
@@ -15,13 +15,19 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.DataInput;
 import java.io.IOException;
 import java.util.*;
 
-import org.apache.hadoop.dfs.BlocksMap.BlockInfo;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.UTF8;
 import org.apache.hadoop.io.WritableUtils;
@@ -39,9 +45,9 @@
  **************************************************/
 public class DatanodeDescriptor extends DatanodeInfo {
   /** Block and targets pair */
-  static class BlockTargetPair {
-    final Block block;
-    final DatanodeDescriptor[] targets;    
+  public static class BlockTargetPair {
+    public final Block block;
+    public final DatanodeDescriptor[] targets;    
 
     BlockTargetPair(Block block, DatanodeDescriptor[] targets) {
       this.block = block;
@@ -198,7 +204,7 @@
     this.blockList = null;
   }
 
-  int numBlocks() {
+  public int numBlocks() {
     return blockList == null ? 0 : blockList.listCount(this);
   }
 

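BlockTargetPair is made public with public final block and targets fields; its constructor stays package-private, so outside code reads pairs handed to it rather than building them. Read-side sketch (the logging helper is illustrative):

import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair;

class TransferLogger {
  // block and targets are public final fields after this commit.
  static void log(BlockTargetPair p) {
    System.out.println("replicate block " + p.block.getBlockId()
        + " to " + p.targets.length + " target(s)");
  }
}
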
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DfsServlet.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DfsServlet.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DfsServlet.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
 
@@ -25,6 +25,8 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.*;
 import org.znerd.xmlenc.XMLOutputter;
@@ -74,4 +76,4 @@
     doc.attribute("message", msg.substring(msg.indexOf(":") + 1).trim());
     doc.endTag();
   }
-}
\ No newline at end of file
+}

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSDirectory.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSDirectory.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSDirectory.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.*;
 import java.util.*;
@@ -27,7 +27,11 @@
 import org.apache.hadoop.metrics.MetricsRecord;
 import org.apache.hadoop.metrics.MetricsUtil;
 import org.apache.hadoop.metrics.MetricsContext;
-import org.apache.hadoop.dfs.BlocksMap.BlockInfo;
+import org.apache.hadoop.hdfs.protocol.DFSFileInfo;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
 
 /*************************************************
  * FSDirectory stores the filesystem directory state.

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSEditLog.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSEditLog.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSEditLog.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.BufferedInputStream;
 import java.io.DataInput;
@@ -32,6 +32,11 @@
 import java.lang.Math;
 import java.nio.channels.FileChannel;
 
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.io.*;
 import org.apache.hadoop.fs.permission.*;
 
@@ -39,7 +44,7 @@
  * FSEditLog maintains a log of the namespace modifications.
  * 
  */
-class FSEditLog {
+public class FSEditLog {
   private static final byte OP_ADD = 0;
   private static final byte OP_RENAME = 1;  // rename
   private static final byte OP_DELETE = 2;  // delete
@@ -244,7 +249,7 @@
    * 
    * @throws IOException
    */
-  synchronized void open() throws IOException {
+  public synchronized void open() throws IOException {
     numTransactions = totalTimeTransactions = 0;
     int size = getNumStorageDirs();
     if (editStreams == null)
@@ -262,7 +267,7 @@
     }
   }
 
-  synchronized void createEditLogFile(File name) throws IOException {
+  public synchronized void createEditLogFile(File name) throws IOException {
     EditLogOutputStream eStream = new EditLogOutputStream(name);
     eStream.create();
     eStream.close();
@@ -282,7 +287,7 @@
   /**
    * Shutdown the filestore
    */
-  synchronized void close() throws IOException {
+  public synchronized void close() throws IOException {
     while (isSyncRunning) {
       try {
         wait(1000);
@@ -740,7 +745,7 @@
   //
   // Sync all modifications done by this thread.
   //
-  void logSync() {
+  public void logSync() {
     ArrayList<EditLogOutputStream> errorStreams = null;
     long syncStart = 0;
 
@@ -840,7 +845,7 @@
    * Add open lease record to edit log. 
    * Records the block locations of the last block.
    */
-  void logOpenFile(String path, INodeFileUnderConstruction newNode) 
+  public void logOpenFile(String path, INodeFileUnderConstruction newNode) 
                    throws IOException {
 
     UTF8 nameReplicationPair[] = new UTF8[] { 
@@ -859,7 +864,7 @@
   /** 
    * Add close lease record to edit log.
    */
-  void logCloseFile(String path, INodeFile newNode) {
+  public void logCloseFile(String path, INodeFile newNode) {
     UTF8 nameReplicationPair[] = new UTF8[] {
       new UTF8(path),
       FSEditLog.toLogReplication(newNode.getReplication()),
@@ -874,7 +879,7 @@
   /** 
    * Add create directory record to edit log
    */
-  void logMkDir(String path, INode newNode) {
+  public void logMkDir(String path, INode newNode) {
     UTF8 info[] = new UTF8[] {
       new UTF8(path),
       FSEditLog.toLogLong(newNode.getModificationTime())

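FSEditLog's lifecycle surface (open, createEditLogFile, close, logSync, and the log* record writers) is made public for the rest of the namenode packages. A minimal calling sketch covering just the flush-and-shutdown portion:

import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.FSEditLog;

class EditLogShutdownSketch {
  // Per the hunks above: logSync() syncs all modifications done by the
  // calling thread, and close() shuts down the filestore.
  static void flushAndClose(FSEditLog editLog) throws IOException {
    editLog.logSync();
    editLog.close();
  }
}
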
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSImage.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSImage.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSImage.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
@@ -43,18 +43,26 @@
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.dfs.FSConstants.CheckpointStates;
-import org.apache.hadoop.dfs.FSConstants.StartupOption;
-import org.apache.hadoop.dfs.FSConstants.NodeType;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.FSConstants.CheckpointStates;
+import org.apache.hadoop.hdfs.protocol.FSConstants.StartupOption;
+import org.apache.hadoop.hdfs.protocol.FSConstants.NodeType;
 import org.apache.hadoop.io.UTF8;
 import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.dfs.BlocksMap.BlockInfo;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
+import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.common.UpgradeManager;
 
 /**
  * FSImage handles checkpointing and logging of the namespace edits.
  * 
  */
-class FSImage extends Storage {
+public class FSImage extends Storage {
 
   private static final SimpleDateFormat DATE_FORM =
     new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
@@ -107,14 +115,14 @@
     setStorageDirectories(fsDirs);
   }
 
-  FSImage(StorageInfo storageInfo) {
+  public FSImage(StorageInfo storageInfo) {
     super(NodeType.NAME_NODE, storageInfo);
   }
 
   /**
    * Represents an Image (image and edit file).
    */
-  FSImage(File imageDir) throws IOException {
+  public FSImage(File imageDir) throws IOException {
     this();
     ArrayList<File> dirs = new ArrayList<File>(1);
     dirs.add(imageDir);
@@ -556,11 +564,11 @@
     storageDirs.remove(index);
   }
 
-  FSEditLog getEditLog() {
+  public FSEditLog getEditLog() {
     return editLog;
   }
 
-  boolean isConversionNeeded(StorageDirectory sd) throws IOException {
+  public boolean isConversionNeeded(StorageDirectory sd) throws IOException {
     File oldImageDir = new File(sd.root, "image");
     if (!oldImageDir.exists()) {
       if(sd.getVersionFile().exists())
@@ -888,7 +896,7 @@
    * Save the contents of the FS image
    * and create empty edits.
    */
-  void saveFSImage() throws IOException {
+  public void saveFSImage() throws IOException {
     editLog.createNewIfMissing();
     for (int idx = 0; idx < getNumStorageDirs(); idx++) {
       StorageDirectory sd = getStorageDir(idx);
@@ -1226,7 +1234,7 @@
     return getImageFile(0, NameNodeFile.IMAGE);
   }
 
-  File getFsEditName() throws IOException {
+  public File getFsEditName() throws IOException {
     return getEditLog().getFsEditName();
   }
 

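FSImage likewise exposes its checkpoint surface: the FSImage(File) constructor, getEditLog(), saveFSImage(), and getFsEditName() all become public. An API sketch only, not the secondary namenode's actual checkpoint flow:

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.FSImage;

class CheckpointSketch {
  static File saveAndGetEdits(File imageDir) throws IOException {
    FSImage image = new FSImage(imageDir);   // public after this commit
    image.saveFSImage();                     // save image, create empty edits
    return image.getFsEditName();            // current edits file
  }
}
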
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSNamesystem.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSNamesystem.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Thu Jul  3 15:55:06 2008
@@ -15,14 +15,20 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.commons.logging.*;
 
 import org.apache.hadoop.conf.*;
-import org.apache.hadoop.dfs.BlocksMap.BlockInfo;
-import org.apache.hadoop.dfs.BlocksWithLocations.BlockWithLocations;
-import org.apache.hadoop.dfs.namenode.metrics.FSNamesystemMBean;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.protocol.*;
+import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
+import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
+import org.apache.hadoop.hdfs.protocol.BlocksWithLocations.BlockWithLocations;
+import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
+import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.*;
@@ -32,7 +38,12 @@
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.ScriptBasedMapping;
-import org.apache.hadoop.dfs.LeaseManager.Lease;
+import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
@@ -68,7 +79,7 @@
  * 4)  machine --> blocklist (inverted #2)
  * 5)  LRU cache of updated-heartbeat machines
  ***************************************************/
-class FSNamesystem implements FSConstants, FSNamesystemMBean {
+public class FSNamesystem implements FSConstants, FSNamesystemMBean {
   public static final Log LOG = LogFactory.getLog("org.apache.hadoop.fs.FSNamesystem");
   public static final String AUDIT_FORMAT =
     "ugi=%s\t" +  // ugi
@@ -94,7 +105,7 @@
   //
   // Stores the correct file name hierarchy
   //
-  FSDirectory dir;
+  public FSDirectory dir;
 
   //
   // Mapping: Block -> { INode, datanodes, self ref } 
@@ -105,7 +116,7 @@
   //
   // Store blocks-->datanodedescriptor(s) map of corrupt replicas
   //
-  CorruptReplicasMap corruptReplicas = new CorruptReplicasMap();
+  public CorruptReplicasMap corruptReplicas = new CorruptReplicasMap();
     
   /**
    * Stores the datanode -> block map.  
@@ -176,16 +187,16 @@
   private UnderReplicatedBlocks neededReplications = new UnderReplicatedBlocks();
   private PendingReplicationBlocks pendingReplications;
 
-  LeaseManager leaseManager = new LeaseManager(this); 
+  public LeaseManager leaseManager = new LeaseManager(this); 
 
   //
   // Threaded object that checks to see if we have been
   // getting heartbeats from all clients. 
   //
   Daemon hbthread = null;   // HeartbeatMonitor thread
-  Daemon lmthread = null;   // LeaseMonitor thread
+  public Daemon lmthread = null;   // LeaseMonitor thread
   Daemon smmthread = null;  // SafeModeMonitor thread
-  Daemon replthread = null;  // Replication thread
+  public Daemon replthread = null;  // Replication thread
   Daemon resthread = null; //ResolutionMonitor thread
   
   volatile boolean fsRunning = true;
@@ -336,7 +347,7 @@
     LOG.info("Web-server up at: " + infoHost + ":" + infoPort);
   }
 
-  static Collection<File> getNamespaceDirs(Configuration conf) {
+  public static Collection<File> getNamespaceDirs(Configuration conf) {
     Collection<String> dirNames = conf.getStringCollection("dfs.name.dir");
     if (dirNames.isEmpty())
       dirNames.add("/tmp/hadoop/dfs/name");
@@ -698,7 +709,7 @@
    * Get block locations within the specified range.
    * @see ClientProtocol#getBlockLocations(String, long, long)
    */
-  LocatedBlocks getBlockLocations(String src, long offset, long length
+  public LocatedBlocks getBlockLocations(String src, long offset, long length
       ) throws IOException {
     if (offset < 0) {
       throw new IOException("Negative offset is not supported. File: " + src );
@@ -729,7 +740,7 @@
       return null;
     }
     if (blocks.length == 0) {
-      return new LocatedBlocks(inode, new ArrayList<LocatedBlock>(blocks.length));
+      return inode.createLocatedBlocks(new ArrayList<LocatedBlock>(blocks.length));
     }
     List<LocatedBlock> results;
     results = new ArrayList<LocatedBlock>(blocks.length);
@@ -780,7 +791,7 @@
           && curBlk < blocks.length 
           && results.size() < nrBlocksToReturn);
     
-    return new LocatedBlocks(inode, results);
+    return inode.createLocatedBlocks(results);
   }
 
   /**
@@ -909,7 +920,7 @@
                                   +src+" for "+holder+" at "+clientMachine);
     if (isInSafeMode())
       throw new SafeModeException("Cannot create file" + src, safeMode);
-    if (!isValidName(src)) {
+    if (!DFSUtil.isValidName(src)) {
       throw new IOException("Invalid file name: " + src);
     }
     if (isPermissionEnabled) {
@@ -1019,7 +1030,7 @@
     }
     if (isInSafeMode())
       throw new SafeModeException("Cannot append file" + src, safeMode);
-    if (!isValidName(src)) {
+    if (!DFSUtil.isValidName(src)) {
       throw new IOException("Invalid file name: " + src);
     }
     if (isPermissionEnabled) {
@@ -1326,7 +1337,7 @@
   /**
    * Mark the block belonging to datanode as corrupt
    * @param blk Block to be marked as corrupt
-   * @param datanode Datanode which holds the corrupt replica
+   * @param dn Datanode which holds the corrupt replica
    */
   public synchronized void markBlockAsCorrupt(Block blk, DatanodeInfo dn)
     throws IOException {
@@ -1410,7 +1421,7 @@
     NameNode.stateChangeLog.debug("DIR* NameSystem.renameTo: " + src + " to " + dst);
     if (isInSafeMode())
       throw new SafeModeException("Cannot rename " + src, safeMode);
-    if (!isValidName(dst)) {
+    if (!DFSUtil.isValidName(dst)) {
       throw new IOException("Invalid name: " + dst);
     }
 
@@ -1498,30 +1509,6 @@
   }
 
   /**
-   * Whether the pathname is valid.  Currently prohibits relative paths, 
-   * and names which contain a ":" or "/" 
-   */
-  static boolean isValidName(String src) {
-      
-    // Path must be absolute.
-    if (!src.startsWith(Path.SEPARATOR)) {
-      return false;
-    }
-      
-    // Check for ".." "." ":" "/"
-    StringTokenizer tokens = new StringTokenizer(src, Path.SEPARATOR);
-    while(tokens.hasMoreTokens()) {
-      String element = tokens.nextToken();
-      if (element.equals("..") || 
-          element.equals(".")  ||
-          (element.indexOf(":") >= 0)  ||
-          (element.indexOf("/") >= 0)) {
-        return false;
-      }
-    }
-    return true;
-  }
-  /**
    * Create all the necessary directories
    */
   public boolean mkdirs(String src, PermissionStatus permissions
@@ -1555,7 +1542,7 @@
     }
     if (isInSafeMode())
       throw new SafeModeException("Cannot create directory " + src, safeMode);
-    if (!isValidName(src)) {
+    if (!DFSUtil.isValidName(src)) {
       throw new IOException("Invalid directory name: " + src);
     }
     if (isPermissionEnabled) {
@@ -1728,7 +1715,7 @@
     }
     else {
       // update last block, construct newblockinfo and add it to the blocks map
-      lastblock.set(lastblock.blkid, newlength, newgenerationstamp);
+      lastblock.set(lastblock.getBlockId(), newlength, newgenerationstamp);
       final BlockInfo newblockinfo = blocksMap.addINode(lastblock, pendingFile);
     
       //update block info
@@ -1897,7 +1884,7 @@
    * namespaceID and will continue serving the datanodes that has previously
    * registered with the namenode without restarting the whole cluster.
    * 
-   * @see DataNode#register()
+   * @see org.apache.hadoop.hdfs.server.datanode.DataNode#register()
    */
   public synchronized void registerDatanode(DatanodeRegistration nodeReg
                                             ) throws IOException {
@@ -2194,7 +2181,7 @@
    * 
    * @return number of blocks scheduled for replication or removal.
    */
-  int computeDatanodeWork() throws IOException {
+  public int computeDatanodeWork() throws IOException {
     int workFound = 0;
     int blocksToProcess = 0;
     int nodesToProcess = 0;
@@ -2463,7 +2450,7 @@
     return blocksToInvalidate.size();
   }
 
-  void setNodeReplicationLimit(int limit) {
+  public void setNodeReplicationLimit(int limit) {
     this.maxReplicationStreams = limit;
   }
 
@@ -3306,7 +3293,7 @@
     throws IOException {
 
     if (!node.isDecommissionInProgress() && !node.isDecommissioned()) {
-      LOG.info("Start Decommissioning node " + node.name);
+      LOG.info("Start Decommissioning node " + node.getName());
       node.startDecommission();
       //
       // all the blocks that reside on this node have to be 
@@ -3324,7 +3311,7 @@
    */
   public void stopDecommission (DatanodeDescriptor node) 
     throws IOException {
-    LOG.info("Stop Decommissioning node " + node.name);
+    LOG.info("Stop Decommissioning node " + node.getName());
     node.stopDecommission();
   }
 
@@ -3467,7 +3454,7 @@
     if (node.isDecommissionInProgress()) {
       if (!isReplicationInProgress(node)) {
         node.setDecommissioned();
-        LOG.info("Decommission complete for node " + node.name);
+        LOG.info("Decommission complete for node " + node.getName());
       }
     }
     if (node.isDecommissioned()) {
@@ -4331,14 +4318,14 @@
   /**
    * Sets the generation stamp for this filesystem
    */
-  void setGenerationStamp(long stamp) {
+  public void setGenerationStamp(long stamp) {
     generationStamp.setStamp(stamp);
   }
 
   /**
    * Gets the generation stamp for this filesystem
    */
-  long getGenerationStamp() {
+  public long getGenerationStamp() {
     return generationStamp.getStamp();
   }
 

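Path validation moves out of FSNamesystem into the new DFSUtil helper; the deleted isValidName above shows the unchanged rules (absolute path; no "..", ".", ":" or "/" path elements). Caller-side sketch matching the call sites in this file (the wrapper class is illustrative):

import java.io.IOException;

import org.apache.hadoop.hdfs.DFSUtil;

class PathCheckExample {
  // Same semantics as the removed FSNamesystem.isValidName above.
  static void requireValid(String src) throws IOException {
    if (!DFSUtil.isValidName(src)) {
      throw new IOException("Invalid file name: " + src);
    }
  }
}
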
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FileDataServlet.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FileDataServlet.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FileDataServlet.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
 import java.net.URI;
@@ -28,10 +28,15 @@
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.DFSFileInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 
 /** Redirect queries about the hosted filesystem to an appropriate datanode.
- * @see org.apache.hadoop.dfs.HftpFileSystem
+ * @see org.apache.hadoop.hdfs.HftpFileSystem
  */
 public class FileDataServlet extends DfsServlet {
 

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FsckServlet.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FsckServlet.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FsckServlet.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import java.util.*;
 import java.io.*;
@@ -33,7 +33,7 @@
  */
 public class FsckServlet extends HttpServlet {
 
-  private static final Log LOG = LogFactory.getLog("org.apache.hadoop.dfs.FSNamesystem");
+  private static final Log LOG = LogFactory.getLog(FSNamesystem.class.getName());
 
   @SuppressWarnings("unchecked")
   public void doGet(HttpServletRequest request,

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/GenerationStampUpgradeNamenode.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/GenerationStampUpgradeNamenode.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/GenerationStampUpgradeNamenode.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/GenerationStampUpgradeNamenode.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/GenerationStampUpgradeNamenode.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/GenerationStampUpgradeNamenode.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/GenerationStampUpgradeNamenode.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.*;
 import java.util.*;
@@ -28,6 +28,14 @@
 import java.net.InetSocketAddress;
 
 import org.apache.commons.logging.*;
+import org.apache.hadoop.hdfs.protocol.DFSFileInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.common.GenerationStampStatsUpgradeCommand;
+import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
+import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.retry.*;
 import org.apache.hadoop.util.StringUtils;
@@ -44,18 +52,18 @@
  * Once an upgrade starts at the namenode , this class manages the upgrade 
  * process.
  */
-class GenerationStampUpgradeNamenode extends UpgradeObjectNamenode {
+public class GenerationStampUpgradeNamenode extends UpgradeObjectNamenode {
   
   public static final Log LOG = 
-    LogFactory.getLog("org.apache.hadoop.dfs.GenerationStampUpgradeNamenode");
+    LogFactory.getLog(GenerationStampUpgradeNamenode.class.getName());
   
   static final long inactivityExtension = 10*1000; // 10 seconds
   AtomicLong lastNodeCompletionTime = new AtomicLong(0);
 
   // The layout version before the generation stamp upgrade.
-  static final int PRE_GENERATIONSTAMP_LAYOUT_VERSION = -13;
+  public static final int PRE_GENERATIONSTAMP_LAYOUT_VERSION = -13;
 
-  static final int DN_CMD_STATS = 300;
+  public static final int DN_CMD_STATS = 300;
   
   enum UpgradeStatus {
     INITIALIZED,
@@ -150,6 +158,7 @@
   }
 
   @Override
+  public
   UpgradeCommand processUpgradeCommand(UpgradeCommand command) 
                                            throws IOException {
     switch (command.getAction()) {

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/GetImageServlet.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/GetImageServlet.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/GetImageServlet.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import java.util.*;
 import java.io.*;

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/Host2NodesMap.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/Host2NodesMap.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/Host2NodesMap.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/Host2NodesMap.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/Host2NodesMap.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/Host2NodesMap.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/Host2NodesMap.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import java.util.*;
 import java.util.concurrent.locks.ReadWriteLock;

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INode.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/INode.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INode.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INode.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/INode.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/INode.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INode.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.FileNotFoundException;
 import java.io.UnsupportedEncodingException;
@@ -28,14 +28,18 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.permission.*;
-import org.apache.hadoop.dfs.BlocksMap.BlockInfo;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
 
 /**
  * We keep an in-memory representation of the file/block hierarchy.
  * This is a base INode class containing common fields for file and 
  * directory inodes.
  */
-abstract class INode implements Comparable<byte[]> {
+public abstract class INode implements Comparable<byte[]> {
   protected byte[] name;
   protected INodeDirectory parent;
   protected long modificationTime;
@@ -119,7 +123,7 @@
     permission = f.combine(n, permission);
   }
   /** Get user name */
-  protected String getUserName() {
+  public String getUserName() {
     int n = (int)PermissionStatusFormat.USER.retrieve(permission);
     return SerialNumberManager.INSTANCE.getUser(n);
   }
@@ -129,7 +133,7 @@
     updatePermissionStatus(PermissionStatusFormat.USER, n);
   }
   /** Get group name */
-  protected String getGroupName() {
+  public String getGroupName() {
     int n = (int)PermissionStatusFormat.GROUP.retrieve(permission);
     return SerialNumberManager.INSTANCE.getGroup(n);
   }
@@ -139,7 +143,7 @@
     updatePermissionStatus(PermissionStatusFormat.GROUP, n);
   }
   /** Get the {@link FsPermission} */
-  protected FsPermission getFsPermission() {
+  public FsPermission getFsPermission() {
     return new FsPermission(
         (short)PermissionStatusFormat.MODE.retrieve(permission));
   }
@@ -154,7 +158,7 @@
   /**
    * Check whether it's a directory
    */
-  abstract boolean isDirectory();
+  public abstract boolean isDirectory();
   /**
    * Collect all the blocks in all children of this INode.
    * Count and return the number of files in the sub tree.
@@ -163,7 +167,7 @@
   abstract int collectSubtreeBlocksAndClear(List<Block> v);
 
   /** Compute {@link ContentSummary}. */
-  final ContentSummary computeContentSummary() {
+  public final ContentSummary computeContentSummary() {
     long[] a = computeContentSummary(new long[]{0,0,0});
     return new ContentSummary(a[0], a[1], a[2], getQuota());
   }
@@ -237,7 +241,7 @@
    * Get last modification time of inode.
   * @return modification time
    */
-  long getModificationTime() {
+  public long getModificationTime() {
     return this.modificationTime;
   }
 
@@ -368,699 +372,10 @@
     }
     return null;
   }
-}
-
-/**
- * Directory INode class.
- */
-class INodeDirectory extends INode {
-  protected static final int DEFAULT_FILES_PER_DIRECTORY = 5;
-  final static String ROOT_NAME = "";
-
-  private List<INode> children;
-
-  INodeDirectory(String name, PermissionStatus permissions) {
-    super(name, permissions);
-    this.children = null;
-  }
-
-  INodeDirectory(PermissionStatus permissions, long mTime) {
-    super(permissions, mTime);
-    this.children = null;
-  }
-
-  /** constructor */
-  INodeDirectory(byte[] localName, PermissionStatus permissions, long mTime) {
-    this(permissions, mTime);
-    this.name = localName;
-  }
-  
-  /** copy constructor
-   * 
-   * @param other
-   */
-  INodeDirectory(INodeDirectory other) {
-    super(other);
-    this.children = other.getChildren();
-  }
-  
-  /**
-   * Check whether it's a directory
-   */
-  boolean isDirectory() {
-    return true;
-  }
-
-  INode removeChild(INode node) {
-    assert children != null;
-    int low = Collections.binarySearch(children, node.name);
-    if (low >= 0) {
-      return children.remove(low);
-    } else {
-      return null;
-    }
-  }
-
-  /** Replace a child that has the same name as newChild by newChild.
-   * 
-   * @param newChild Child node to be added
-   */
-  void replaceChild(INode newChild) {
-    if ( children == null ) {
-      throw new IllegalArgumentException("The directory is empty");
-    }
-    int low = Collections.binarySearch(children, newChild.name);
-    if (low>=0) { // an old child exists so replace by the newChild
-      children.set(low, newChild);
-    } else {
-      throw new IllegalArgumentException("No child exists to be replaced");
-    }
-  }
-  
-  INode getChild(String name) {
-    return getChildINode(string2Bytes(name));
-  }
-
-  private INode getChildINode(byte[] name) {
-    if (children == null) {
-      return null;
-    }
-    int low = Collections.binarySearch(children, name);
-    if (low >= 0) {
-      return children.get(low);
-    }
-    return null;
-  }
-
-  /**
-   * Resolve the given path components to the target INode,
-   * returning null if the full path does not exist.
-   */
-  private INode getNode(byte[][] components) {
-    INode[] inode  = new INode[1];
-    getExistingPathINodes(components, inode);
-    return inode[0];
-  }
-
-  /**
-   * The external interface for path lookup: returns the INode at the
-   * given path, or null if it does not exist.
-   */
-  INode getNode(String path) {
-    return getNode(getPathComponents(path));
-  }
-
-  /**
-   * Retrieve existing INodes from a path. If existing is big enough to store
-   * all path components (existing and non-existing), then existing INodes
-   * will be stored starting from the root INode into existing[0]; if
-   * existing is not big enough to store all path components, then only the
-   * last existing and non-existing INodes will be stored so that
-   * existing[existing.length-1] refers to the target INode.
-   * 
-   * <p>
-   * Example: <br>
-   * Given the path /c1/c2/c3 where only /c1/c2 exists, resulting in the
-   * following path components: ["","c1","c2","c3"],
-   * 
-   * <p>
-   * <code>getExistingPathINodes(["","c1","c2"], [?])</code> should fill the
-   * array with [c2] <br>
-   * <code>getExistingPathINodes(["","c1","c2","c3"], [?])</code> should fill the
-   * array with [null]
-   * 
-   * <p>
-   * <code>getExistingPathINodes(["","c1","c2"], [?,?])</code> should fill the
-   * array with [c1,c2] <br>
-   * <code>getExistingPathINodes(["","c1","c2","c3"], [?,?])</code> should fill
-   * the array with [c2,null]
-   * 
-   * <p>
-   * <code>getExistingPathINodes(["","c1","c2"], [?,?,?,?])</code> should fill
-   * the array with [rootINode,c1,c2,null], <br>
-   * <code>getExistingPathINodes(["","c1","c2","c3"], [?,?,?,?])</code> should
-   * fill the array with [rootINode,c1,c2,null]
-   * @param components array of path component name
-   * @param existing INode array to fill with existing INodes
-   * @return number of existing INodes in the path
-   */
-  int getExistingPathINodes(byte[][] components, INode[] existing) {
-    assert compareBytes(this.name, components[0]) == 0 :
-      "Incorrect name " + getLocalName() + " expected " + components[0];
-
-    INode curNode = this;
-    int count = 0;
-    int index = existing.length - components.length;
-    if (index > 0)
-      index = 0;
-    while ((count < components.length) && (curNode != null)) {
-      if (index >= 0)
-        existing[index] = curNode;
-      if (!curNode.isDirectory() || (count == components.length - 1))
-        break; // no more child, stop here
-      INodeDirectory parentDir = (INodeDirectory)curNode;
-      curNode = parentDir.getChildINode(components[count + 1]);
-      count += 1;
-      index += 1;
-    }
-    return count;
-  }
-
-  /**
-   * Retrieve the existing INodes along the given path. The first INode
-   * always exist and is this INode.
-   * 
-   * @param path the path to explore
-   * @return INodes array containing the existing INodes in the order they
-   *         appear when following the path from the root INode to the
-   *         deepest INodes. The array size will be the number of expected
-   *         components in the path, and non-existing components will be
-   *         filled with null
-   */
-  INode[] getExistingPathINodes(String path) {
-    byte[][] components = getPathComponents(path);
-    INode[] inodes = new INode[components.length];
-
-    this.getExistingPathINodes(components, inodes);
-    
-    return inodes;
-  }
-
-  /**
-   * Add a child inode to the directory.
-   * 
-   * @param node INode to insert
-   * @param inheritPermission inherit permission from parent?
-   * @return  null if a child with this name already exists;
-   *          otherwise, the inserted INode
-   */
-  <T extends INode> T addChild(final T node, boolean inheritPermission) {
-    if (inheritPermission) {
-      FsPermission p = getFsPermission();
-      // make sure the permission has wx for the user
-      if (!p.getUserAction().implies(FsAction.WRITE_EXECUTE)) {
-        p = new FsPermission(p.getUserAction().or(FsAction.WRITE_EXECUTE),
-            p.getGroupAction(), p.getOtherAction());
-      }
-      node.setPermission(p);
-    }
-
-    if (children == null) {
-      children = new ArrayList<INode>(DEFAULT_FILES_PER_DIRECTORY);
-    }
-    int low = Collections.binarySearch(children, node.name);
-    if(low >= 0)
-      return null;
-    node.parent = this;
-    children.add(-low - 1, node);
-    // update modification time of the parent directory
-    setModificationTime(node.getModificationTime());
-    if (node.getGroupName() == null) {
-      node.setGroup(getGroupName());
-    }
-    return node;
-  }
-
-  /**
-   * Equivalent to addNode(path, newNode, false).
-   * @see #addNode(String, INode, boolean)
-   */
-  <T extends INode> T addNode(String path, T newNode) throws FileNotFoundException {
-    return addNode(path, newNode, false);
-  }
-  /**
-   * Add a new INode to the file tree.
-   * Find the parent and insert it.
-   * 
-   * @param path file path
-   * @param newNode INode to be added
-   * @param inheritPermission If true, copy the parent's permission to newNode.
-   * @return null if the node already exists; inserted INode, otherwise
-   * @throws FileNotFoundException if parent does not exist or 
-   * is not a directory.
-   */
-  <T extends INode> T addNode(String path, T newNode, boolean inheritPermission
-      ) throws FileNotFoundException {
-    if(addToParent(path, newNode, null, inheritPermission) == null)
-      return null;
-    return newNode;
-  }
-
-  /**
-   * Add new inode to the parent if specified.
-   * Optimized version of addNode() if parent is not null.
-   * 
-   * @return  parent INode if new inode is inserted
-   *          or null if it already exists.
-   * @throws  FileNotFoundException if parent does not exist or 
-   *          is not a directory.
-   */
-  <T extends INode> INodeDirectory addToParent(
-                                      String path,
-                                      T newNode,
-                                      INodeDirectory parent,
-                                      boolean inheritPermission
-                                    ) throws FileNotFoundException {
-    byte[][] pathComponents = getPathComponents(path);
-    assert pathComponents != null : "Incorrect path " + path;
-    int pathLen = pathComponents.length;
-    if (pathLen < 2)  // add root
-      return null;
-    if(parent == null) {
-      // Gets the parent INode
-      INode[] inodes  = new INode[2];
-      getExistingPathINodes(pathComponents, inodes);
-      INode inode = inodes[0];
-      if (inode == null) {
-        throw new FileNotFoundException("Parent path does not exist: "+path);
-      }
-      if (!inode.isDirectory()) {
-        throw new FileNotFoundException("Parent path is not a directory: "+path);
-      }
-      parent = (INodeDirectory)inode;
-    }
-    // insert into the parent children list
-    newNode.name = pathComponents[pathLen-1];
-    if(parent.addChild(newNode, inheritPermission) == null)
-      return null;
-    return parent;
-  }
-
-  /**
-   * Count the inodes in the subtree rooted at this directory,
-   * including this directory itself.
-   */
-  long numItemsInTree() {
-    long total = 1L;
-    if (children == null) {
-      return total;
-    }
-    for (INode child : children) {
-      total += child.numItemsInTree();
-    }
-    return total;
-  }
-
-  /** {@inheritDoc} */
-  long[] computeContentSummary(long[] summary) {
-    if (children != null) {
-      for (INode child : children) {
-        child.computeContentSummary(summary);
-      }
-    }
-    summary[2]++;
-    return summary;
-  }
-
-  /**
-   * Get the children of this directory; returns an empty list
-   * (never null) if there are none.
-   */
-  List<INode> getChildren() {
-    return children==null ? new ArrayList<INode>() : children;
-  }
-  List<INode> getChildrenRaw() {
-    return children;
-  }
-
-  int collectSubtreeBlocksAndClear(List<Block> v) {
-    int total = 1;
-    if (children == null) {
-      return total;
-    }
-    for (INode child : children) {
-      total += child.collectSubtreeBlocksAndClear(v);
-    }
-    parent = null;
-    children = null;
-    return total;
-  }
-}
-
-/**
- * Directory INode class that has a quota restriction
- */
-class INodeDirectoryWithQuota extends INodeDirectory {
-  private long quota;
-  private long count;
-  
-  /** Convert an existing directory inode to one with the given quota
-   * 
-   * @param quota Quota to be assigned to this inode
-   * @param other The other inode from which all other properties are copied
-   */
-  INodeDirectoryWithQuota(long quota, INodeDirectory other)
-  throws QuotaExceededException {
-    super(other);
-    this.count = other.numItemsInTree();
-    setQuota(quota);
-  }
-  
-  /** constructor with no quota verification */
-  INodeDirectoryWithQuota(
-      PermissionStatus permissions, long modificationTime, long quota)
-  {
-    super(permissions, modificationTime);
-    this.quota = quota;
-  }
-  
-  /** constructor with no quota verification */
-  INodeDirectoryWithQuota(String name, PermissionStatus permissions, long quota)
-  {
-    super(name, permissions);
-    this.quota = quota;
-  }
-  
-  /** Get this directory's quota
-   * @return this directory's quota
-   */
-  long getQuota() {
-    return quota;
-  }
-  
-  /** Set this directory's quota
-   * 
-   * @param quota Quota to be set
-   * @throws QuotaExceededException if the given quota is less than 
-   *                                the size of the tree
-   */
-  void setQuota(long quota) throws QuotaExceededException {
-    verifyQuota(quota, this.count);
-    this.quota = quota;
-  }
-  
-  /** Get the number of names in the subtree rooted at this directory
-   * @return the size of the subtree rooted at this directory
-   */
-  long numItemsInTree() {
-    return count;
-  }
   
-  /** Update the size of the tree
-   * 
-   * @param delta the change of the tree size
-   * @throws QuotaExceededException if the changed size is greater 
-   *                                than the quota
-   */
-  void updateNumItemsInTree(long delta) throws QuotaExceededException {
-    long newCount = this.count + delta;
-    if (delta>0) {
-      verifyQuota(this.quota, newCount);
-    }
-    this.count = newCount;
-  }
-  
-  /** Set the size of the tree rooted at this directory
-   * 
-   * @param count size of the directory to be set
-   * @throws QuotaExceededException if the given count is greater than quota
-   */
-  void setCount(long count) throws QuotaExceededException {
-    verifyQuota(this.quota, count);
-    this.count = count;
-  }
-  
-  /** Verify if the count satisfies the quota restriction 
-   * @throws QuotaExceededException if the given quota is less than the count
-   */
-  private static void verifyQuota(long quota, long count)
-  throws QuotaExceededException {
-    if (quota < count) {
-      throw new QuotaExceededException(quota, count);
-    }
-  }
-}
-
-class INodeFile extends INode {
-  static final FsPermission UMASK = FsPermission.createImmutable((short)0111);
-
-  protected BlockInfo blocks[] = null;
-  protected short blockReplication;
-  protected long preferredBlockSize;
-
-  INodeFile(PermissionStatus permissions,
-            int nrBlocks, short replication, long modificationTime,
-            long preferredBlockSize) {
-    this(permissions, new BlockInfo[nrBlocks], replication,
-        modificationTime, preferredBlockSize);
-  }
-
-  protected INodeFile() {
-    blocks = null;
-    blockReplication = 0;
-    preferredBlockSize = 0;
-  }
-
-  protected INodeFile(PermissionStatus permissions, BlockInfo[] blklist,
-                      short replication, long modificationTime,
-                      long preferredBlockSize) {
-    super(permissions, modificationTime);
-    this.blockReplication = replication;
-    this.preferredBlockSize = preferredBlockSize;
-    blocks = blklist;
-  }
-
-  /**
-   * Set the {@link FsPermission} of this {@link INodeFile}.
-   * Since this is a file,
-   * the {@link FsAction#EXECUTE} action, if any, is ignored.
-   */
-  protected void setPermission(FsPermission permission) {
-    super.setPermission(permission.applyUMask(UMASK));
-  }
-
-  boolean isDirectory() {
-    return false;
-  }
-
-  /**
-   * Get block replication for the file 
-   * @return block replication
-   */
-  short getReplication() {
-    return this.blockReplication;
-  }
-
-  void setReplication(short replication) {
-    this.blockReplication = replication;
-  }
-
-  /**
-   * Get file blocks 
-   * @return file blocks
-   */
-  BlockInfo[] getBlocks() {
-    return this.blocks;
-  }
-
-  /**
-   * add a block to the block list
-   */
-  void addBlock(BlockInfo newblock) {
-    if (this.blocks == null) {
-      this.blocks = new BlockInfo[1];
-      this.blocks[0] = newblock;
-    } else {
-      int size = this.blocks.length;
-      BlockInfo[] newlist = new BlockInfo[size + 1];
-      for (int i = 0; i < size; i++) {
-        newlist[i] = this.blocks[i];
-      }
-      newlist[size] = newblock;
-      this.blocks = newlist;
-    }
-  }
-
-  /**
-   * Set file block
-   */
-  void setBlock(int idx, BlockInfo blk) {
-    this.blocks[idx] = blk;
-  }
-
-  int collectSubtreeBlocksAndClear(List<Block> v) {
-    parent = null;
-    for (Block blk : blocks) {
-      v.add(blk);
-    }
-    blocks = null;
-    return 1;
-  }
-
-  /** {@inheritDoc} */
-  long[] computeContentSummary(long[] summary) {
-    long bytes = 0;
-    for(Block blk : blocks) {
-      bytes += blk.getNumBytes();
-    }
-    summary[0] += bytes;
-    summary[1]++;
-    return summary;
-  }
-
-  /**
-   * Get the preferred block size of the file.
-   * @return the number of bytes
-   */
-  long getPreferredBlockSize() {
-    return preferredBlockSize;
-  }
-
-  /**
-   * Return the penultimate allocated block for this file.
-   */
-  Block getPenultimateBlock() {
-    if (blocks == null || blocks.length <= 1) {
-      return null;
-    }
-    return blocks[blocks.length - 2];
-  }
-
-  INodeFileUnderConstruction toINodeFileUnderConstruction(
-      String clientName, String clientMachine, DatanodeDescriptor clientNode
-      ) throws IOException {
-    if (isUnderConstruction()) {
-      return (INodeFileUnderConstruction)this;
-    }
-    return new INodeFileUnderConstruction(name,
-        blockReplication, modificationTime, preferredBlockSize,
-        blocks, getPermissionStatus(),
-        clientName, clientMachine, clientNode);
-  }
-}
-
-class INodeFileUnderConstruction extends INodeFile {
-  StringBytesWritable clientName = null;         // lease holder
-  StringBytesWritable clientMachine = null;
-  DatanodeDescriptor clientNode = null; // if client is a cluster node too.
-
-  private int primaryNodeIndex = -1; //the node working on lease recovery
-  private DatanodeDescriptor[] targets = null;   //locations for last block
   
-  INodeFileUnderConstruction() {}
-
-  INodeFileUnderConstruction(PermissionStatus permissions,
-                             short replication,
-                             long preferredBlockSize,
-                             long modTime,
-                             String clientName,
-                             String clientMachine,
-                             DatanodeDescriptor clientNode) 
-                             throws IOException {
-    super(permissions.applyUMask(UMASK), 0, replication, modTime,
-        preferredBlockSize);
-    this.clientName = new StringBytesWritable(clientName);
-    this.clientMachine = new StringBytesWritable(clientMachine);
-    this.clientNode = clientNode;
-  }
-
-  INodeFileUnderConstruction(byte[] name,
-                             short blockReplication,
-                             long modificationTime,
-                             long preferredBlockSize,
-                             BlockInfo[] blocks,
-                             PermissionStatus perm,
-                             String clientName,
-                             String clientMachine,
-                             DatanodeDescriptor clientNode)
-                             throws IOException {
-    super(perm, blocks, blockReplication, modificationTime, 
-          preferredBlockSize);
-    setLocalName(name);
-    this.clientName = new StringBytesWritable(clientName);
-    this.clientMachine = new StringBytesWritable(clientMachine);
-    this.clientNode = clientNode;
-  }
-
-  String getClientName() throws IOException {
-    return clientName.getString();
-  }
-
-  String getClientMachine() throws IOException {
-    return clientMachine.getString();
-  }
-
-  DatanodeDescriptor getClientNode() {
-    return clientNode;
-  }
-
-  /**
-   * Is this inode being constructed?
-   */
-  @Override
-  boolean isUnderConstruction() {
-    return true;
-  }
-
-  DatanodeDescriptor[] getTargets() {
-    return targets;
-  }
-
-  void setTargets(DatanodeDescriptor[] targets) {
-    this.targets = targets;
-    this.primaryNodeIndex = -1;
-  }
-
-  //
-  // converts an INodeFileUnderConstruction into an INodeFile
-  //
-  INodeFile convertToInodeFile() {
-    INodeFile obj = new INodeFile(getPermissionStatus(),
-                                  getBlocks(),
-                                  getReplication(),
-                                  getModificationTime(),
-                                  getPreferredBlockSize());
-    return obj;
-    
-  }
-
-  /**
-   * remove a block from the block list. This block should be
-   * the last one on the list.
-   */
-  void removeBlock(Block oldblock) throws IOException {
-    if (blocks == null) {
-      throw new IOException("Trying to delete non-existent block " + oldblock);
-    }
-    int size_1 = blocks.length - 1;
-    if (!blocks[size_1].equals(oldblock)) {
-      throw new IOException("Trying to delete non-last block " + oldblock);
-    }
-
-    //copy to a new list
-    BlockInfo[] newlist = new BlockInfo[size_1];
-    System.arraycopy(blocks, 0, newlist, 0, size_1);
-    blocks = newlist;
-    
-    // Remove the block locations for the last block.
-    targets = null;
-  }
-
-  void setLastBlock(BlockInfo newblock, DatanodeDescriptor[] newtargets
-      ) throws IOException {
-    if (blocks == null) {
-      throw new IOException("Trying to update non-existent block (newblock="
-          + newblock + ")");
-    }
-    blocks[blocks.length - 1] = newblock;
-    setTargets(newtargets);
-  }
-
-  /**
-   * Initialize lease recovery for this object
-   */
-  void assignPrimaryDatanode() {
-    //assign the first alive datanode as the primary datanode
-
-    if (targets.length == 0) {
-      NameNode.stateChangeLog.warn("BLOCK*"
-        + " INodeFileUnderConstruction.assignPrimaryDatanode:"
-        + " No blocks found, lease removed.");
-    }
-
-    int previous = primaryNodeIndex;
-    //find an alive datanode beginning from previous
-    for(int i = 1; i <= targets.length; i++) {
-      int j = (previous + i)%targets.length;
-      if (targets[j].isAlive) {
-        DatanodeDescriptor primary = targets[primaryNodeIndex = j]; 
-        primary.addBlockToBeRecovered(blocks[blocks.length - 1], targets);
-        NameNode.stateChangeLog.info("BLOCK* " + blocks[blocks.length - 1]
-          + " recovery started.");
-      }
-    }
+  LocatedBlocks createLocatedBlocks(List<LocatedBlock> blocks) {
+    return new LocatedBlocks(computeContentSummary().getLength(), blocks,
+        isUnderConstruction());
   }
 }
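
The only addition to INode.java itself is the createLocatedBlocks helper at
the end of the hunk. A hedged sketch of what it assembles, using only the
LocatedBlocks constructor visible in the diff (the empty block list and the
helper method are illustrative, not part of the commit):

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hdfs.protocol.LocatedBlock;
    import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

    static LocatedBlocks emptyFile() {
      List<LocatedBlock> blocks = new ArrayList<LocatedBlock>();
      // length 0, no blocks, not under construction -- mirrors what
      // createLocatedBlocks derives from computeContentSummary() and
      // isUnderConstruction() for a real file.
      return new LocatedBlocks(0L, blocks, false);
    }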

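The getExistingPathINodes javadoc in the removed INodeDirectory code above is
worth a worked example. A sketch under the javadoc's own scenario, where only
/c1/c2 exists (rootDir is a hypothetical root INodeDirectory, and
getPathComponents is assumed to be the package-private helper the code itself
calls unqualified):

    // Resolving "/c1/c2/c3" where only /c1/c2 exists.
    byte[][] components = INode.getPathComponents("/c1/c2/c3"); // ["","c1","c2","c3"]
    INode[] existing = new INode[components.length];            // room for all four
    int count = rootDir.getExistingPathINodes(components, existing);
    // count == 3: the root, c1 and c2 exist.
    // existing == [rootINode, c1, c2, null]: the slot for the missing
    // target "c3" stays null, exactly as the javadoc's last case shows.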

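addChild and removeChild above both lean on the java.util.Collections
binarySearch contract: a negative return value encodes the insertion point as
-(insertionPoint) - 1, which is why children.add(-low - 1, node) keeps the
child list sorted. A self-contained demonstration, with plain Strings standing
in for the Comparable child INodes:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public class SortedInsertDemo {
      public static void main(String[] args) {
        List<String> children = new ArrayList<String>();
        for (String name : new String[] {"c2", "a1", "b3"}) {
          int low = Collections.binarySearch(children, name);
          if (low < 0) {                  // absent: -(insertion point) - 1
            children.add(-low - 1, name); // insert at the encoded position
          }                               // present: skip, as addChild returns null
        }
        System.out.println(children);     // prints [a1, b3, c2]
      }
    }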

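Finally, the quota bookkeeping removed from INodeDirectoryWithQuota is simple
arithmetic: growth is checked against the quota before it is applied, while
shrinking always succeeds, so an over-quota directory can still be cleaned up.
A minimal sketch with plain longs standing in for the inode state:

    // quota = 5 names allowed; the subtree currently holds 4.
    long quota = 5, count = 4;
    long delta = 2;                    // attempt to add two more names
    long newCount = count + delta;     // 6
    if (delta > 0 && quota < newCount) {
      // the real code throws QuotaExceededException(quota, newCount) here
      throw new IllegalStateException("quota exceeded: " + newCount + " > " + quota);
    }
    count = newCount;                  // only reached when within quota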