hadoop-common-commits mailing list archives

From: omal...@apache.org
Subject: svn commit: r673857 [2/6] - in /hadoop/core/trunk: ./ bin/ conf/ docs/ src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/ src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/ src/contrib/index/src/java/org/apache/hadoop/contri...
Date: Thu, 03 Jul 2008 22:55:18 GMT
Added: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSUtil.java?rev=673857&view=auto
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSUtil.java (added)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSUtil.java Thu Jul  3 15:55:06 2008
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import java.util.StringTokenizer;
+import org.apache.hadoop.fs.Path;
+
+public class DFSUtil {
+  /**
+   * Whether the pathname is valid.  Currently prohibits relative paths, 
+   * and names which contain a ":" or "/" 
+   */
+  public static boolean isValidName(String src) {
+      
+    // Path must be absolute.
+    if (!src.startsWith(Path.SEPARATOR)) {
+      return false;
+    }
+      
+    // Check for ".." "." ":" "/"
+    StringTokenizer tokens = new StringTokenizer(src, Path.SEPARATOR);
+    while(tokens.hasMoreTokens()) {
+      String element = tokens.nextToken();
+      if (element.equals("..") || 
+          element.equals(".")  ||
+          (element.indexOf(":") >= 0)  ||
+          (element.indexOf("/") >= 0)) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+}
+

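For reference, a minimal sketch of how the new utility behaves on a few inputs. The demo class and paths are hypothetical; only DFSUtil itself comes from this commit.

    import org.apache.hadoop.hdfs.DFSUtil;

    public class IsValidNameDemo {
      public static void main(String[] args) {
        // Absolute path with ordinary components: accepted.
        System.out.println(DFSUtil.isValidName("/user/alice/data")); // true
        // Relative path: rejected, since the name must start with "/".
        System.out.println(DFSUtil.isValidName("user/alice"));       // false
        // "." and ".." path components: rejected.
        System.out.println(DFSUtil.isValidName("/user/../etc"));     // false
        // ":" inside a component: rejected.
        System.out.println(DFSUtil.isValidName("/user/a:b"));        // false
      }
    }
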
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DistributedFileSystem.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DistributedFileSystem.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DistributedFileSystem.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java Thu Jul  3 15:55:06 2008
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs;
 
 import java.io.*;
 import java.net.*;
@@ -24,10 +24,18 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.dfs.FSConstants.DatanodeReportType;
-import org.apache.hadoop.dfs.FSConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.protocol.DFSFileInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.*;
 
+
 /****************************************************************
  * Implementation of the abstract FileSystem for the DFS system.
  * This object is the way end-user code interacts with a Hadoop
@@ -91,7 +99,7 @@
 
   public void setWorkingDirectory(Path dir) {
     String result = makeAbsolute(dir).toUri().getPath();
-    if (!FSNamesystem.isValidName(result)) {
+    if (!DFSUtil.isValidName(result)) {
       throw new IllegalArgumentException("Invalid DFS directory name " + 
                                          result);
     }
@@ -106,7 +114,7 @@
   private String getPathName(Path file) {
     checkPath(file);
     String result = makeAbsolute(file).toUri().getPath();
-    if (!FSNamesystem.isValidName(result)) {
+    if (!DFSUtil.isValidName(result)) {
       throw new IllegalArgumentException("Pathname " + result + " from " +
                                          file+" is not a valid DFS filename.");
     }
@@ -183,16 +191,16 @@
   }
 
   /** Clear a directory's quota
-   * @see ClientProtocol#clearQuota(String)
+   * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#clearQuota(String)
    */
-  void clearQuota(Path src) throws IOException {
+  public void clearQuota(Path src) throws IOException {
     dfs.clearQuota(getPathName(src));
   }
   
   /** Set a directory's quota
-   * @see ClientProtocol#setQuota(String, long) 
+   * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String, long) 
    */
-  void setQuota(Path src, long quota) throws IOException {
+  public void setQuota(Path src, long quota) throws IOException {
     if (quota <= 0) {
       throw new IllegalArgumentException("Quota should be a positive number: "
           + quota);
@@ -287,7 +295,7 @@
   /**
    * Enter, leave or get safe mode.
    *  
-   * @see org.apache.hadoop.dfs.ClientProtocol#setSafeMode(
+   * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode(
    *    FSConstants.SafeModeAction)
    */
   public boolean setSafeMode(FSConstants.SafeModeAction action) 

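With clearQuota and setQuota widened from package-private to public above, external code can manage directory quotas directly. A hedged usage sketch; the namenode URI and paths are illustrative, not part of the commit.

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class QuotaDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Illustrative namenode address; substitute a real cluster.
        FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020/"), conf);
        DistributedFileSystem dfs = (DistributedFileSystem) fs;

        Path dir = new Path("/user/alice/project");
        dfs.setQuota(dir, 10000); // must be positive, else IllegalArgumentException
        dfs.clearQuota(dir);      // remove the quota again
      }
    }
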
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/HftpFileSystem.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/HftpFileSystem.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/HftpFileSystem.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/HftpFileSystem.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/HftpFileSystem.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/HftpFileSystem.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/HftpFileSystem.java Thu Jul  3 15:55:06 2008
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs;
 
 import java.io.FileNotFoundException;
 import java.io.InputStream;
@@ -50,6 +50,7 @@
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.server.namenode.ListPathsServlet;
 import org.apache.hadoop.security.*;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.StringUtils;
@@ -57,8 +58,8 @@
 /** An implementation of a protocol for accessing filesystems over HTTP.
  * The following implementation provides a limited, read-only interface
  * to a filesystem over HTTP.
- * @see org.apache.hadoop.dfs.ListPathsServlet
- * @see org.apache.hadoop.dfs.FileDataServlet
+ * @see org.apache.hadoop.hdfs.server.namenode.ListPathsServlet
+ * @see org.apache.hadoop.hdfs.server.namenode.FileDataServlet
  */
 public class HftpFileSystem extends FileSystem {
   static {

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/HsftpFileSystem.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/HsftpFileSystem.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/HsftpFileSystem.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/HsftpFileSystem.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/HsftpFileSystem.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/HsftpFileSystem.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/HsftpFileSystem.java Thu Jul  3 15:55:06 2008
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
 
@@ -30,8 +30,8 @@
 /** An implementation of a protocol for accessing filesystems over HTTPS.
  * The following implementation provides a limited, read-only interface
  * to a filesystem over HTTPS.
- * @see org.apache.hadoop.dfs.ListPathsServlet
- * @see org.apache.hadoop.dfs.FileDataServlet
+ * @see org.apache.hadoop.hdfs.server.namenode.ListPathsServlet
+ * @see org.apache.hadoop.hdfs.server.namenode.FileDataServlet
  */
 public class HsftpFileSystem extends HftpFileSystem {
 

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/AlreadyBeingCreatedException.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/AlreadyBeingCreatedException.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/AlreadyBeingCreatedException.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/AlreadyBeingCreatedException.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/AlreadyBeingCreatedException.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/AlreadyBeingCreatedException.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/AlreadyBeingCreatedException.java Thu Jul  3 15:55:06 2008
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.protocol;
 
 import java.io.IOException;
 

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/Block.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/Block.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/Block.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/Block.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/Block.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/Block.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/Block.java Thu Jul  3 15:55:06 2008
@@ -15,9 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.protocol;
 
 import java.io.*;
+
+import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.io.*;
 
 /**************************************************
@@ -25,7 +27,7 @@
  * long.
  *
  **************************************************/
-class Block implements Writable, Comparable<Block> {
+public class Block implements Writable, Comparable<Block> {
 
   static {                                      // register a ctor
     WritableFactories.setFactory
@@ -37,11 +39,11 @@
 
   // generation stamp of blocks that pre-date the introduction of
   // a generation stamp.
-  static final long GRANDFATHER_GENERATION_STAMP = 0;
+  public static final long GRANDFATHER_GENERATION_STAMP = 0;
 
   /**
    */
-  static boolean isBlockFilename(File f) {
+  public static boolean isBlockFilename(File f) {
     String name = f.getName();
     if ( name.startsWith( "blk_" ) && 
         name.indexOf( '.' ) < 0 ) {
@@ -55,19 +57,19 @@
     return Long.parseLong(name.substring("blk_".length()));
   }
 
-  long blkid;
-  long len;
-  long generationStamp;
+  private long blockId;
+  private long numBytes;
+  private long generationStamp;
 
-  Block() {this(0, 0, 0);}
+  public Block() {this(0, 0, 0);}
 
-  Block(final long blkid, final long len, final long generationStamp) {
+  public Block(final long blkid, final long len, final long generationStamp) {
     set(blkid, len, generationStamp);
   }
 
-  Block(final long blkid) {this(blkid, 0, GenerationStamp.WILDCARD_STAMP);}
+  public Block(final long blkid) {this(blkid, 0, GenerationStamp.WILDCARD_STAMP);}
 
-  Block(Block blk) {this(blk.blkid, blk.len, blk.generationStamp);}
+  public Block(Block blk) {this(blk.blockId, blk.numBytes, blk.generationStamp);}
 
   /**
    * Find the blockid from the given filename
@@ -77,34 +79,42 @@
   }
 
   public void set(long blkid, long len, long genStamp) {
-    this.blkid = blkid;
-    this.len = len;
+    this.blockId = blkid;
+    this.numBytes = len;
     this.generationStamp = genStamp;
   }
   /**
    */
   public long getBlockId() {
-    return blkid;
+    return blockId;
+  }
+  
+  public void setBlockId(long bid) {
+    blockId = bid;
   }
 
   /**
    */
   public String getBlockName() {
-    return "blk_" + String.valueOf(blkid);
+    return "blk_" + String.valueOf(blockId);
   }
 
   /**
    */
   public long getNumBytes() {
-    return len;
+    return numBytes;
   }
   public void setNumBytes(long len) {
-    this.len = len;
+    this.numBytes = len;
   }
 
-  long getGenerationStamp() {
+  public long getGenerationStamp() {
     return generationStamp;
   }
+  
+  public void setGenerationStamp(long stamp) {
+    generationStamp = stamp;
+  }
 
   /**
    */
@@ -116,17 +126,17 @@
   // Writable
   /////////////////////////////////////
   public void write(DataOutput out) throws IOException {
-    out.writeLong(blkid);
-    out.writeLong(len);
+    out.writeLong(blockId);
+    out.writeLong(numBytes);
     out.writeLong(generationStamp);
   }
 
   public void readFields(DataInput in) throws IOException {
-    this.blkid = in.readLong();
-    this.len = in.readLong();
+    this.blockId = in.readLong();
+    this.numBytes = in.readLong();
     this.generationStamp = in.readLong();
-    if (len < 0) {
-      throw new IOException("Unexpected block size: " + len);
+    if (numBytes < 0) {
+      throw new IOException("Unexpected block size: " + numBytes);
     }
   }
 
@@ -146,9 +156,9 @@
     validateGenerationStamp(this.generationStamp);
     validateGenerationStamp(b.generationStamp);
 
-    if (blkid < b.blkid) {
+    if (blockId < b.blockId) {
       return -1;
-    } else if (blkid == b.blkid) {
+    } else if (blockId == b.blockId) {
       return GenerationStamp.compare(generationStamp, b.generationStamp);
     } else {
       return 1;
@@ -162,7 +172,7 @@
     }
     final Block that = (Block)o;
     //Wildcard generationStamp is ALLOWED here
-    return this.blkid == that.blkid
+    return this.blockId == that.blockId
       && GenerationStamp.equalsWithWildcard(
           this.generationStamp, that.generationStamp);
   }
@@ -170,6 +180,6 @@
   /** {@inheritDoc} */
   public int hashCode() {
     //GenerationStamp is IRRELEVANT and should not be used here
-    return 37 * 17 + (int) (blkid^(blkid>>>32));
+    return 37 * 17 + (int) (blockId^(blockId>>>32));
   }
 }

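Block keeps its wire format (three longs: id, length, generation stamp) while its fields move behind accessors. A round-trip sketch using only the public constructors and methods visible in the diff above:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;

    import org.apache.hadoop.hdfs.protocol.Block;

    public class BlockRoundTrip {
      public static void main(String[] args) throws Exception {
        Block original = new Block(42L, 1024L, 1000L); // id, length, generation stamp

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));   // writes the three longs

        Block copy = new Block();
        copy.readFields(new DataInputStream(
            new ByteArrayInputStream(bytes.toByteArray())));

        System.out.println(copy.getBlockName());   // blk_42
        System.out.println(copy.equals(original)); // true: same id and stamp
      }
    }
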
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/BlockListAsLongs.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/BlockListAsLongs.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/BlockListAsLongs.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.protocol;
 
 /**
  * This class provides an interface for accessing list of blocks that
@@ -24,7 +24,7 @@
  * as a Block[] we can send it as a long[].
  *
  */
-class BlockListAsLongs {
+public class BlockListAsLongs {
   /**
    * A block as 3 longs
    *   block-id and block length and generation stamp
@@ -49,7 +49,7 @@
    * @return the output array of long[]
    */
   
-  static long[] convertToArrayLongs(final Block[] blockArray) {
+  public static long[] convertToArrayLongs(final Block[] blockArray) {
     long[] blocksAsLongs = new long[blockArray.length * LONGS_PER_BLOCK];
 
     BlockListAsLongs bl = new BlockListAsLongs(blocksAsLongs);
@@ -65,7 +65,7 @@
    * Constructor
    * @param iBlockList - BlockListAsLongs created from this long[] parameter
    */
-  BlockListAsLongs(final long[] iBlockList) {
+  public BlockListAsLongs(final long[] iBlockList) {
     if (iBlockList == null) {
       blockList = new long[0];
     } else {
@@ -82,7 +82,7 @@
    * The number of blocks
    * @return - the number of blocks
    */
-  int getNumberOfBlocks() {
+  public int getNumberOfBlocks() {
     return blockList.length/LONGS_PER_BLOCK;
   }
   
@@ -92,7 +92,7 @@
    * @param index - the block whose block-id is desired
    * @return the block-id
    */
-  long getBlockId(final int index)  {
+  public long getBlockId(final int index)  {
     return blockList[index2BlockId(index)];
   }
   
@@ -101,7 +101,7 @@
    * @param index - the block whose block-len is desired
    * @return - the block-len
    */
-  long getBlockLen(final int index)  {
+  public long getBlockLen(final int index)  {
     return blockList[index2BlockLen(index)];
   }
 
@@ -110,7 +110,7 @@
    * @param index - the block whose generation stamp is desired
    * @return - the generation stamp
    */
-  long getBlockGenStamp(final int index)  {
+  public long getBlockGenStamp(final int index)  {
     return blockList[index2BlockGenStamp(index)];
   }
   

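The packing that BlockListAsLongs now exposes publicly lays each block out as three consecutive longs, so a block report travels as a single long[]. A pack/unpack sketch built from the methods shown above:

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;

    public class BlockReportDemo {
      public static void main(String[] args) {
        Block[] report = {
          new Block(1L, 128L, 1000L),
          new Block(2L, 256L, 1001L),
        };

        // Pack: 3 longs per block instead of a Block[] on the wire.
        long[] asLongs = BlockListAsLongs.convertToArrayLongs(report);

        // Unpack on the receiving side.
        BlockListAsLongs list = new BlockListAsLongs(asLongs);
        for (int i = 0; i < list.getNumberOfBlocks(); i++) {
          System.out.println("id=" + list.getBlockId(i)
              + " len=" + list.getBlockLen(i)
              + " genstamp=" + list.getBlockGenStamp(i));
        }
      }
    }
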
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/BlocksWithLocations.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/BlocksWithLocations.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/BlocksWithLocations.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/BlocksWithLocations.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/BlocksWithLocations.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/BlocksWithLocations.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/BlocksWithLocations.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.protocol;
 
 import java.io.DataInput;
 import java.io.DataOutput;
@@ -29,12 +29,12 @@
  *  It provides efficient customized serialization/deserialization methods
  *  instead of using the default array (de)serialization provided by RPC
  */
-class BlocksWithLocations implements Writable {
+public class BlocksWithLocations implements Writable {
 
   /**
    * A class to keep track of a block and its locations
    */
-  static class BlockWithLocations  implements Writable {
+  public static class BlockWithLocations  implements Writable {
     Block block;
     String datanodeIDs[];
     
@@ -51,12 +51,12 @@
     }
     
     /** get the block */
-    Block getBlock() {
+    public Block getBlock() {
       return block;
     }
     
     /** get the block's locations */
-    String[] getDatanodes() {
+    public String[] getDatanodes() {
       return datanodeIDs;
     }
     
@@ -87,12 +87,12 @@
   }
 
   /** Constructor with one parameter */
-  BlocksWithLocations( BlockWithLocations[] blocks ) {
+  public BlocksWithLocations( BlockWithLocations[] blocks ) {
     this.blocks = blocks;
   }
 
   /** getter */
-  BlockWithLocations[] getBlocks() {
+  public BlockWithLocations[] getBlocks() {
     return blocks;
   }
 

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/ClientDatanodeProtocol.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/ClientDatanodeProtocol.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/ClientDatanodeProtocol.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.protocol;
 
 import java.io.IOException;
 
@@ -25,7 +25,7 @@
 
 /** A client-datanode protocol for block recovery
  */
-interface ClientDatanodeProtocol extends VersionedProtocol {
+public interface ClientDatanodeProtocol extends VersionedProtocol {
   public static final Log LOG = LogFactory.getLog(ClientDatanodeProtocol.class);
 
   /**
@@ -35,7 +35,7 @@
 
   /** Start generation-stamp recovery for specified block
    * @param block the specified block
-   * @param DatanodeInfo the list of possible locations of specified block
+   * @param targets the list of possible locations of specified block
    * @return the new blockid if recovery was successful and the generation
    * stamp was updated as part of the recovery; returns null if the block
    * did not have any data and was deleted.

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/ClientProtocol.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/ClientProtocol.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/ClientProtocol.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Thu Jul  3 15:55:06 2008
@@ -15,23 +15,25 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.protocol;
 
 import java.io.*;
 
 import org.apache.hadoop.ipc.VersionedProtocol;
-import org.apache.hadoop.dfs.FSConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
+import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
 import org.apache.hadoop.fs.permission.*;
 import org.apache.hadoop.fs.ContentSummary;
 
 /**********************************************************************
  * ClientProtocol is used by user code via 
- * {@link DistributedFileSystem} class to communicate 
+ * {@link org.apache.hadoop.hdfs.DistributedFileSystem} class to communicate 
  * with the NameNode.  User code can manipulate the directory namespace, 
  * as well as open/close file streams, etc.
  *
  **********************************************************************/
-interface ClientProtocol extends VersionedProtocol {
+public interface ClientProtocol extends VersionedProtocol {
 
   /**
    * Compared to the previous version, the following changes have been introduced:

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DFSFileInfo.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DFSFileInfo.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DFSFileInfo.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DFSFileInfo.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DFSFileInfo.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DFSFileInfo.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DFSFileInfo.java Thu Jul  3 15:55:06 2008
@@ -15,8 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.protocol;
 
+import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.io.*;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileStatus;
@@ -29,7 +31,7 @@
  * Block locations are sorted by the distance to the current client.
  * 
  ******************************************************/
-class DFSFileInfo extends FileStatus {
+public class DFSFileInfo extends FileStatus {
   static {                                      // register a ctor
     WritableFactories.setFactory
       (DFSFileInfo.class,

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeID.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DatanodeID.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeID.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeID.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DatanodeID.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DatanodeID.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeID.java Thu Jul  3 15:55:06 2008
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.protocol;
 
 import java.io.DataInput;
 import java.io.DataOutput;
@@ -32,12 +32,12 @@
  * 
  */
 public class DatanodeID implements WritableComparable {
-  static final DatanodeID[] EMPTY_ARRAY = {}; 
+  public static final DatanodeID[] EMPTY_ARRAY = {}; 
 
-  protected String name;      /// hostname:portNumber
-  protected String storageID; /// unique per cluster storageID
+  public String name;      /// hostname:portNumber
+  public String storageID; /// unique per cluster storageID
   protected int infoPort;     /// the port where the infoserver is running
-  protected int ipcPort;     /// the port where the ipc server is running
+  public int ipcPort;     /// the port where the ipc server is running
 
   /** Equivalent to DatanodeID(""). */
   public DatanodeID() {this("");}
@@ -101,9 +101,9 @@
   }
 
   /**
-   * @sets data storage ID.
+   * sets the data storage ID.
    */
-  void setStorageID(String storageID) {
+  public void setStorageID(String storageID) {
     this.storageID = storageID;
   }
 
@@ -150,7 +150,7 @@
    * Update fields when a new registration request comes in.
    * Note that this does not update storageID.
    */
-  void updateRegInfo(DatanodeID nodeReg) {
+  public void updateRegInfo(DatanodeID nodeReg) {
     name = nodeReg.getName();
     infoPort = nodeReg.getInfoPort();
     // update any more fields added in future.

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DatanodeInfo.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DatanodeInfo.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DatanodeInfo.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.protocol;
 
 import java.io.DataInput;
 import java.io.DataOutput;
@@ -55,12 +55,12 @@
   protected AdminStates adminState;
 
 
-  DatanodeInfo() {
+  public DatanodeInfo() {
     super();
     adminState = null;
   }
   
-  DatanodeInfo(DatanodeInfo from) {
+  public DatanodeInfo(DatanodeInfo from) {
     super(from);
     this.capacity = from.getCapacity();
     this.dfsUsed = from.getDfsUsed();
@@ -72,7 +72,7 @@
     this.hostName = from.hostName;
   }
 
-  DatanodeInfo(DatanodeID nodeID) {
+  public DatanodeInfo(DatanodeID nodeID) {
     super(nodeID);
     this.capacity = 0L;
     this.dfsUsed = 0L;
@@ -82,7 +82,7 @@
     this.adminState = null;    
   }
   
-  DatanodeInfo(DatanodeID nodeID, String location, String hostName) {
+  protected DatanodeInfo(DatanodeID nodeID, String location, String hostName) {
     this(nodeID);
     this.location = location;
     this.hostName = hostName;
@@ -104,22 +104,22 @@
   public int getXceiverCount() { return xceiverCount; }
 
   /** Sets raw capacity. */
-  void setCapacity(long capacity) { 
+  public void setCapacity(long capacity) { 
     this.capacity = capacity; 
   }
 
   /** Sets raw free space. */
-  void setRemaining(long remaining) { 
+  public void setRemaining(long remaining) { 
     this.remaining = remaining; 
   }
 
   /** Sets time when this information was accurate. */
-  void setLastUpdate(long lastUpdate) { 
+  public void setLastUpdate(long lastUpdate) { 
     this.lastUpdate = lastUpdate; 
   }
 
   /** Sets number of active connections */
-  void setXceiverCount(int xceiverCount) { 
+  public void setXceiverCount(int xceiverCount) { 
     this.xceiverCount = xceiverCount; 
   }
 
@@ -166,7 +166,7 @@
   }
 
   /** A formatted string for printing the status of the DataNode. */
-  String dumpDatanode() {
+  public String dumpDatanode() {
     StringBuffer buffer = new StringBuffer();
     long c = getCapacity();
     long r = getRemaining();
@@ -195,7 +195,7 @@
    * Start decommissioning a node.
    * old state.
    */
-  void startDecommission() {
+  public void startDecommission() {
     adminState = AdminStates.DECOMMISSION_INPROGRESS;
   }
 
@@ -203,14 +203,14 @@
    * Stop decommissioning a node.
    * old state.
    */
-  void stopDecommission() {
+  public void stopDecommission() {
     adminState = null;
   }
 
   /**
    * Returns true if the node is in the process of being decommissioned
    */
-  boolean isDecommissionInProgress() {
+  public boolean isDecommissionInProgress() {
     if (adminState == AdminStates.DECOMMISSION_INPROGRESS) {
       return true;
     }
@@ -220,7 +220,7 @@
   /**
    * Returns true if the node has been decommissioned.
    */
-  boolean isDecommissioned() {
+  public boolean isDecommissioned() {
     if (adminState == AdminStates.DECOMMISSIONED) {
       return true;
     }
@@ -230,7 +230,7 @@
   /**
    * Sets the admin state to indicate that decommision is complete.
    */
-  void setDecommissioned() {
+  public void setDecommissioned() {
     adminState = AdminStates.DECOMMISSIONED;
   }
 
@@ -247,7 +247,7 @@
   /**
    * Sets the admin state of this node.
    */
-  void setAdminState(AdminStates newState) {
+  protected void setAdminState(AdminStates newState) {
     if (newState == AdminStates.NORMAL) {
       adminState = null;
     }

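The decommission lifecycle on DatanodeInfo is now drivable from outside the package. A sketch of the state transitions; the hostname:port string is illustrative, and the single-String DatanodeID constructor is assumed from the "Equivalent to DatanodeID("")" javadoc above.

    import org.apache.hadoop.hdfs.protocol.DatanodeID;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

    public class DecommissionDemo {
      public static void main(String[] args) {
        // DatanodeID(String) assumed per the javadoc quoted above.
        DatanodeInfo node = new DatanodeInfo(new DatanodeID("host1:50010"));

        node.startDecommission();
        System.out.println(node.isDecommissionInProgress()); // true
        System.out.println(node.isDecommissioned());         // false

        node.setDecommissioned();
        System.out.println(node.isDecommissioned());         // true

        node.stopDecommission();                             // back to normal (null state)
        System.out.println(node.isDecommissionInProgress()); // false
      }
    }
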
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/FSConstants.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSConstants.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/FSConstants.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/FSConstants.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSConstants.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSConstants.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/FSConstants.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.protocol;
 
 import org.apache.hadoop.conf.Configuration;
 
@@ -134,7 +134,7 @@
     
   public static final int BUFFER_SIZE = new Configuration().getInt("io.file.buffer.size", 4096);
   //Used for writing header etc.
-  static final int SMALL_BUFFER_SIZE = Math.min(BUFFER_SIZE/2, 512);
+  public static final int SMALL_BUFFER_SIZE = Math.min(BUFFER_SIZE/2, 512);
   //TODO mb@media-style.com: should be conf injected?
   public static final long DEFAULT_BLOCK_SIZE = 64 * 1024 * 1024;
   public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
@@ -155,7 +155,7 @@
     
     private String name = null;
     private StartupOption(String arg) {this.name = arg;}
-    String getName() {return name;}
+    public String getName() {return name;}
   }
 
   // type of the datanode report

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/LocatedBlock.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/LocatedBlock.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/LocatedBlock.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/LocatedBlock.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/LocatedBlock.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/LocatedBlock.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/LocatedBlock.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.protocol;
 
 import org.apache.hadoop.io.*;
 
@@ -26,7 +26,7 @@
  * objects.  It tells where to find a Block.
  * 
  ****************************************************/
-class LocatedBlock implements Writable {
+public class LocatedBlock implements Writable {
 
   static {                                      // register a ctor
     WritableFactories.setFactory
@@ -84,15 +84,15 @@
 
   /**
    */
-  DatanodeInfo[] getLocations() {
+  public DatanodeInfo[] getLocations() {
     return locs;
   }
   
-  long getStartOffset() {
+  public long getStartOffset() {
     return offset;
   }
   
-  long getBlockSize() {
+  public long getBlockSize() {
     return b.getNumBytes();
   }
 
@@ -104,7 +104,7 @@
     this.corrupt = corrupt;
   }
   
-  boolean isCorrupt() {
+  public boolean isCorrupt() {
     return this.corrupt;
   }
 

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/LocatedBlocks.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/LocatedBlocks.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/LocatedBlocks.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.protocol;
 
 import java.io.DataInput;
 import java.io.DataOutput;
@@ -43,10 +43,11 @@
     underConstruction = false;
   }
   
-  LocatedBlocks(INodeFile inode, List<LocatedBlock> blks) {
-    fileLength = inode.computeContentSummary().getLength();
+  public LocatedBlocks(long flength, List<LocatedBlock> blks, boolean isUnderConstruction) {
+
+    fileLength = flength;
     blocks = blks;
-    underConstruction = inode.isUnderConstruction();
+    underConstruction = isUnderConstruction;
   }
   
   /**
@@ -90,7 +91,7 @@
    * 
    * @return index of the block if found, or a negative insertion point otherwise.
    */
-  int findBlock(long offset) {
+  public int findBlock(long offset) {
     // create fake block of size 1 as a key
     LocatedBlock key = new LocatedBlock();
     key.setStartOffset(offset);
@@ -114,7 +115,7 @@
     return Collections.binarySearch(blocks, key, comp);
   }
   
-  void insertRange(int blockIdx, List<LocatedBlock> newBlocks) {
+  public void insertRange(int blockIdx, List<LocatedBlock> newBlocks) {
     int oldIdx = blockIdx;
     int insStart = 0, insEnd = 0;
     for(int newIdx = 0; newIdx < newBlocks.size() && oldIdx < blocks.size(); 
@@ -142,7 +143,7 @@
     }
   }
   
-  static int getInsertIndex(int binSearchResult) {
+  public static int getInsertIndex(int binSearchResult) {
     return binSearchResult >= 0 ? binSearchResult : -(binSearchResult+1);
   }
 

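getInsertIndex decodes the convention of Collections.binarySearch, which reports a miss as -(insertionPoint + 1). The helper is reproduced in a self-contained sketch so the arithmetic can be checked in isolation:

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class InsertIndexDemo {
      // Same logic as the now-public LocatedBlocks.getInsertIndex().
      static int getInsertIndex(int binSearchResult) {
        return binSearchResult >= 0 ? binSearchResult : -(binSearchResult + 1);
      }

      public static void main(String[] args) {
        List<Long> offsets = Arrays.asList(0L, 64L, 128L);

        int hit  = Collections.binarySearch(offsets, 64L);  // 1: exact match
        int miss = Collections.binarySearch(offsets, 100L); // -3: would insert at 2

        System.out.println(getInsertIndex(hit));  // 1
        System.out.println(getInsertIndex(miss)); // 2
      }
    }
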
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/QuotaExceededException.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/QuotaExceededException.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/QuotaExceededException.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java Thu Jul  3 15:55:06 2008
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.protocol;
 
 import java.io.IOException;
 

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/UnregisteredDatanodeException.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UnregisteredDatanodeException.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/UnregisteredDatanodeException.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/UnregisteredDatanodeException.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UnregisteredDatanodeException.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UnregisteredDatanodeException.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/UnregisteredDatanodeException.java Thu Jul  3 15:55:06 2008
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.protocol;
 
 import java.io.IOException;
 
@@ -26,7 +26,7 @@
  * registered is trying to access the name node.
  * 
  */
-class UnregisteredDatanodeException extends IOException {
+public class UnregisteredDatanodeException extends IOException {
 
   public UnregisteredDatanodeException(DatanodeID nodeID) {
     super("Unregistered data node: " + nodeID.getName());

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/Balancer.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/Balancer.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/Balancer.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.balancer;
 
 import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
@@ -51,8 +51,13 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.dfs.BlocksWithLocations.BlockWithLocations;
-import org.apache.hadoop.dfs.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.common.Util;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.protocol.*;
+import org.apache.hadoop.hdfs.protocol.BlocksWithLocations.BlockWithLocations;
+import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
@@ -171,7 +176,7 @@
 
 public class Balancer implements Tool {
   private static final Log LOG = 
-    LogFactory.getLog("org.apache.hadoop.dfs.Balancer");
+    LogFactory.getLog(Balancer.class.getName());
   final private static long MAX_BLOCKS_SIZE_TO_FETCH = 2*1024*1024*1024L; //2GB
 
   private Configuration conf;
@@ -724,7 +729,7 @@
      */ 
     private static final long MAX_ITERATION_TIME = 20*60*1000L; //20 mins
     private void dispatchBlocks() {
-      long startTime = FSNamesystem.now();
+      long startTime = Util.now();
       this.blocksToReceive = 2*scheduledSize;
       boolean isTimeUp = false;
       while(!isTimeUp && scheduledSize>0 &&
@@ -753,7 +758,7 @@
         } 
         
         // check if time is up or not
-        if (FSNamesystem.now()-startTime > MAX_ITERATION_TIME) {
+        if (Util.now()-startTime > MAX_ITERATION_TIME) {
           isTimeUp = true;
           continue;
         }
@@ -1365,7 +1370,7 @@
    * @exception Exception if an exception occurs during datanode balancing
    */
   public int run(String[] args) throws Exception {
-    long startTime = FSNamesystem.now();
+    long startTime = Util.now();
     OutputStream out = null;
     try {
       // initialize a balancer
@@ -1463,7 +1468,7 @@
       } catch(IOException ignored) {
       }
       System.out.println("Balancing took " + 
-          time2Str(FSNamesystem.now()-startTime));
+          time2Str(Util.now()-startTime));
     }
   }
 

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/GenerationStamp.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/GenerationStamp.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/GenerationStamp.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/GenerationStamp.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/GenerationStamp.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/GenerationStamp.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/GenerationStamp.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.common;
 
 import java.io.*;
 import org.apache.hadoop.io.*;
@@ -23,7 +23,7 @@
 /****************************************************************
  * A GenerationStamp is a Hadoop FS primitive, identified by a long.
  ****************************************************************/
-class GenerationStamp implements WritableComparable<GenerationStamp> {
+public class GenerationStamp implements WritableComparable<GenerationStamp> {
   public static final long WILDCARD_STAMP = 1;
   public static final long FIRST_VALID_STAMP = 1000L;
 
@@ -40,7 +40,7 @@
   /**
    * Create a new instance, initialized to FIRST_VALID_STAMP.
    */
-  GenerationStamp() {this(GenerationStamp.FIRST_VALID_STAMP);}
+  public GenerationStamp() {this(GenerationStamp.FIRST_VALID_STAMP);}
 
   /**
    * Create a new instance, initialized to the specified value.
@@ -86,7 +86,7 @@
   /////////////////////////////////////
   // Comparable
   /////////////////////////////////////
-  static int compare(long x, long y) {
+  public static int compare(long x, long y) {
     return x < y? -1: x == y? 0: 1;
   }
 
@@ -103,7 +103,7 @@
     return genstamp == ((GenerationStamp)o).genstamp;
   }
 
-  static boolean equalsWithWildcard(long x, long y) {
+  public static boolean equalsWithWildcard(long x, long y) {
     return x == y || x == WILDCARD_STAMP || y == WILDCARD_STAMP;  
   }
 

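The wildcard semantics that Block.equals relies on are now reachable through public helpers. A small sketch of the comparison rules, using only constants and methods from the diff above:

    import org.apache.hadoop.hdfs.server.common.GenerationStamp;

    public class StampDemo {
      public static void main(String[] args) {
        long a = 1000L;                          // FIRST_VALID_STAMP
        long b = 1001L;
        long w = GenerationStamp.WILDCARD_STAMP; // 1, matches any stamp

        System.out.println(GenerationStamp.compare(a, b));            // -1
        System.out.println(GenerationStamp.equalsWithWildcard(a, b)); // false
        System.out.println(GenerationStamp.equalsWithWildcard(a, w)); // true
      }
    }
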
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/GenerationStampStatsUpgradeCommand.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/GenerationStampStatsUpgradeCommand.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/GenerationStampStatsUpgradeCommand.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/GenerationStampStatsUpgradeCommand.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/GenerationStampStatsUpgradeCommand.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/GenerationStampStatsUpgradeCommand.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/GenerationStampStatsUpgradeCommand.java Thu Jul  3 15:55:06 2008
@@ -15,19 +15,23 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.common;
 
 import java.io.*;
 
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.server.namenode.GenerationStampUpgradeNamenode;
+import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
+
 /**
  * The Datanode sends this statistics object to the Namenode periodically
  * during a Generation Stamp Upgrade.
  */
-class GenerationStampStatsUpgradeCommand extends UpgradeCommand {
-  DatanodeID datanodeId;
-  int blocksUpgraded;
-  int blocksRemaining;
-  int errors;
+public class GenerationStampStatsUpgradeCommand extends UpgradeCommand {
+  public DatanodeID datanodeId;
+  public int blocksUpgraded;
+  public int blocksRemaining;
+  public int errors;
 
   GenerationStampStatsUpgradeCommand() {
     super(GenerationStampUpgradeNamenode.DN_CMD_STATS, 0, (short)0);

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/InconsistentFSStateException.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/InconsistentFSStateException.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/InconsistentFSStateException.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/InconsistentFSStateException.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/InconsistentFSStateException.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/InconsistentFSStateException.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/InconsistentFSStateException.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.common;
 
 import java.io.File;
 import java.io.IOException;
@@ -26,7 +26,7 @@
  * and is not recoverable. 
  * 
  */
-class InconsistentFSStateException extends IOException {
+public class InconsistentFSStateException extends IOException {
 
   public InconsistentFSStateException(File dir, String descr) {
     super("Directory " + getFilePath(dir)

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/IncorrectVersionException.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/IncorrectVersionException.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/IncorrectVersionException.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java Thu Jul  3 15:55:06 2008
@@ -15,16 +15,18 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.common;
 
 import java.io.IOException;
 
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+
 /**
  * The exception is thrown when the external version does not match
  * the current version of the application.
  * 
  */
-class IncorrectVersionException extends IOException {
+public class IncorrectVersionException extends IOException {
 
   public IncorrectVersionException(int versionReported, String ofWhat) {
     this(versionReported, ofWhat, FSConstants.LAYOUT_VERSION);

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/Storage.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/Storage.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/Storage.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/Storage.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/Storage.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/Storage.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/Storage.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.common;
 
 import java.io.File;
 import java.io.FileInputStream;
@@ -31,46 +31,14 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.dfs.FSConstants.StartupOption;
-import org.apache.hadoop.dfs.FSConstants.NodeType;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.FSConstants.StartupOption;
+import org.apache.hadoop.hdfs.protocol.FSConstants.NodeType;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.VersionInfo;
 
-/**
- * Common class for storage information.
- * 
- * TODO namespaceID should be long and computed as hash(address + port)
- */
-class StorageInfo {
-  int   layoutVersion;  // Version read from the stored file.
-  int   namespaceID;    // namespace id of the storage
-  long  cTime;          // creation timestamp
-  
-  StorageInfo () {
-    this(0, 0, 0L);
-  }
-  
-  StorageInfo(int layoutV, int nsID, long cT) {
-    layoutVersion = layoutV;
-    namespaceID = nsID;
-    cTime = cT;
-  }
-  
-  StorageInfo(StorageInfo from) {
-    setStorageInfo(from);
-  }
 
-  public int    getLayoutVersion(){ return layoutVersion; }
-  public int    getNamespaceID()  { return namespaceID; }
-  public long   getCTime()        { return cTime; }
-
-  public void   setStorageInfo(StorageInfo from) {
-    layoutVersion = from.layoutVersion;
-    namespaceID = from.namespaceID;
-    cTime = from.cTime;
-  }
-}
 
 /**
  * Storage information file.
@@ -90,8 +58,8 @@
  * The locks are released when the servers stop (normally or abnormally).
  * 
  */
-abstract class Storage extends StorageInfo {
-  public static final Log LOG = LogFactory.getLog("org.apache.hadoop.dfs.Storage");
+public abstract class Storage extends StorageInfo {
+  public static final Log LOG = LogFactory.getLog(Storage.class.getName());
 
   // Constants
   
@@ -99,12 +67,12 @@
   protected static final int LAST_PRE_UPGRADE_LAYOUT_VERSION = -3;
   
   // this corresponds to Hadoop-0.14.
-  protected static final int LAST_UPGRADABLE_LAYOUT_VERSION = -7;
+  public static final int LAST_UPGRADABLE_LAYOUT_VERSION = -7;
   protected static final String LAST_UPGRADABLE_HADOOP_VERSION = "Hadoop-0.14";
   
   private   static final String STORAGE_FILE_LOCK     = "in_use.lock";
   protected static final String STORAGE_FILE_VERSION  = "VERSION";
-  protected   static final String STORAGE_DIR_CURRENT   = "current";
+  public static final String STORAGE_DIR_CURRENT   = "current";
   private   static final String STORAGE_DIR_PREVIOUS  = "previous";
   private   static final String STORAGE_TMP_REMOVED   = "removed.tmp";
   private   static final String STORAGE_TMP_PREVIOUS  = "previous.tmp";
@@ -112,7 +80,7 @@
   private   static final String STORAGE_TMP_LAST_CKPT = "lastcheckpoint.tmp";
   private   static final String STORAGE_PREVIOUS_CKPT = "previous.checkpoint";
   
-  protected enum StorageState {
+  public enum StorageState {
     NON_EXISTENT,
     NOT_FORMATTED,
     COMPLETE_UPGRADE,
@@ -131,11 +99,11 @@
   /**
    * One of the storage directories.
    */
-  class StorageDirectory {
-    File              root; // root directory
+  public class StorageDirectory {
+    public File              root; // root directory
     FileLock          lock; // storage lock
     
-    StorageDirectory(File dir) {
+    public StorageDirectory(File dir) {
       this.root = dir;
       this.lock = null;
     }
@@ -145,11 +113,11 @@
      * 
      * @throws IOException if file cannot be read or contains inconsistent data
      */
-    void read() throws IOException {
+    public void read() throws IOException {
       read(getVersionFile());
     }
     
-    void read(File from) throws IOException {
+    public void read(File from) throws IOException {
       RandomAccessFile file = new RandomAccessFile(from, "rws");
       FileInputStream in = null;
       try {
@@ -171,12 +139,12 @@
      * 
      * @throws IOException
      */
-    void write() throws IOException {
+    public void write() throws IOException {
       corruptPreUpgradeStorage(root);
       write(getVersionFile());
     }
 
-    void write(File to) throws IOException {
+    public void write(File to) throws IOException {
       Properties props = new Properties();
       setFields(props, this);
       RandomAccessFile file = new RandomAccessFile(to, "rws");
@@ -219,7 +187,7 @@
      * 
      * @throws IOException
      */
-    void clearDirectory() throws IOException {
+    public void clearDirectory() throws IOException {
       File curDir = this.getCurrentDir();
       if (curDir.exists())
         if (!(FileUtil.fullyDelete(curDir)))
@@ -228,31 +196,31 @@
         throw new IOException("Cannot create directory " + curDir);
     }
 
-    File getCurrentDir() {
+    public File getCurrentDir() {
       return new File(root, STORAGE_DIR_CURRENT);
     }
-    File getVersionFile() {
+    public File getVersionFile() {
       return new File(new File(root, STORAGE_DIR_CURRENT), STORAGE_FILE_VERSION);
     }
-    File getPreviousVersionFile() {
+    public File getPreviousVersionFile() {
       return new File(new File(root, STORAGE_DIR_PREVIOUS), STORAGE_FILE_VERSION);
     }
-    File getPreviousDir() {
+    public File getPreviousDir() {
       return new File(root, STORAGE_DIR_PREVIOUS);
     }
-    File getPreviousTmp() {
+    public File getPreviousTmp() {
       return new File(root, STORAGE_TMP_PREVIOUS);
     }
-    File getRemovedTmp() {
+    public File getRemovedTmp() {
       return new File(root, STORAGE_TMP_REMOVED);
     }
-    File getFinalizedTmp() {
+    public File getFinalizedTmp() {
       return new File(root, STORAGE_TMP_FINALIZED);
     }
-    File getLastCheckpointTmp() {
+    public File getLastCheckpointTmp() {
       return new File(root, STORAGE_TMP_LAST_CKPT);
     }
-    File getPreviousCheckpoint() {
+    public File getPreviousCheckpoint() {
       return new File(root, STORAGE_PREVIOUS_CKPT);
     }
 
@@ -265,7 +233,7 @@
     * @throws InconsistentFSStateException if directory state is not 
      * consistent and cannot be recovered 
      */
-    StorageState analyzeStorage(StartupOption startOpt) throws IOException {
+    public StorageState analyzeStorage(StartupOption startOpt) throws IOException {
       assert root != null : "root is null";
       String rootPath = root.getCanonicalPath();
       try { // check that storage exists
@@ -372,7 +340,7 @@
      * @param curState specifies what/how the state should be recovered
      * @throws IOException
      */
-    void doRecover(StorageState curState) throws IOException {
+    public void doRecover(StorageState curState) throws IOException {
       File curDir = getCurrentDir();
       String rootPath = root.getCanonicalPath();
       switch(curState) {
@@ -434,7 +402,7 @@
      * 
      * @throws IOException if locking fails
      */
-    void lock() throws IOException {
+    public void lock() throws IOException {
       this.lock = tryLock();
       if (lock == null) {
         String msg = "Cannot lock storage " + this.root 
@@ -474,7 +442,7 @@
      * 
      * @throws IOException
      */
-    void unlock() throws IOException {
+    public void unlock() throws IOException {
       if (this.lock == null)
         return;
       this.lock.release();
@@ -486,26 +454,26 @@
   /**
    * Create empty storage info of the specified type
    */
-  Storage(NodeType type) {
+  protected Storage(NodeType type) {
     super();
     this.storageType = type;
   }
   
-  Storage(NodeType type, int nsID, long cT) {
+  protected Storage(NodeType type, int nsID, long cT) {
     super(FSConstants.LAYOUT_VERSION, nsID, cT);
     this.storageType = type;
   }
   
-  Storage(NodeType type, StorageInfo storageInfo) {
+  protected Storage(NodeType type, StorageInfo storageInfo) {
     super(storageInfo);
     this.storageType = type;
   }
   
-  int getNumStorageDirs() {
+  public int getNumStorageDirs() {
     return storageDirs.size();
   }
   
-  StorageDirectory getStorageDir(int idx) {
+  public StorageDirectory getStorageDir(int idx) {
     return storageDirs.get(idx);
   }
   
@@ -513,7 +481,7 @@
     storageDirs.add(sd);
   }
   
-  abstract boolean isConversionNeeded(StorageDirectory sd) throws IOException;
+  public abstract boolean isConversionNeeded(StorageDirectory sd) throws IOException;
 
   /*
   * Conversion is no longer supported, so this should throw an exception if
@@ -532,7 +500,7 @@
    * 
    * @param oldVersion
    */
-  static void checkVersionUpgradable(int oldVersion) 
+  protected static void checkVersionUpgradable(int oldVersion) 
                                      throws IOException {
     if (oldVersion > LAST_UPGRADABLE_LAYOUT_VERSION) {
       String msg = "*********** Upgrade is not supported from this older" +
@@ -601,13 +569,13 @@
     props.setProperty("cTime", String.valueOf(cTime));
   }
 
-  static void rename(File from, File to) throws IOException {
+  public static void rename(File from, File to) throws IOException {
     if (!from.renameTo(to))
       throw new IOException("Failed to rename " 
                             + from.getCanonicalPath() + " to " + to.getCanonicalPath());
   }
 
-  static void deleteDir(File dir) throws IOException {
+  protected static void deleteDir(File dir) throws IOException {
     if (!FileUtil.fullyDelete(dir))
       throw new IOException("Failed to delete " + dir.getCanonicalPath());
   }
@@ -641,7 +609,7 @@
    * @throws IOException
    * @see StorageDirectory#lock()
    */
-  boolean isLockSupported(int idx) throws IOException {
+  public boolean isLockSupported(int idx) throws IOException {
     StorageDirectory sd = storageDirs.get(idx);
     FileLock firstLock = null;
     FileLock secondLock = null;
@@ -672,7 +640,7 @@
     return VersionInfo.getRevision();
   }
 
-  static String getRegistrationID(StorageInfo storage) {
+  public static String getRegistrationID(StorageInfo storage) {
     return "NS-" + Integer.toString(storage.getNamespaceID())
       + "-" + Integer.toString(storage.getLayoutVersion())
       + "-" + Long.toString(storage.getCTime());

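With StorageDirectory and its lifecycle methods now public, code outside the package can drive the usual analyze/recover/lock/read sequence itself. The following is a minimal sketch, under assumptions: the driver class is hypothetical, and StorageState.NORMAL and StartupOption.REGULAR come from parts of the tree not shown in this hunk.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.FSConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Storage.StorageState;

// Hypothetical driver; not part of this commit.
class StorageDirSketch {
  static void load(Storage storage, File root) throws IOException {
    // StorageDirectory is a non-static inner class, hence the qualified "new".
    StorageDirectory sd = storage.new StorageDirectory(root);
    StorageState state = sd.analyzeStorage(StartupOption.REGULAR);
    switch (state) {
    case NON_EXISTENT:
      throw new IOException("Storage directory " + root + " does not exist");
    case NOT_FORMATTED:
      sd.clearDirectory();   // a caller would format a fresh directory here
      break;
    case NORMAL:
      break;
    default:
      sd.doRecover(state);   // finish or roll back an interrupted transition
    }
    sd.lock();               // acquire in_use.lock before touching the files
    sd.read();               // load layoutVersion, namespaceID, cTime from VERSION
  }
}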
Added: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/StorageInfo.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/StorageInfo.java?rev=673857&view=auto
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/StorageInfo.java (added)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/StorageInfo.java Thu Jul  3 15:55:06 2008
@@ -0,0 +1,37 @@
+package org.apache.hadoop.hdfs.server.common;
+
+
+/**
+ * Common class for storage information.
+ * 
+ * TODO namespaceID should be long and computed as hash(address + port)
+ */
+public class StorageInfo {
+  public int   layoutVersion;  // Version read from the stored file.
+  public int   namespaceID;    // namespace id of the storage
+  public long  cTime;          // creation timestamp
+  
+  public StorageInfo () {
+    this(0, 0, 0L);
+  }
+  
+  public StorageInfo(int layoutV, int nsID, long cT) {
+    layoutVersion = layoutV;
+    namespaceID = nsID;
+    cTime = cT;
+  }
+  
+  public StorageInfo(StorageInfo from) {
+    setStorageInfo(from);
+  }
+
+  public int    getLayoutVersion(){ return layoutVersion; }
+  public int    getNamespaceID()  { return namespaceID; }
+  public long   getCTime()        { return cTime; }
+
+  public void   setStorageInfo(StorageInfo from) {
+    layoutVersion = from.layoutVersion;
+    namespaceID = from.namespaceID;
+    cTime = from.cTime;
+  }
+}
\ No newline at end of file

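Because the fields are public and the copy constructor copies them field by field, StorageInfo behaves as a plain mutable value object. A small illustration; the numbers below are made up:

import org.apache.hadoop.hdfs.server.common.StorageInfo;

class StorageInfoSketch {
  public static void main(String[] args) {
    StorageInfo original = new StorageInfo(-16, 42, System.currentTimeMillis());
    StorageInfo copy = new StorageInfo(original);   // copies all three fields
    copy.cTime = 0L;                                // public fields are freely mutable
    // The copy is independent of the original:
    System.out.println(original.getCTime() != copy.getCTime());   // prints true
  }
}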
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeManager.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UpgradeManager.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeManager.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeManager.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UpgradeManager.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UpgradeManager.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeManager.java Thu Jul  3 15:55:06 2008
@@ -15,52 +15,55 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.common;
 
 import java.io.IOException;
 import java.util.SortedSet;
 
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
+
 /**
  * Generic upgrade manager.
  * 
  * {@link #broadcastCommand} is the command that should be broadcast to the
  * cluster nodes while a distributed upgrade is in progress.
  *
  */
-abstract class UpgradeManager {
+public abstract class UpgradeManager {
   protected SortedSet<Upgradeable> currentUpgrades = null;
   protected boolean upgradeState = false; // true if upgrade is in progress
   protected int upgradeVersion = 0;
   protected UpgradeCommand broadcastCommand = null;
 
-  synchronized UpgradeCommand getBroadcastCommand() {
+  public synchronized UpgradeCommand getBroadcastCommand() {
     return this.broadcastCommand;
   }
 
-  boolean getUpgradeState() {
+  public boolean getUpgradeState() {
     return this.upgradeState;
   }
 
-  int getUpgradeVersion(){
+  public int getUpgradeVersion(){
     return this.upgradeVersion;
   }
 
-  void setUpgradeState(boolean uState, int uVersion) {
+  public void setUpgradeState(boolean uState, int uVersion) {
     this.upgradeState = uState;
     this.upgradeVersion = uVersion;
   }
 
-  SortedSet<Upgradeable> getDistributedUpgrades() throws IOException {
+  public SortedSet<Upgradeable> getDistributedUpgrades() throws IOException {
     return UpgradeObjectCollection.getDistributedUpgrades(
                                             getUpgradeVersion(), getType());
   }
 
-  short getUpgradeStatus() {
+  public short getUpgradeStatus() {
     if(currentUpgrades == null)
       return 100;
     return currentUpgrades.first().getUpgradeStatus();
   }
 
-  boolean initializeUpgrade() throws IOException {
+  public boolean initializeUpgrade() throws IOException {
     currentUpgrades = getDistributedUpgrades();
     if(currentUpgrades == null) {
       // set new upgrade state
@@ -73,14 +76,14 @@
     return true;
   }
 
-  boolean isUpgradeCompleted() {
+  public boolean isUpgradeCompleted() {
     if (currentUpgrades == null) {
       return true;
     }
     return false;
   }
 
-  abstract FSConstants.NodeType getType();
-  abstract boolean startUpgrade() throws IOException;
-  abstract void completeUpgrade() throws IOException;
+  public abstract FSConstants.NodeType getType();
+  public abstract boolean startUpgrade() throws IOException;
+  public abstract void completeUpgrade() throws IOException;
 }

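The three abstract methods at the bottom are all a concrete manager must supply; everything else has a default implementation. A purely illustrative no-op subclass (the real managers live in the datanode and namenode packages):

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.common.UpgradeManager;

// Hypothetical; registers no upgrade work of its own.
class NoOpUpgradeManager extends UpgradeManager {
  public FSConstants.NodeType getType() {
    return FSConstants.NodeType.DATA_NODE;
  }
  public boolean startUpgrade() throws IOException {
    return false;                 // nothing to start
  }
  public void completeUpgrade() throws IOException {
    setUpgradeState(false, 0);    // clear the in-progress flag
  }
}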
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeObject.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UpgradeObject.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeObject.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeObject.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UpgradeObject.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UpgradeObject.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeObject.java Thu Jul  3 15:55:06 2008
@@ -15,11 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.common;
 
 import java.io.IOException;
 
-import org.apache.hadoop.dfs.UpgradeObjectCollection.UOSignature;
+import org.apache.hadoop.hdfs.server.common.UpgradeObjectCollection.UOSignature;
 
 /**
  * Abstract upgrade object.
@@ -27,7 +27,7 @@
  * Contains default implementation of common methods of {@link Upgradeable}
  * interface.
  */
-abstract class UpgradeObject implements Upgradeable {
+public abstract class UpgradeObject implements Upgradeable {
   protected short status;
   
   public short getUpgradeStatus() {

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UpgradeObjectCollection.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UpgradeObjectCollection.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UpgradeObjectCollection.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java Thu Jul  3 15:55:06 2008
@@ -15,12 +15,15 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.common;
 
 import java.io.IOException;
 import java.util.SortedSet;
 import java.util.TreeSet;
 
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.server.datanode.GenerationStampUpgradeDatanode;
+import org.apache.hadoop.hdfs.server.namenode.GenerationStampUpgradeNamenode;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -28,7 +31,7 @@
  *
  * Upgrade objects should be registered here before they can be used. 
  */
-class UpgradeObjectCollection {
+public class UpgradeObjectCollection {
   static {
     initialize();
    // Register distributed upgrade objects here
@@ -109,7 +112,7 @@
     upgradeTable.add(new UOSignature(uo));
   }
 
-  static SortedSet<Upgradeable> getDistributedUpgrades(int versionFrom, 
+  public static SortedSet<Upgradeable> getDistributedUpgrades(int versionFrom, 
                                                        FSConstants.NodeType type
                                                        ) throws IOException {
     assert FSConstants.LAYOUT_VERSION <= versionFrom : "Incorrect version " 

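Now that getDistributedUpgrades is public, other server packages can query the registry directly. A sketch; -14 here is an arbitrary on-disk layout version chosen only to satisfy the assert (layout versions are negative, so versionFrom must not be older than FSConstants.LAYOUT_VERSION):

import java.io.IOException;
import java.util.SortedSet;

import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.common.UpgradeObjectCollection;
import org.apache.hadoop.hdfs.server.common.Upgradeable;

class UpgradeQuerySketch {
  static void report() throws IOException {
    // Returns null when the given version needs no distributed upgrade.
    SortedSet<Upgradeable> ups = UpgradeObjectCollection.getDistributedUpgrades(
        -14, FSConstants.NodeType.DATA_NODE);
    if (ups == null)
      return;
    for (Upgradeable u : ups)
      System.out.println("upgrade status: " + u.getUpgradeStatus() + "%");
  }
}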
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeStatusReport.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UpgradeStatusReport.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeStatusReport.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeStatusReport.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UpgradeStatusReport.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/UpgradeStatusReport.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeStatusReport.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.common;
 
 import java.io.DataInput;
 import java.io.DataOutput;

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/Upgradeable.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/Upgradeable.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/Upgradeable.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/Upgradeable.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/Upgradeable.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/Upgradeable.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/Upgradeable.java Thu Jul  3 15:55:06 2008
@@ -15,10 +15,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.common;
 
 import java.io.IOException;
 
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
+
 /**
  * Common interface for distributed upgrade objects.
  * 

Added: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/Util.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/Util.java?rev=673857&view=auto
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/Util.java (added)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/common/Util.java Thu Jul  3 15:55:06 2008
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.common;
+
+public final class Util {
+  /**
+   * Current system time.
+   * @return current time in msec.
+   */
+  public static long now() {
+    return System.currentTimeMillis();
+  }
+}
\ No newline at end of file

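Util.now() exists mainly so that callers share a single clock source; the typical pattern is to bracket an operation with two calls. A trivial sketch (the sleep stands in for real work):

import org.apache.hadoop.hdfs.server.common.Util;

class TimingSketch {
  public static void main(String[] args) throws InterruptedException {
    long start = Util.now();
    Thread.sleep(100);                   // placeholder for the operation being timed
    long elapsed = Util.now() - start;   // wall-clock milliseconds
    System.out.println("elapsed ms: " + elapsed);
  }
}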
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/BlockMetadataHeader.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/BlockMetadataHeader.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/BlockMetadataHeader.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.BufferedInputStream;
 import java.io.DataInputStream;
@@ -25,6 +25,7 @@
 import java.io.IOException;
 
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.DataChecksum;
 
 
 /**

Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DataBlockScanner.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DataBlockScanner.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DataBlockScanner.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java Thu Jul  3 15:55:06 2008
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.BufferedReader;
 import java.io.Closeable;
@@ -46,7 +46,10 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.dfs.DataNode.BlockSender;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.datanode.DataNode.BlockSender;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.StringUtils;
 
@@ -268,7 +271,7 @@
   }
 
   /** @return the last scan time */
-  synchronized long getLastScanTime(Block block) {
+  public synchronized long getLastScanTime(Block block) {
     BlockScanInfo info = blockMap.get(block);
     return info == null? 0: info.lastScanTime;
   }

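Making getLastScanTime public exposes the scanner's bookkeeping to other datanode code, with 0 as the sentinel for a block the scanner has no record of. A small sketch, kept in the same package since the class itself may not be public:

package org.apache.hadoop.hdfs.server.datanode;

import org.apache.hadoop.hdfs.protocol.Block;

// Sketch only; not part of this commit.
class ScanTimeSketch {
  // True if the scanner has never verified this block (or does not know it).
  static boolean neverScanned(DataBlockScanner scanner, Block block) {
    return scanner.getLastScanTime(block) == 0;
  }
}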
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DataNode.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DataNode.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DataNode.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java Thu Jul  3 15:55:06 2008
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.datanode;
 
 import org.apache.commons.logging.*;
 
@@ -31,11 +31,30 @@
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.apache.hadoop.mapred.StatusHttpServer;
-import org.apache.hadoop.dfs.BlockCommand;
-import org.apache.hadoop.dfs.DatanodeProtocol;
-import org.apache.hadoop.dfs.FSDatasetInterface.MetaDataInputStream;
-import org.apache.hadoop.dfs.datanode.metrics.DataNodeMetrics;
-import org.apache.hadoop.dfs.BlockMetadataHeader;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.UnregisteredDatanodeException;
+import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.StreamFile;
+import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+import org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
+import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream;
+import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
+import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 
 import java.io.*;
 import java.net.*;
@@ -81,7 +100,7 @@
  **********************************************************/
 public class DataNode extends Configured 
     implements InterDatanodeProtocol, ClientDatanodeProtocol, FSConstants, Runnable {
-  public static final Log LOG = LogFactory.getLog("org.apache.hadoop.dfs.DataNode");
+  public static final Log LOG = LogFactory.getLog(DataNode.class.getName());
 
   /**
    * Use {@link NetUtils#createSocketAddr(String)} instead.
@@ -99,14 +118,14 @@
    */
   private static final int MIN_BUFFER_WITH_TRANSFERTO = 64*1024;
   
-  DatanodeProtocol namenode = null;
-  FSDatasetInterface data = null;
-  DatanodeRegistration dnRegistration = null;
+  public DatanodeProtocol namenode = null;
+  public FSDatasetInterface data = null;
+  public DatanodeRegistration dnRegistration = null;
 
   volatile boolean shouldRun = true;
   private LinkedList<Block> receivedBlockList = new LinkedList<Block>();
   private LinkedList<String> delHints = new LinkedList<String>();
-  final static String EMPTY_DEL_HINT = "";
+  public final static String EMPTY_DEL_HINT = "";
   int xmitsInProgress = 0;
   Daemon dataXceiveServer = null;
   ThreadGroup threadGroup = null;
@@ -132,8 +151,8 @@
   private boolean transferToAllowed = true;
   private int writePacketSize = 0;
   
-  DataBlockScanner blockScanner = null;
-  Daemon blockScannerThread = null;
+  public DataBlockScanner blockScanner = null;
+  public Daemon blockScannerThread = null;
   
   private static final Random R = new Random();
 
@@ -148,13 +167,13 @@
   private long estimateBlockSize;
   
   // The following three fields are to support balancing
-  final static short MAX_BALANCING_THREADS = 5;
+  public final static short MAX_BALANCING_THREADS = 5;
   private Semaphore balancingSem = new Semaphore(MAX_BALANCING_THREADS);
   long balanceBandwidth;
   private Throttler balancingThrottler;
 
   // For InterDataNodeProtocol
-  Server ipcServer;
+  public Server ipcServer;
   
  // Record all sockets opened for data transfer
   Map<Socket, Socket> childSockets = Collections.synchronizedMap(
@@ -259,7 +278,7 @@
           //Equivalent of following (can't do because Simulated is in test dir)
           //  this.data = new SimulatedFSDataset(conf);
           this.data = (FSDatasetInterface) ReflectionUtils.newInstance(
-              Class.forName("org.apache.hadoop.dfs.SimulatedFSDataset"), conf);
+              Class.forName("org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset"), conf);
         } catch (ClassNotFoundException e) {
           throw new IOException(StringUtils.stringifyException(e));
         }
@@ -409,7 +428,7 @@
     return datanodeObject;
   } 
 
-  static InterDatanodeProtocol createInterDataNodeProtocolProxy(
+  public static InterDatanodeProtocol createInterDataNodeProtocolProxy(
       DatanodeID datanodeid, Configuration conf) throws IOException {
     InetSocketAddress addr = NetUtils.createSocketAddr(
         datanodeid.getHost() + ":" + datanodeid.getIpcPort());
@@ -440,7 +459,7 @@
     return "<namenode>";
   }
 
-  static void setNewStorageID(DatanodeRegistration dnReg) {
+  public static void setNewStorageID(DatanodeRegistration dnReg) {
     /* Return 
      * "DS-randInt-ipaddr-currentTimeMillis"
     * It is considered extremely rare for all these numbers to match
@@ -1468,7 +1487,7 @@
    * This class is thread safe. It can be shared by multiple threads.
    * The parameter bandwidthPerSec specifies the total bandwidth shared by threads.
    */
-  static class Throttler {
+  public static class Throttler {
     private long period;          // period over which bw is imposed
     private long periodExtension; // Max period over which bw accumulates.
     private long bytesPerPeriod; // total number of bytes that can be sent in each period
@@ -1479,7 +1498,7 @@
     /** Constructor 
      * @param bandwidthPerSec bandwidth allowed in bytes per second. 
      */
-    Throttler(long bandwidthPerSec) {
+    public Throttler(long bandwidthPerSec) {
       this(500, bandwidthPerSec);  // by default throttling period is 500ms 
     }
 
@@ -1636,7 +1655,7 @@
    ************************************************************************ */
   
   /** Header size for a packet */
-  static final int PKT_HEADER_LEN = ( 4 + /* Packet payload length */
+  public static final int PKT_HEADER_LEN = ( 4 + /* Packet payload length */
                                       8 + /* offset in block */
                                       8 + /* seqno */
                                       1   /* isLastPacketInBlock */);
@@ -2903,7 +2922,7 @@
   /** Start a single datanode daemon and wait for it to finish.
    *  If this thread is specifically interrupted, it will stop waiting.
    */
-  static void runDatanodeDaemon(DataNode dn) throws IOException {
+  public static void runDatanodeDaemon(DataNode dn) throws IOException {
     if (dn != null) {
       //register datanode
       dn.register();
@@ -2916,7 +2935,7 @@
   /** Instantiate a single datanode object. This must be run by invoking
    *  {@link DataNode#runDatanodeDaemon(DataNode)} subsequently. 
    */
-  static DataNode instantiateDataNode(String args[],
+  public static DataNode instantiateDataNode(String args[],
                                       Configuration conf) throws IOException {
     if (conf == null)
       conf = new Configuration();
@@ -2938,7 +2957,7 @@
  /** Instantiate and start a single datanode daemon and wait for it to finish.
    *  If this thread is specifically interrupted, it will stop waiting.
    */
-  static DataNode createDataNode(String args[],
+  public static DataNode createDataNode(String args[],
                                  Configuration conf) throws IOException {
     DataNode dn = instantiateDataNode(args, conf);
     runDatanodeDaemon(dn);
@@ -2964,7 +2983,7 @@
    * no directory from this directory list can be created.
    * @throws IOException
    */
-  static DataNode makeInstance(String[] dataDirs, Configuration conf)
+  public static DataNode makeInstance(String[] dataDirs, Configuration conf)
     throws IOException {
     ArrayList<File> dirs = new ArrayList<File>();
     for (int i = 0; i < dataDirs.length; i++) {
@@ -3078,12 +3097,12 @@
     if (LOG.isDebugEnabled()) {
       LOG.debug("block=" + block);
     }
-    Block stored = data.getStoredBlock(block.blkid);
+    Block stored = data.getStoredBlock(block.getBlockId());
     return stored == null?
         null: new BlockMetaDataInfo(stored, blockScanner.getLastScanTime(stored));
   }
 
-  Daemon recoverBlocks(final Block[] blocks, final DatanodeInfo[][] targets) {
+  public Daemon recoverBlocks(final Block[] blocks, final DatanodeInfo[][] targets) {
     Daemon d = new Daemon(threadGroup, new Runnable() {
       public void run() {
         LeaseManager.recoverBlocks(blocks, targets, namenode, getConf());

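Throttler going public makes the bandwidth limiter reusable outside this file (the balancer is the obvious client). A sketch of the intended pattern; note the throttle(long) call is an assumption inferred from the class's description, since only the constructors appear in this hunk:

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import org.apache.hadoop.hdfs.server.datanode.DataNode;

// Sketch only; assumes Throttler.throttle(long numOfBytes) exists.
class ThrottledCopySketch {
  static void copy(InputStream in, OutputStream out) throws IOException {
    // Cap the sending thread at roughly 1 MB/s.
    DataNode.Throttler throttler = new DataNode.Throttler(1024 * 1024);
    byte[] buf = new byte[64 * 1024];
    int n;
    while ((n = in.read(buf)) != -1) {
      out.write(buf, 0, n);
      throttler.throttle(n);   // blocks until the thread is back under budget
    }
  }
}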
Copied: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataStorage.java (from r673837, hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DataStorage.java)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataStorage.java?p2=hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataStorage.java&p1=hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DataStorage.java&r1=673837&r2=673857&rev=673857&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/DataStorage.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataStorage.java Thu Jul  3 15:55:06 2008
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.File;
 import java.io.FileInputStream;
@@ -29,8 +29,13 @@
 import java.util.Iterator;
 import java.util.Properties;
 
-import org.apache.hadoop.dfs.FSConstants.StartupOption;
-import org.apache.hadoop.dfs.FSConstants.NodeType;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.FSConstants.NodeType;
+import org.apache.hadoop.hdfs.protocol.FSConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.fs.FileUtil.HardLink;
 import org.apache.hadoop.io.IOUtils;
@@ -40,7 +45,7 @@
  * <p>
  * @see Storage
  */
-class DataStorage extends Storage {
+public class DataStorage extends Storage {
   // Constants
   final static String BLOCK_SUBDIR_PREFIX = "subdir";
   final static String BLOCK_FILE_PREFIX = "blk_";
@@ -58,12 +63,12 @@
     this.storageID = strgID;
   }
   
-  DataStorage(StorageInfo storageInfo, String strgID) {
+  public DataStorage(StorageInfo storageInfo, String strgID) {
     super(NodeType.DATA_NODE, storageInfo);
     this.storageID = strgID;
   }
 
-  String getStorageID() {
+  public String getStorageID() {
     return storageID;
   }
   
@@ -177,7 +182,7 @@
       storageID = ssid;
   }
 
-  boolean isConversionNeeded(StorageDirectory sd) throws IOException {
+  public boolean isConversionNeeded(StorageDirectory sd) throws IOException {
     File oldF = new File(sd.root, "storage");
     if (!oldF.exists())
       return false;

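isConversionNeeded keys entirely off the presence of the pre-0.13 "storage" file under the directory root. A hedged illustration; the storage ID below is fabricated, following the DS-randInt-ipaddr-time shape noted earlier, and the wrapper class is hypothetical:

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;

class ConversionCheckSketch {
  static boolean needsConversion(File root) throws IOException {
    DataStorage ds = new DataStorage(new StorageInfo(), "DS-1-127.0.0.1-50010-0");
    Storage.StorageDirectory sd = ds.new StorageDirectory(root);
    return ds.isConversionNeeded(sd);   // true only if <root>/storage exists
  }
}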

