hadoop-hdfs-commits mailing list archives

From sra...@apache.org
Subject svn commit: r1179877 [2/3] - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/org/apache/hadoop/hdfs/protocolProtocolBuffers/ src/main/java/or...
Date Thu, 06 Oct 2011 21:58:23 GMT
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientNamenodeWireProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientNamenodeWireProtocol.java?rev=1179877&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientNamenodeWireProtocol.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientNamenodeWireProtocol.java Thu Oct  6 21:58:22 2011
@@ -0,0 +1,482 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocolR23Compatible;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+import org.apache.avro.reflect.Nullable;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.ParentNotDirectoryException;
+import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
+import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
+import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
+import org.apache.hadoop.io.EnumSetWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenInfo;
+import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
+import org.apache.hadoop.ipc.ProtocolInfo;
+
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
+
+/**********************************************************************
+ * This class defines the actual protocol used to communicate with the
+ * NN via RPC using writable types.
+ * The parameter and return types of the methods declared in this
+ * package are separate from those used internally in the NN and DFSClient
+ * and hence need to be converted using {@link ClientNamenodeProtocolTranslatorR23}
+ * and {@link ClientNamenodeProtocolServerSideTranslatorR23}.
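+ *
+ * For example, a server-side translator would convert the internal
+ * org.apache.hadoop.hdfs.protocol.LocatedBlocks returned by the namenode into
+ * the LocatedBlocksWritable declared by {@link #getBlockLocations} before it
+ * crosses the wire (an illustrative description of the flow; the translator
+ * classes contain the actual conversions).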
+ *
+ **********************************************************************/
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+@KerberosInfo(
+    serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
+@TokenInfo(DelegationTokenSelector.class)
+@ProtocolInfo(protocolName = HdfsConstants.CLIENT_NAMENODE_PROTOCOL_NAME)
+public interface ClientNamenodeWireProtocol extends VersionedProtocol {
+
+  /**
+   * Changes to the protocol:
+   * 
+   * Do NOT change a method's signature (i.e. its name, parameters, parameter
+   * types or the exceptions thrown). If you need to make changes, ADD new
+   * methods and new data types instead.
+   * As long as you maintain compatibility you will NOT have to change
+   * the version number below. The version number is changed ONLY
+   * if you break compatibility (which is a big deal).
+   * Hence the version number is effectively a Major Version Number.
+   *
+   * The log of historical changes prior to version 69 can be retrieved from svn.
+   * ALL changes since version 69 are recorded here.
+   * The version number is changed ONLY for incompatible changes.
+   *  (Note that previously the version number was changed for both
+   *  compatible and incompatible changes.)
+   * 69: Eliminate overloaded method names. (Compatible)
+   * 70: Separation of Datatypes - the client namenode protocol is implemented
+   *     in this class instead of in 
+   *           {@link org.apache.hadoop.hdfs.protocol.ClientProtocol}
+   *     as was done prior to version 70.
+   */
+  public static final long versionID = 70L;
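+  // For example, adding a brand-new method in a compatible way leaves
+  // versionID at 70L; only an incompatible change to an existing signature
+  // would bump it (an illustrative restatement of the rules above).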
+  
+  ///////////////////////////////////////
+  // File contents
+  ///////////////////////////////////////
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#getBlockLocations}
+   */
+  @Nullable
+  public LocatedBlocksWritable getBlockLocations(String src,
+                                         long offset,
+                                         long length) 
+      throws AccessControlException, FileNotFoundException,
+      UnresolvedLinkException, IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#getServerDefaults()}
+   */
+  public FsServerDefaultsWritable getServerDefaults() throws IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#create(String, 
+   * org.apache.hadoop.fs.permission.FsPermission, String, 
+   * EnumSetWritable, boolean, short, long)}
+   */
+  public void create(String src, FsPermissionWritable masked, String clientName,
+      EnumSetWritable<CreateFlag> flag, boolean createParent,
+      short replication, long blockSize) throws AccessControlException,
+      AlreadyBeingCreatedException, DSQuotaExceededException,
+      FileAlreadyExistsException, FileNotFoundException,
+      NSQuotaExceededException, ParentNotDirectoryException, SafeModeException,
+      UnresolvedLinkException, IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#append(String, String)}
+   */
+  public LocatedBlockWritable append(String src, String clientName)
+      throws AccessControlException, DSQuotaExceededException,
+      FileNotFoundException, SafeModeException, UnresolvedLinkException,
+      IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#setReplication(String, short)}
+   */
+  public boolean setReplication(String src, short replication)
+      throws AccessControlException, DSQuotaExceededException,
+      FileNotFoundException, SafeModeException, UnresolvedLinkException,
+      IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#setPermission(String,
+   * org.apache.hadoop.fs.permission.FsPermission)}
+   */
+  public void setPermission(String src, FsPermissionWritable permission)
+      throws AccessControlException, FileNotFoundException, SafeModeException,
+      UnresolvedLinkException, IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#setOwner(String, String, String)}
+   */
+  public void setOwner(String src, String username, String groupname)
+      throws AccessControlException, FileNotFoundException, SafeModeException,
+      UnresolvedLinkException, IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#abandonBlock(
+   * org.apache.hadoop.hdfs.protocol.ExtendedBlock, String, String)}
+   */
+  public void abandonBlock(ExtendedBlockWritable b, String src, String holder)
+      throws AccessControlException, FileNotFoundException,
+      UnresolvedLinkException, IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#addBlock(String, 
+   * String, org.apache.hadoop.hdfs.protocol.ExtendedBlock, 
+   * org.apache.hadoop.hdfs.protocol.DatanodeInfo[])}
+   */
+  public LocatedBlockWritable addBlock(String src, String clientName,
+      @Nullable ExtendedBlockWritable previous, @Nullable DatanodeInfoWritable[] excludeNodes)
+      throws AccessControlException, FileNotFoundException,
+      NotReplicatedYetException, SafeModeException, UnresolvedLinkException,
+      IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#getAdditionalDatanode}
+   */
+  public LocatedBlockWritable getAdditionalDatanode(
+      final String src, final ExtendedBlockWritable blk,
+      final DatanodeInfoWritable[] existings,
+      final DatanodeInfoWritable[] excludes,
+      final int numAdditionalNodes, final String clientName
+      ) throws AccessControlException, FileNotFoundException,
+          SafeModeException, UnresolvedLinkException, IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#complete}
+   */
+  public boolean complete(
+      String src, String clientName, ExtendedBlockWritable last)
+      throws AccessControlException, FileNotFoundException, SafeModeException,
+      UnresolvedLinkException, IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#reportBadBlocks}
+   */
+  public void reportBadBlocks(LocatedBlockWritable[] blocks) throws IOException;
+
+  ///////////////////////////////////////
+  // Namespace management
+  ///////////////////////////////////////
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#rename(String, String)}
+   */
+  public boolean rename(String src, String dst) 
+      throws UnresolvedLinkException, IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#concat(String, String[])}
+   */
+  public void concat(String trg, String[] srcs) 
+      throws IOException, UnresolvedLinkException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#rename2}
+   */
+  public void rename2(String src, String dst, Options.Rename... options)
+      throws AccessControlException, DSQuotaExceededException,
+      FileAlreadyExistsException, FileNotFoundException,
+      NSQuotaExceededException, ParentNotDirectoryException, SafeModeException,
+      UnresolvedLinkException, IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#delete(String, boolean)}
+   */
+  public boolean delete(String src, boolean recursive)
+      throws AccessControlException, FileNotFoundException, SafeModeException,
+      UnresolvedLinkException, IOException;
+  
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#mkdirs}
+   */
+  public boolean mkdirs(
+      String src, FsPermissionWritable masked, boolean createParent)
+      throws AccessControlException, FileAlreadyExistsException,
+      FileNotFoundException, NSQuotaExceededException,
+      ParentNotDirectoryException, SafeModeException, UnresolvedLinkException,
+      IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#getListing}
+   */
+  public DirectoryListingWritable getListing(String src,
+                                     byte[] startAfter,
+                                     boolean needLocation)
+      throws AccessControlException, FileNotFoundException,
+      UnresolvedLinkException, IOException;
+
+  ///////////////////////////////////////
+  // System issues and management
+  ///////////////////////////////////////
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#renewLease(String)}
+   */
+  public void renewLease(String clientName) throws AccessControlException,
+      IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#recoverLease(String, String)}
+   */
+  public boolean recoverLease(String src, String clientName) throws IOException;
+
+  public int GET_STATS_CAPACITY_IDX = 0;
+  public int GET_STATS_USED_IDX = 1;
+  public int GET_STATS_REMAINING_IDX = 2;
+  public int GET_STATS_UNDER_REPLICATED_IDX = 3;
+  public int GET_STATS_CORRUPT_BLOCKS_IDX = 4;
+  public int GET_STATS_MISSING_BLOCKS_IDX = 5;
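+  // These constants index the long[] returned by getStats(); e.g.
+  // getStats()[GET_STATS_CAPACITY_IDX] would be the cluster's total raw
+  // capacity in bytes (usage shown for illustration).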
+  
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#getStats()}
+   */
+  public long[] getStats() throws IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#getDatanodeReport}
+   */
+  public DatanodeInfoWritable[] getDatanodeReport(
+      HdfsConstants.DatanodeReportType type)
+      throws IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#getPreferredBlockSize}
+   */
+  public long getPreferredBlockSize(String filename) 
+      throws IOException, UnresolvedLinkException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode(org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction)}
+   */
+  public boolean setSafeMode(HdfsConstants.SafeModeAction action) 
+      throws IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()}
+   */
+  public void saveNamespace() throws AccessControlException, IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#restoreFailedStorage(String)}
+   */
+  public boolean restoreFailedStorage(String arg) throws AccessControlException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#refreshNodes()}
+   */
+  public void refreshNodes() throws IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#finalizeUpgrade()}
+   */
+  public void finalizeUpgrade() throws IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#distributedUpgradeProgress}
+   */
+  @Nullable
+  public UpgradeStatusReportWritable distributedUpgradeProgress(
+      UpgradeAction action) 
+      throws IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#listCorruptFileBlocks(String, String)}
+   */
+  public CorruptFileBlocksWritable
+    listCorruptFileBlocks(String path, String cookie)
+    throws IOException;
+  
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#metaSave(String)}
+   */
+  public void metaSave(String filename) throws IOException;
+  
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#setBalancerBandwidth(long)}
+   */
+  public void setBalancerBandwidth(long bandwidth) throws IOException;
+  
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#getFileInfo(String)}
+   */
+  @Nullable
+  public HdfsFileStatusWritable getFileInfo(String src)
+      throws AccessControlException,
+      FileNotFoundException, UnresolvedLinkException, IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#getFileLinkInfo(String)}
+   */
+  public HdfsFileStatusWritable getFileLinkInfo(String src)
+      throws AccessControlException, UnresolvedLinkException, IOException;
+  
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#getContentSummary(String)}
+   */
+  public ContentSummaryWritable getContentSummary(String path)
+      throws AccessControlException, FileNotFoundException,
+      UnresolvedLinkException, IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String, long, long)}
+   */
+  public void setQuota(String path, long namespaceQuota, long diskspaceQuota)
+      throws AccessControlException, FileNotFoundException,
+      UnresolvedLinkException, IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#fsync(String, String)}
+   */
+  public void fsync(String src, String client) 
+      throws AccessControlException, FileNotFoundException, 
+      UnresolvedLinkException, IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#setTimes(String, long, long)}
+   */
+  public void setTimes(String src, long mtime, long atime)
+      throws AccessControlException, FileNotFoundException, 
+      UnresolvedLinkException, IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#createSymlink}
+   */
+  public void createSymlink(
+      String target, String link, FsPermissionWritable dirPerm,
+      boolean createParent) throws AccessControlException,
+      FileAlreadyExistsException, FileNotFoundException,
+      ParentNotDirectoryException, SafeModeException, UnresolvedLinkException,
+      IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#getLinkTarget(String)}
+   */
+  public String getLinkTarget(String path) throws AccessControlException,
+      FileNotFoundException, IOException; 
+  
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#updateBlockForPipeline}
+   */
+  public LocatedBlockWritable updateBlockForPipeline(
+      ExtendedBlockWritable block, String clientName) throws IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#updatePipeline}
+   */
+  public void updatePipeline(String clientName, ExtendedBlockWritable oldBlock, 
+      ExtendedBlockWritable newBlock, DatanodeIDWritable[] newNodes)
+      throws IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#getDelegationToken(Text)}
+   */
+  public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer) 
+      throws IOException;
+
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#renewDelegationToken(Token)}
+   */
+  public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
+      throws IOException;
+  
+  /**
+   * The specification of this method matches that of
+   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#cancelDelegationToken(Token)}
+   */
+  public void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
+      throws IOException;
+  
+  /**
+   * This method is defined to get the protocol signature using
+   * the R23 protocol; the suffix 2 has been added to the method name
+   * to avoid a conflict.
+   */
+  public org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable
+           getProtocolSignature2(String protocol, 
+      long clientVersion,
+      int clientMethodsHash) throws IOException;
+}

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ContentSummaryWritable.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ContentSummaryWritable.java?rev=1179877&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ContentSummaryWritable.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ContentSummaryWritable.java Thu Oct  6 21:58:22 2011
@@ -0,0 +1,184 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocolR23Compatible;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.Writable;
+
+/** Stores the content summary of a directory or a file. */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class ContentSummaryWritable implements Writable{
+  private long length;
+  private long fileCount;
+  private long directoryCount;
+  private long quota;
+  private long spaceConsumed;
+  private long spaceQuota;
+  
+  
+  public static org.apache.hadoop.fs.ContentSummary convert(ContentSummaryWritable cs) {
+    if (cs == null) return null;
+    return new org.apache.hadoop.fs.ContentSummary(
+      cs.getLength(), cs.getFileCount(), cs.getDirectoryCount(), cs.getQuota(),
+      cs.getSpaceConsumed(), cs.getSpaceQuota());
+  }
+  
+  public static ContentSummaryWritable convert(org.apache.hadoop.fs.ContentSummary cs) {
+    if (cs == null) return null;
+    return new ContentSummaryWritable(
+      cs.getLength(), cs.getFileCount(), cs.getDirectoryCount(), cs.getQuota(),
+      cs.getSpaceConsumed(), cs.getSpaceQuota());
+  }
+
+  /** Constructor */
+  public ContentSummaryWritable() {}
+  
+  /** Constructor: no quotas, and spaceConsumed defaults to length. */
+  public ContentSummaryWritable(long length, long fileCount, long directoryCount) {
+    this(length, fileCount, directoryCount, -1L, length, -1L);
+  }
+
+  /** Constructor */
+  public ContentSummaryWritable(
+      long length, long fileCount, long directoryCount, long quota,
+      long spaceConsumed, long spaceQuota) {
+    this.length = length;
+    this.fileCount = fileCount;
+    this.directoryCount = directoryCount;
+    this.quota = quota;
+    this.spaceConsumed = spaceConsumed;
+    this.spaceQuota = spaceQuota;
+  }
+
+  /** @return the length */
+  public long getLength() {return length;}
+
+  /** @return the directory count */
+  public long getDirectoryCount() {return directoryCount;}
+
+  /** @return the file count */
+  public long getFileCount() {return fileCount;}
+  
+  /** Return the directory quota */
+  public long getQuota() {return quota;}
+  
+  /** Returns (disk) space consumed */
+  public long getSpaceConsumed() {return spaceConsumed;}
+
+  /** Returns (disk) space quota */
+  public long getSpaceQuota() {return spaceQuota;}
+  
+  @InterfaceAudience.Private
+  @Override
+  public void write(DataOutput out) throws IOException {
+    out.writeLong(length);
+    out.writeLong(fileCount);
+    out.writeLong(directoryCount);
+    out.writeLong(quota);
+    out.writeLong(spaceConsumed);
+    out.writeLong(spaceQuota);
+  }
+
+  @InterfaceAudience.Private
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    this.length = in.readLong();
+    this.fileCount = in.readLong();
+    this.directoryCount = in.readLong();
+    this.quota = in.readLong();
+    this.spaceConsumed = in.readLong();
+    this.spaceQuota = in.readLong();
+  }
+  
+  /** 
+   * Output format:
+   * <----12----> <----12----> <-------18------->
+   *    DIR_COUNT   FILE_COUNT       CONTENT_SIZE FILE_NAME    
+   */
+  private static final String STRING_FORMAT = "%12d %12d %18d ";
+  /** 
+   * Output format:
+   * <----12----> <----15----> <----15----> <----15----> <----12----> <----12----> <-------18------->
+   *    QUOTA   REMAINING_QUOTA SPACE_QUOTA SPACE_QUOTA_REM DIR_COUNT   FILE_COUNT   CONTENT_SIZE     FILE_NAME
+   */
+  private static final String QUOTA_STRING_FORMAT = "%12s %15s ";
+  private static final String SPACE_QUOTA_STRING_FORMAT = "%15s %15s ";
+  
+  /** The header string */
+  private static final String HEADER = String.format(
+      STRING_FORMAT.replace('d', 's'), "directories", "files", "bytes");
+
+  private static final String QUOTA_HEADER = String.format(
+      QUOTA_STRING_FORMAT + SPACE_QUOTA_STRING_FORMAT, 
+      "quota", "remaining quota", "space quota", "reamaining quota") +
+      HEADER;
+  
+  /** Return the header of the output.
+   * If qOption is false, output directory count, file count, and content size;
+   * if qOption is true, output quota and remaining quota as well.
+   * 
+   * @param qOption a flag indicating if quota needs to be printed or not
+   * @return the header of the output
+   */
+  public static String getHeader(boolean qOption) {
+    return qOption ? QUOTA_HEADER : HEADER;
+  }
+  
+  @Override
+  public String toString() {
+    return toString(true);
+  }
+
+  /** Return the string representation of the object in the output format.
+   * If qOption is false, output directory count, file count, and content size;
+   * if qOption is true, output quota and remaining quota as well.
+   * 
+   * @param qOption a flag indicating if quota needs to be printed or not
+   * @return the string representation of the object
+   */
+  public String toString(boolean qOption) {
+    String prefix = "";
+    if (qOption) {
+      String quotaStr = "none";
+      String quotaRem = "inf";
+      String spaceQuotaStr = "none";
+      String spaceQuotaRem = "inf";
+      
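+      // The namespace quota counts both directories and files against the
+      // limit, hence remaining = quota - (directoryCount + fileCount).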
+      if (quota>0) {
+        quotaStr = Long.toString(quota);
+        quotaRem = Long.toString(quota-(directoryCount+fileCount));
+      }
+      if (spaceQuota>0) {
+        spaceQuotaStr = Long.toString(spaceQuota);
+        spaceQuotaRem = Long.toString(spaceQuota - spaceConsumed);        
+      }
+      
+      prefix = String.format(QUOTA_STRING_FORMAT + SPACE_QUOTA_STRING_FORMAT, 
+                             quotaStr, quotaRem, spaceQuotaStr, spaceQuotaRem);
+    }
+    
+    return prefix + String.format(STRING_FORMAT, directoryCount, 
+                                  fileCount, length);
+  }
+}

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/CorruptFileBlocksWritable.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/CorruptFileBlocksWritable.java?rev=1179877&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/CorruptFileBlocksWritable.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/CorruptFileBlocksWritable.java Thu Oct  6 21:58:22 2011
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocolR23Compatible;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.Text;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+/**
+ * Contains a list of paths corresponding to corrupt files and a cookie
+ * used for iterative calls to NameNode.listCorruptFileBlocks.
+ *
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class CorruptFileBlocksWritable implements Writable {
+
+  private String[] files;
+  private String cookie;
+
+  static public org.apache.hadoop.hdfs.protocol.CorruptFileBlocks 
+    convertCorruptFileBlocks(CorruptFileBlocksWritable c) {
+    if (c == null) return null;
+    return new org.apache.hadoop.hdfs.protocol.CorruptFileBlocks(
+        c.getFiles(), c.getCookie());
+  }
+  
+  public static CorruptFileBlocksWritable convertCorruptFilesBlocks(
+      org.apache.hadoop.hdfs.protocol.CorruptFileBlocks c) {
+    if (c == null) return null;
+    return new CorruptFileBlocksWritable(c.getFiles(), c.getCookie());
+  }
+ 
+  public CorruptFileBlocksWritable() {
+    this(new String[0], "");
+  }
+
+  public CorruptFileBlocksWritable(String[] files, String cookie) {
+    this.files = files;
+    this.cookie = cookie;
+  }
+ 
+  public String[] getFiles() {
+    return files;
+  }
+
+  public String getCookie() {
+    return cookie;
+  }
+
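+  // On-wire layout (see readFields/write below): an int file count, each
+  // file path as a Text string, then the cookie as a Text string.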
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    int fileCount = in.readInt();
+    files = new String[fileCount];
+    for (int i = 0; i < fileCount; i++) {
+      files[i] = Text.readString(in);
+    }
+    cookie = Text.readString(in);
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    out.writeInt(files.length);
+    for (int i = 0; i < files.length; i++) {
+      Text.writeString(out, files[i]);
+    }
+    Text.writeString(out, cookie);
+  }
+}

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/DatanodeIDWritable.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/DatanodeIDWritable.java?rev=1179877&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/DatanodeIDWritable.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/DatanodeIDWritable.java Thu Oct  6 21:58:22 2011
@@ -0,0 +1,209 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocolR23Compatible;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.DeprecatedUTF8;
+import org.apache.hadoop.io.Writable;
+
+/**
+ * DatanodeIDWritable is composed of the data node
+ * name (hostname:portNumber) and the ID of the data
+ * storage it currently represents.
+ * 
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class DatanodeIDWritable implements Writable {
+  public static final DatanodeIDWritable[] EMPTY_ARRAY = {}; 
+
+  public String name;      /// hostname:portNumber
+  public String storageID; /// unique per cluster storageID
+  protected int infoPort;     /// the port where the infoserver is running
+  public int ipcPort;     /// the port where the ipc server is running
+
+  
+  static public DatanodeIDWritable[] 
+      convertDatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID[] did) {
+    if (did == null) return null;
+    final int len = did.length;
+    DatanodeIDWritable[] result = new DatanodeIDWritable[len];
+    for (int i = 0; i < len; ++i) {
+      result[i] = convertDatanodeID(did[i]);
+    }
+    return result;
+  }
+  
+  static public org.apache.hadoop.hdfs.protocol.DatanodeID[] 
+      convertDatanodeID(DatanodeIDWritable[] did) {
+    if (did == null) return null;
+    final int len = did.length;
+    org.apache.hadoop.hdfs.protocol.DatanodeID[] result = new org.apache.hadoop.hdfs.protocol.DatanodeID[len];
+    for (int i = 0; i < len; ++i) {
+      result[i] = convertDatanodeID(did[i]);
+    }
+    return result;
+  }
+  
+  static public org.apache.hadoop.hdfs.protocol.DatanodeID convertDatanodeID(
+      DatanodeIDWritable did) {
+    if (did == null) return null;
+    return new org.apache.hadoop.hdfs.protocol.DatanodeID(
+        did.getName(), did.getStorageID(), did.getInfoPort(), did.getIpcPort());
+    
+  }
+  
+  public static DatanodeIDWritable convertDatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID from) {
+    return new DatanodeIDWritable(from.getName(),
+        from.getStorageID(),
+        from.getInfoPort(),
+        from.getIpcPort());
+  }
+  
+  /** Equivalent to DatanodeID(""). */
+  public DatanodeIDWritable() {this("");}
+
+  /** Equivalent to DatanodeID(nodeName, "", -1, -1). */
+  public DatanodeIDWritable(String nodeName) {this(nodeName, "", -1, -1);}
+
+  /**
+   * DatanodeID copy constructor
+   * 
+   * @param from
+   */
+  public DatanodeIDWritable(DatanodeIDWritable from) {
+    this(from.getName(),
+        from.getStorageID(),
+        from.getInfoPort(),
+        from.getIpcPort());
+  }
+  
+  /**
+   * Create DatanodeID
+   * @param nodeName (hostname:portNumber) 
+   * @param storageID data storage ID
+   * @param infoPort info server port 
+   * @param ipcPort ipc server port
+   */
+  public DatanodeIDWritable(String nodeName, String storageID,
+      int infoPort, int ipcPort) {
+    this.name = nodeName;
+    this.storageID = storageID;
+    this.infoPort = infoPort;
+    this.ipcPort = ipcPort;
+  }
+  
+  public void setName(String name) {
+    this.name = name;
+  }
+
+  public void setInfoPort(int infoPort) {
+    this.infoPort = infoPort;
+  }
+  
+  public void setIpcPort(int ipcPort) {
+    this.ipcPort = ipcPort;
+  }
+  
+  /**
+   * @return hostname:portNumber.
+   */
+  public String getName() {
+    return name;
+  }
+  
+  /**
+   * @return data storage ID.
+   */
+  public String getStorageID() {
+    return this.storageID;
+  }
+
+  /**
+   * @return infoPort (the port to which the HTTP server is bound)
+   */
+  public int getInfoPort() {
+    return infoPort;
+  }
+
+  /**
+   * @return ipcPort (the port to which the IPC server is bound)
+   */
+  public int getIpcPort() {
+    return ipcPort;
+  }
+
+  /**
+   * Sets the data storage ID.
+   */
+  public void setStorageID(String storageID) {
+    this.storageID = storageID;
+  }
+
+  /**
+   * @return the hostname only, without the :portNumber.
+   */
+  public String getHost() {
+    int colon = name.indexOf(":");
+    if (colon < 0) {
+      return name;
+    } else {
+      return name.substring(0, colon);
+    }
+  }
+  
+  public int getPort() {
+    int colon = name.indexOf(":");
+    if (colon < 0) {
+      return 50010; // default DataNode data transfer port.
+    }
+    return Integer.parseInt(name.substring(colon+1));
+  }
+
+  
+  public String toString() {
+    return name;
+  }    
+
+  /////////////////////////////////////////////////
+  // Writable
+  /////////////////////////////////////////////////
+  @Override
+  public void write(DataOutput out) throws IOException {
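+    // Note that ipcPort is not serialized here; DatanodeInfoWritable writes
+    // it separately after calling super.write(out).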
+    DeprecatedUTF8.writeString(out, name);
+    DeprecatedUTF8.writeString(out, storageID);
+    out.writeShort(infoPort);
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    name = DeprecatedUTF8.readString(in);
+    storageID = DeprecatedUTF8.readString(in);
+    // The infoPort read here may come back negative when the port needs more
+    // than 15 bits (but fewer than 16), because readShort() sign-extends the
+    // value. Mask everything above the low 16 bits (and hence the sign bits)
+    // before setting the field.
+    this.infoPort = in.readShort() & 0x0000ffff;
+  }
+}

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/DatanodeInfoWritable.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/DatanodeInfoWritable.java?rev=1179877&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/DatanodeInfoWritable.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/DatanodeInfoWritable.java Thu Oct  6 21:58:22 2011
@@ -0,0 +1,328 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocolR23Compatible;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.net.NodeBase;
+import org.apache.hadoop.HadoopIllegalArgumentException;
+
+import org.apache.avro.reflect.Nullable;
+
+/** 
+ * DatanodeInfo represents the status of a DataNode.
+ * This object is used for communication in the
+ * Datanode Protocol and the Client Protocol.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class DatanodeInfoWritable extends DatanodeIDWritable  {
+  protected long capacity;
+  protected long dfsUsed;
+  protected long remaining;
+  protected long blockPoolUsed;
+  protected long lastUpdate;
+  protected int xceiverCount;
+  protected String location = NetworkTopology.DEFAULT_RACK;
+
+  /** Hostname supplied by the datanode during registration as its
+   * name. The namenode itself uses the datanode's IP address as the name.
+   */
+  @Nullable
+  protected String hostName = null;
+  
+  // administrative states of a datanode
+  public enum AdminStates {
+    NORMAL(org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates.NORMAL.toString()), 
+    DECOMMISSION_INPROGRESS(org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates.DECOMMISSION_INPROGRESS.toString()), 
+    DECOMMISSIONED(org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates.DECOMMISSIONED.toString());
+
+    final String value;
+
+    AdminStates(final String v) {
+      this.value = v;
+    }
+
+    public String toString() {
+      return value;
+    }
+    
+    public static AdminStates fromValue(final String value) {
+      for (AdminStates as : AdminStates.values()) {
+        if (as.value.equals(value)) return as;
+      }
+      throw new HadoopIllegalArgumentException("Unknown Admin State: " + value);
+    }
+  }
+
+  @Nullable
+  protected AdminStates adminState;
+  
+  static public org.apache.hadoop.hdfs.protocol.DatanodeInfo convertDatanodeInfo(DatanodeInfoWritable di) {
+    if (di == null) return null;
+    return new org.apache.hadoop.hdfs.protocol.DatanodeInfo(
+        new org.apache.hadoop.hdfs.protocol.DatanodeID(di.getName(), di.getStorageID(), di.getInfoPort(), di.getIpcPort()),
+        di.getNetworkLocation(), di.getHostName(),
+        di.getCapacity(), di.getDfsUsed(), di.getRemaining(),
+        di.getBlockPoolUsed(), di.getLastUpdate(), di.getXceiverCount(),
+        org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates.fromValue(di.getAdminState().value));
+  }
+  
+  
+  static public org.apache.hadoop.hdfs.protocol.DatanodeInfo[] convertDatanodeInfo(DatanodeInfoWritable di[]) {
+    if (di == null) return null;
+    org.apache.hadoop.hdfs.protocol.DatanodeInfo[] result = new org.apache.hadoop.hdfs.protocol.DatanodeInfo[di.length];
+    for (int i = 0; i < di.length; i++) {
+      result[i] = convertDatanodeInfo(di[i]);
+    }    
+    return result;
+  }
+  
+  static public DatanodeInfoWritable[] convertDatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo[] di) {
+    if (di == null) return null;
+    DatanodeInfoWritable[] result = new DatanodeInfoWritable[di.length];
+    for (int i = 0; i < di.length; i++) {
+      result[i] = new DatanodeInfoWritable(new DatanodeIDWritable(di[i].getName(), di[i].getStorageID(), di[i].getInfoPort(), di[i].getIpcPort()),
+          di[i].getNetworkLocation(), di[i].getHostName(),
+          di[i].getCapacity(), di[i].getDfsUsed(), di[i].getRemaining(),
+          di[i].getBlockPoolUsed(), di[i].getLastUpdate(), di[i].getXceiverCount(),
+          AdminStates.fromValue(di[i].getAdminState().toString()));
+    }    
+    return result;
+    
+  }
+
+  public DatanodeInfoWritable() {
+    super();
+    adminState = null;
+  }
+  
+  public DatanodeInfoWritable(DatanodeInfoWritable from) {
+    super(from);
+    this.capacity = from.getCapacity();
+    this.dfsUsed = from.getDfsUsed();
+    this.remaining = from.getRemaining();
+    this.blockPoolUsed = from.getBlockPoolUsed();
+    this.lastUpdate = from.getLastUpdate();
+    this.xceiverCount = from.getXceiverCount();
+    this.location = from.getNetworkLocation();
+    this.adminState = from.adminState;
+    this.hostName = from.hostName;
+  }
+
+  public DatanodeInfoWritable(DatanodeIDWritable nodeID) {
+    super(nodeID);
+    this.capacity = 0L;
+    this.dfsUsed = 0L;
+    this.remaining = 0L;
+    this.blockPoolUsed = 0L;
+    this.lastUpdate = 0L;
+    this.xceiverCount = 0;
+    this.adminState = null;    
+  }
+  
+  protected DatanodeInfoWritable(DatanodeIDWritable nodeID, String location, String hostName) {
+    this(nodeID);
+    this.location = location;
+    this.hostName = hostName;
+  }
+  
+  public DatanodeInfoWritable(DatanodeIDWritable nodeID, String location, String hostName,
+      final long capacity, final long dfsUsed, final long remaining,
+      final long blockPoolUsed, final long lastUpdate, final int xceiverCount,
+      final AdminStates adminState) {
+    this(nodeID, location, hostName);
+    this.capacity = capacity;
+    this.dfsUsed = dfsUsed;
+    this.remaining = remaining;
+    this.blockPoolUsed = blockPoolUsed;
+    this.lastUpdate = lastUpdate;
+    this.xceiverCount = xceiverCount;
+    this.adminState = adminState;
+  }
+  
+  /** The raw capacity. */
+  public long getCapacity() { return capacity; }
+  
+  /** The used space by the data node. */
+  public long getDfsUsed() { return dfsUsed; }
+
+  /** The used space by the block pool on data node. */
+  public long getBlockPoolUsed() { return blockPoolUsed; }
+
+  /** The non-DFS space used on the data node. */
+  public long getNonDfsUsed() { 
+    long nonDFSUsed = capacity - dfsUsed - remaining;
+    return nonDFSUsed < 0 ? 0 : nonDFSUsed;
+  }
+
+  /** The used space by the data node as percentage of present capacity */
+  public float getDfsUsedPercent() { 
+    return DFSUtil.getPercentUsed(dfsUsed, capacity);
+  }
+
+  /** The raw free space. */
+  public long getRemaining() { return remaining; }
+
+  /** Used space by the block pool as percentage of present capacity */
+  public float getBlockPoolUsedPercent() {
+    return DFSUtil.getPercentUsed(blockPoolUsed, capacity);
+  }
+  
+  /** The remaining space as percentage of configured capacity. */
+  public float getRemainingPercent() { 
+    return DFSUtil.getPercentRemaining(remaining, capacity);
+  }
+
+  /** The time when this information was accurate. */
+  public long getLastUpdate() { return lastUpdate; }
+
+  /** number of active connections */
+  public int getXceiverCount() { return xceiverCount; }
+
+  /** Sets raw capacity. */
+  public void setCapacity(long capacity) { 
+    this.capacity = capacity; 
+  }
+  
+  /** Sets the used space for the datanode. */
+  public void setDfsUsed(long dfsUsed) {
+    this.dfsUsed = dfsUsed;
+  }
+
+  /** Sets raw free space. */
+  public void setRemaining(long remaining) { 
+    this.remaining = remaining; 
+  }
+
+  /** Sets block pool used space */
+  public void setBlockPoolUsed(long bpUsed) { 
+    this.blockPoolUsed = bpUsed; 
+  }
+
+  /** Sets time when this information was accurate. */
+  public void setLastUpdate(long lastUpdate) { 
+    this.lastUpdate = lastUpdate; 
+  }
+
+  /** Sets number of active connections */
+  public void setXceiverCount(int xceiverCount) { 
+    this.xceiverCount = xceiverCount; 
+  }
+
+  /** rack name */
+  public String getNetworkLocation() {return location;}
+    
+  /** Sets the rack name */
+  public void setNetworkLocation(String location) {
+    this.location = NodeBase.normalize(location);
+  }
+  
+  public String getHostName() {
+    return (hostName == null || hostName.length()==0) ? getHost() : hostName;
+  }
+  
+  public void setHostName(String host) {
+    hostName = host;
+  }
+
+  /**
+   * Retrieves the admin state of this node.
+   */
+  public AdminStates getAdminState() {
+    if (adminState == null) {
+      return AdminStates.NORMAL;
+    }
+    return adminState;
+  }
+
+  /**
+   * Sets the admin state of this node.
+   */
+  protected void setAdminState(AdminStates newState) {
+    if (newState == AdminStates.NORMAL) {
+      adminState = null;
+    }
+    else {
+      adminState = newState;
+    }
+  }
+
+  /////////////////////////////////////////////////
+  // Writable
+  /////////////////////////////////////////////////
+  static {                                      // register a ctor
+    WritableFactories.setFactory
+      (DatanodeInfoWritable.class,
+       new WritableFactory() {
+         public Writable newInstance() { return new DatanodeInfoWritable(); }
+       });
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    super.write(out);
+
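+    // ipcPort is written here rather than in DatanodeIDWritable.write().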
+    out.writeShort(ipcPort);
+
+    out.writeLong(capacity);
+    out.writeLong(dfsUsed);
+    out.writeLong(remaining);
+    out.writeLong(blockPoolUsed);
+    out.writeLong(lastUpdate);
+    out.writeInt(xceiverCount);
+    Text.writeString(out, location);
+    Text.writeString(out, hostName == null? "" : hostName);
+    WritableUtils.writeEnum(out, getAdminState());
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    super.readFields(in);
+
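+    // Mask off the sign extension of readShort() so ports above 32767 are
+    // restored correctly (same handling as infoPort in DatanodeIDWritable).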
+    this.ipcPort = in.readShort() & 0x0000ffff;
+
+    this.capacity = in.readLong();
+    this.dfsUsed = in.readLong();
+    this.remaining = in.readLong();
+    this.blockPoolUsed = in.readLong();
+    this.lastUpdate = in.readLong();
+    this.xceiverCount = in.readInt();
+    this.location = Text.readString(in);
+    this.hostName = Text.readString(in);
+    setAdminState(WritableUtils.readEnum(in, AdminStates.class));
+  }
+
+  /** Read a DatanodeInfo */
+  public static DatanodeInfoWritable read(DataInput in) throws IOException {
+    final DatanodeInfoWritable d = new DatanodeInfoWritable();
+    d.readFields(in);
+    return d;
+  }
+}

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/DirectoryListingWritable.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/DirectoryListingWritable.java?rev=1179877&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/DirectoryListingWritable.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/DirectoryListingWritable.java Thu Oct  6 21:58:22 2011
@@ -0,0 +1,157 @@
+/* Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocolR23Compatible;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+
+/**
+ * This class defines a partial listing of a directory to support
+ * iterative directory listing.
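+ *
+ * A caller would typically keep requesting further listings, passing
+ * {@link #getLastName()} as the startAfter argument of the next call, for as
+ * long as {@link #hasMore()} returns true (an illustrative description of the
+ * intended usage).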
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class DirectoryListingWritable implements Writable {
+  static {                                      // register a ctor
+    WritableFactories.setFactory
+      (DirectoryListingWritable.class,
+       new WritableFactory() {
+         public Writable newInstance() { return new DirectoryListingWritable(); }
+       });
+  }
+
+  private HdfsFileStatusWritable[] partialListing;
+  private int remainingEntries;
+  
+  public static org.apache.hadoop.hdfs.protocol.DirectoryListing 
+    convertDirectoryListing(DirectoryListingWritable dl) {
+    if (dl == null) return null;
+    return new org.apache.hadoop.hdfs.protocol.DirectoryListing(
+        HdfsFileStatusWritable.convertHdfsFileStatus(
+            dl.getPartialListing()), dl.getRemainingEntries());
+  }
+
+  public static DirectoryListingWritable convertDirectoryListing(
+      org.apache.hadoop.hdfs.protocol.DirectoryListing d) {
+    if (d == null) return null;
+    return new DirectoryListingWritable(
+        org.apache.hadoop.hdfs.protocolR23Compatible.HdfsFileStatusWritable.
+        convertHdfsFileStatus(d.getPartialListing()), d.getRemainingEntries());
+  } 
+  
+  /**
+   * default constructor
+   */
+  public DirectoryListingWritable() {
+  }
+  
+  /**
+   * constructor
+   * @param partialListing a partial listing of a directory
+   * @param remainingEntries number of entries that are left to be listed
+   */
+  public DirectoryListingWritable(HdfsFileStatusWritable[] partialListing, 
+      int remainingEntries) {
+    if (partialListing == null) {
+      throw new IllegalArgumentException("partial listing should not be null");
+    }
+    if (partialListing.length == 0 && remainingEntries != 0) {
+      throw new IllegalArgumentException("Partial listing is empty but " +
+          "the number of remaining entries is not zero");
+    }
+    this.partialListing = partialListing;
+    this.remainingEntries = remainingEntries;
+  }
+
+  /**
+   * Get the partial listing of file status
+   * @return the partial listing of file status
+   */
+  public HdfsFileStatusWritable[] getPartialListing() {
+    return partialListing;
+  }
+  
+  /**
+   * Get the number of remaining entries that are left to be listed
+   * @return the number of remaining entries that are left to be listed
+   */
+  public int getRemainingEntries() {
+    return remainingEntries;
+  }
+  
+  /**
+   * Check if there are more entries that are left to be listed
+   * @return true if there are more entries that are left to be listed;
+   *         return false otherwise.
+   */
+  public boolean hasMore() {
+    return remainingEntries != 0;
+  }
+  
+  /**
+   * Get the last name in this list
+   * @return the last name in the list if it is not empty; otherwise return null
+   */
+  public byte[] getLastName() {
+    if (partialListing.length == 0) {
+      return null;
+    }
+    return partialListing[partialListing.length-1].getLocalNameInBytes();
+  }
+
+  // Writable interface
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    int numEntries = in.readInt();
+    partialListing = new HdfsFileStatusWritable[numEntries];
+    if (numEntries != 0) {
+      boolean hasLocation = in.readBoolean();
+      for (int i=0; i<numEntries; i++) {
+        if (hasLocation) {
+          partialListing[i] = new HdfsLocatedFileStatusWritable();
+        } else {
+          partialListing[i] = new HdfsFileStatusWritable();
+        }
+        partialListing[i].readFields(in);
+      }
+    }
+    remainingEntries = in.readInt();
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    out.writeInt(partialListing.length);
+    if (partialListing.length != 0) { 
+       if (partialListing[0] instanceof HdfsLocatedFileStatusWritable) {
+         out.writeBoolean(true);
+       } else {
+         out.writeBoolean(false);
+       }
+       for (HdfsFileStatusWritable fileStatus : partialListing) {
+         fileStatus.write(out);
+       }
+    }
+    out.writeInt(remainingEntries);
+  }
+}
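
A minimal usage sketch of DirectoryListingWritable (not part of the patch above). It exercises the constructor, the Writable round trip, and the hasMore()/getLastName() pagination helpers; the single "example.txt" entry, the remaining-entry count, and the class name of the sketch are made-up values, and it relies on the HdfsFileStatusWritable constructor added later in this same commit.

import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocolR23Compatible.DirectoryListingWritable;
import org.apache.hadoop.hdfs.protocolR23Compatible.FsPermissionWritable;
import org.apache.hadoop.hdfs.protocolR23Compatible.HdfsFileStatusWritable;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class DirectoryListingWritableExample {
  public static void main(String[] args) throws Exception {
    // One made-up file entry plus two entries still to be listed on the server.
    HdfsFileStatusWritable entry = new HdfsFileStatusWritable(
        1024L, false, 3, 64L * 1024 * 1024, 0L, 0L,
        FsPermissionWritable.getDefault(), "hdfs", "supergroup",
        null, DFSUtil.string2Bytes("example.txt"));
    DirectoryListingWritable dl =
        new DirectoryListingWritable(new HdfsFileStatusWritable[] { entry }, 2);

    // Round trip through the Writable methods.
    DataOutputBuffer out = new DataOutputBuffer();
    dl.write(out);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    DirectoryListingWritable copy = new DirectoryListingWritable();
    copy.readFields(in);

    // The last name is what a client would pass back as the start-after key.
    System.out.println(copy.hasMore());                           // true
    System.out.println(DFSUtil.bytes2String(copy.getLastName())); // example.txt
  }
}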

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ExtendedBlockWritable.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ExtendedBlockWritable.java?rev=1179877&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ExtendedBlockWritable.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ExtendedBlockWritable.java Thu Oct  6 21:58:22 2011
@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocolR23Compatible;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.DeprecatedUTF8;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+
+/**
+ * Identifies a Block uniquely across the block pools
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class ExtendedBlockWritable implements Writable {
+  private String poolId;
+  private long blockId;
+  private long numBytes;
+  private long generationStamp;
+
+  static { // register a ctor
+    WritableFactories.setFactory(ExtendedBlockWritable.class, new WritableFactory() {
+      public Writable newInstance() {
+        return new ExtendedBlockWritable();
+      }
+    });
+  }
+
+  static public org.apache.hadoop.hdfs.protocol.ExtendedBlock convertExtendedBlock(
+      ExtendedBlockWritable eb) {
+    if (eb == null) return null;
+    return new org.apache.hadoop.hdfs.protocol.ExtendedBlock(eb.getBlockPoolId(),
+        eb.getBlockId(), eb.getNumBytes(), eb.getGenerationStamp());
+  }
+  
+  public static ExtendedBlockWritable convertExtendedBlock(final org.apache.hadoop.hdfs.protocol.ExtendedBlock b) {
+    if (b == null) return null;
+    return new ExtendedBlockWritable(b.getBlockPoolId(), 
+        b.getBlockId(), b.getNumBytes(), b.getGenerationStamp());
+  }
+  
+  public ExtendedBlockWritable() {
+    this(null, 0, 0, 0);
+  }
+
+  public ExtendedBlockWritable(final ExtendedBlockWritable b) {
+    this(b.poolId, b.blockId, b.numBytes, b.generationStamp);
+  }
+  
+  public ExtendedBlockWritable(final String poolId, final long blockId) {
+    this(poolId, blockId, 0, 0);
+  }
+
+  public ExtendedBlockWritable(final String poolId, final long blkid, final long len,
+      final long genstamp) {
+    this.poolId = poolId;
+    this.blockId = blkid;
+    this.numBytes = len;
+    this.generationStamp = genstamp;
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    DeprecatedUTF8.writeString(out, poolId);
+    out.writeLong(blockId);
+    out.writeLong(numBytes);
+    out.writeLong(generationStamp);
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    this.poolId = DeprecatedUTF8.readString(in);
+    this.blockId = in.readLong();
+    this.numBytes = in.readLong();
+    this.generationStamp = in.readLong();
+    if (numBytes < 0) {
+      throw new IOException("Unexpected block size: " + numBytes);
+    }
+  }
+
+  public String getBlockPoolId() {
+    return poolId;
+  }
+
+  public long getNumBytes() {
+    return numBytes;
+  }
+
+  public long getBlockId() {
+    return blockId;
+  }
+
+  public long getGenerationStamp() {
+    return generationStamp;
+  }
+  
+  @Override // Object
+  public String toString() {
+    return poolId + ":" + (new org.apache.hadoop.hdfs.protocol.Block(blockId, numBytes, generationStamp));
+  }
+}
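
A short sketch (assumed usage, not part of the patch) of how the static convertExtendedBlock helpers bridge the wire type and org.apache.hadoop.hdfs.protocol.ExtendedBlock; the pool id, block id, length, and generation stamp below are arbitrary example values.

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocolR23Compatible.ExtendedBlockWritable;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class ExtendedBlockWritableExample {
  public static void main(String[] args) throws Exception {
    // Current (non-wire) representation; the values are arbitrary.
    ExtendedBlock block = new ExtendedBlock("BP-example", 42L, 1024L, 7L);

    // Convert to the R23 wire form and push it through the Writable methods.
    ExtendedBlockWritable wire = ExtendedBlockWritable.convertExtendedBlock(block);
    DataOutputBuffer out = new DataOutputBuffer();
    wire.write(out);

    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    ExtendedBlockWritable decoded = new ExtendedBlockWritable();
    decoded.readFields(in);

    // Convert back to the current representation.
    ExtendedBlock roundTripped = ExtendedBlockWritable.convertExtendedBlock(decoded);
    System.out.println(roundTripped); // prints the pool id and block
  }
}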

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/FsPermissionWritable.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/FsPermissionWritable.java?rev=1179877&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/FsPermissionWritable.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/FsPermissionWritable.java Thu Oct  6 21:58:22 2011
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocolR23Compatible;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class FsPermissionWritable  implements Writable {
+  static final WritableFactory FACTORY = new WritableFactory() {
+    public Writable newInstance() { return new FsPermissionWritable(); }
+  };
+  static {                                      // register a ctor
+    WritableFactories.setFactory(FsPermissionWritable.class, FACTORY);
+  }
+  //POSIX permission style
+  private short thePermissions = 0;
+  
+  public static FsPermissionWritable convertPermission(org.apache.hadoop.fs.permission.FsPermission p) {
+    if (p == null) return null;
+    return new FsPermissionWritable(p.toShort());
+  }
+  
+  public static org.apache.hadoop.fs.permission.FsPermission convertPermission(FsPermissionWritable p) {
+    if (p == null) return null;
+    return new org.apache.hadoop.fs.permission.FsPermission(p.thePermissions);
+  }
+  
+  public static FsPermissionWritable getDefault() {
+    return new FsPermissionWritable((short)0777);
+  }
+  
+  FsPermissionWritable() {
+  }
+  FsPermissionWritable(short p) {
+    thePermissions = p;
+  }
+  
+  @Override
+  public void write(DataOutput out) throws IOException {
+    out.writeShort(thePermissions);
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    thePermissions = in.readShort();
+  }
+
+  /**
+   * Create and initialize a {@link FsPermissionWritable} from {@link DataInput}.
+   */
+  public static FsPermissionWritable read(DataInput in) throws IOException {
+    FsPermissionWritable p = new FsPermissionWritable();
+    p.readFields(in);
+    return p;
+  }
+}
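
A brief sketch (assumed usage, not part of the patch) showing how convertPermission maps between org.apache.hadoop.fs.permission.FsPermission and the wire form, and how the static read() helper is used; the 0644 mode is an arbitrary example value.

import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocolR23Compatible.FsPermissionWritable;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class FsPermissionWritableExample {
  public static void main(String[] args) throws Exception {
    // Wrap a plain FsPermission (rw-r--r--) in the wire type.
    FsPermission perm = new FsPermission((short) 0644);
    FsPermissionWritable wire = FsPermissionWritable.convertPermission(perm);

    // Serialize, then read back with the static read() helper.
    DataOutputBuffer out = new DataOutputBuffer();
    wire.write(out);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    FsPermissionWritable decoded = FsPermissionWritable.read(in);

    // Convert back and compare the numeric mode.
    FsPermission back = FsPermissionWritable.convertPermission(decoded);
    System.out.println(back.toShort() == perm.toShort()); // true
  }
}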

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/FsServerDefaultsWritable.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/FsServerDefaultsWritable.java?rev=1179877&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/FsServerDefaultsWritable.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/FsServerDefaultsWritable.java Thu Oct  6 21:58:22 2011
@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocolR23Compatible;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+
+/****************************************************
+ * Provides server default configuration values to clients.
+ * 
+ ****************************************************/
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class FsServerDefaultsWritable implements Writable {
+
+  static { // register a ctor
+    WritableFactories.setFactory(FsServerDefaultsWritable.class, new WritableFactory() {
+      public Writable newInstance() {
+        return new FsServerDefaultsWritable();
+      }
+    });
+  }
+
+  private long blockSize;
+  private int bytesPerChecksum;
+  private int writePacketSize;
+  private short replication;
+  private int fileBufferSize;
+  
+  public static org.apache.hadoop.fs.FsServerDefaults convert(
+      FsServerDefaultsWritable fs) {
+    if (fs == null) return null;
+    return new org.apache.hadoop.fs.FsServerDefaults(
+        fs.getBlockSize(), fs.getBytesPerChecksum(), 
+        fs.getWritePacketSize(), fs.getReplication(), fs.getFileBufferSize());
+  }
+  
+  public static FsServerDefaultsWritable convert(
+      org.apache.hadoop.fs.FsServerDefaults fs) {
+    if (fs == null) return null;
+    return new FsServerDefaultsWritable(
+        fs.getBlockSize(), fs.getBytesPerChecksum(), 
+        fs.getWritePacketSize(), fs.getReplication(), fs.getFileBufferSize());
+  }
+  
+  public FsServerDefaultsWritable() {
+  }
+
+  public FsServerDefaultsWritable(long blockSize, int bytesPerChecksum,
+      int writePacketSize, short replication, int fileBufferSize) {
+    this.blockSize = blockSize;
+    this.bytesPerChecksum = bytesPerChecksum;
+    this.writePacketSize = writePacketSize;
+    this.replication = replication;
+    this.fileBufferSize = fileBufferSize;
+  }
+
+  public long getBlockSize() {
+    return blockSize;
+  }
+
+  public int getBytesPerChecksum() {
+    return bytesPerChecksum;
+  }
+
+  public int getWritePacketSize() {
+    return writePacketSize;
+  }
+
+  public short getReplication() {
+    return replication;
+  }
+
+  public int getFileBufferSize() {
+    return fileBufferSize;
+  }
+
+  // /////////////////////////////////////////
+  // Writable
+  // /////////////////////////////////////////
+  @Override
+  @InterfaceAudience.Private
+  public void write(DataOutput out) throws IOException {
+    out.writeLong(blockSize);
+    out.writeInt(bytesPerChecksum);
+    out.writeInt(writePacketSize);
+    out.writeShort(replication);
+    out.writeInt(fileBufferSize);
+  }
+
+  @Override
+  @InterfaceAudience.Private
+  public void readFields(DataInput in) throws IOException {
+    blockSize = in.readLong();
+    bytesPerChecksum = in.readInt();
+    writePacketSize = in.readInt();
+    replication = in.readShort();
+    fileBufferSize = in.readInt();
+  }
+}
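
A minimal sketch (not part of the patch) of the FsServerDefaultsWritable round trip and its convert() bridge to org.apache.hadoop.fs.FsServerDefaults; the block size, checksum chunk, packet size, replication, and buffer size are made-up example values.

import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.hdfs.protocolR23Compatible.FsServerDefaultsWritable;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class FsServerDefaultsWritableExample {
  public static void main(String[] args) throws Exception {
    // Arbitrary server defaults: 64MB blocks, 512-byte checksum chunks, etc.
    FsServerDefaultsWritable wire = new FsServerDefaultsWritable(
        64L * 1024 * 1024, 512, 64 * 1024, (short) 3, 4096);

    DataOutputBuffer out = new DataOutputBuffer();
    wire.write(out);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());

    FsServerDefaultsWritable decoded = new FsServerDefaultsWritable();
    decoded.readFields(in);

    // Hand the decoded defaults to client code as the common FsServerDefaults type.
    FsServerDefaults defaults = FsServerDefaultsWritable.convert(decoded);
    System.out.println(defaults.getBlockSize());   // 67108864
    System.out.println(defaults.getReplication()); // 3
  }
}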

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/HdfsFileStatusWritable.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/HdfsFileStatusWritable.java?rev=1179877&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/HdfsFileStatusWritable.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/HdfsFileStatusWritable.java Thu Oct  6 21:58:22 2011
@@ -0,0 +1,351 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocolR23Compatible;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+
+import org.apache.avro.reflect.Nullable;
+
+/** Class that represents the over-the-wire information for a file.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class HdfsFileStatusWritable implements Writable {
+  static {                                      // register a ctor
+    WritableFactories.setFactory
+      (HdfsFileStatusWritable.class,
+       new WritableFactory() {
+         public Writable newInstance() { return new HdfsFileStatusWritable(); }
+       });
+  }
+
+  private byte[] path;  // local name of the inode that's encoded in java UTF8
+  @Nullable
+  private byte[] symlink; // symlink target encoded in java UTF8 or null
+  private long length;
+  private boolean isdir;
+  private short block_replication;
+  private long blocksize;
+  private long modification_time;
+  private long access_time;
+  private FsPermissionWritable permission;
+  private String owner;
+  private String group;
+  
+  public static final byte[] EMPTY_NAME = new byte[0];
+
+  static public org.apache.hadoop.hdfs.protocol.HdfsFileStatus
+    convertHdfsFileStatus(HdfsFileStatusWritable fs) {
+    if (fs == null) return null;
+    return new org.apache.hadoop.hdfs.protocol.HdfsFileStatus(fs.getLen(),
+        fs.isDir(), fs.getReplication(), fs.getBlockSize(),
+        fs.getModificationTime(), fs.getAccessTime(), 
+        FsPermissionWritable.convertPermission(fs.getPermission()),
+        fs.getOwner(), fs.getGroup(), fs.getSymlinkInBytes(),
+        fs.getLocalNameInBytes());
+  }
+  
+  static public HdfsFileStatusWritable[] convertHdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus[] fs) {
+    if (fs == null) return null;
+    final int len = fs.length;
+    HdfsFileStatusWritable[] result = new HdfsFileStatusWritable[len];
+    for (int i = 0; i < len; ++i) {
+      if (fs[i] instanceof org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus) {
+        result[i] = 
+            HdfsLocatedFileStatusWritable.convertLocatedHdfsFileStatus(
+                (org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus)fs[i]);
+      } else {
+        result[i] = HdfsFileStatusWritable.convertHdfsFileStatus(fs[i]);
+      }
+    }
+    return result;
+  }
+  
+
+  public static org.apache.hadoop.hdfs.protocol.HdfsFileStatus[] convertHdfsFileStatus(
+      HdfsFileStatusWritable[] fs) {
+    if (fs == null) return null;
+    final int len = fs.length;
+    org.apache.hadoop.hdfs.protocol.HdfsFileStatus[] result = 
+        new org.apache.hadoop.hdfs.protocol.HdfsFileStatus[len];
+    for (int i = 0; i < len; ++i) {
+      if (fs[i] instanceof HdfsLocatedFileStatusWritable) {
+        result[i] = 
+            HdfsLocatedFileStatusWritable.convertLocatedHdfsFileStatus((HdfsLocatedFileStatusWritable)fs[i]);
+      } else {
+        result[i] = convertHdfsFileStatus(fs[i]);
+      }
+    }
+    return result;
+  }
+ 
+  public static HdfsFileStatusWritable convertHdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus fs) {
+    if (fs == null) return null;
+    return new HdfsFileStatusWritable(fs.getLen(), fs.isDir(), fs.getReplication(),
+       fs.getBlockSize(),  fs.getModificationTime(),  fs.getAccessTime(),
+       org.apache.hadoop.hdfs.protocolR23Compatible.FsPermissionWritable.
+         convertPermission(fs.getPermission()),
+       fs.getOwner(),  fs.getGroup(), 
+       fs.getSymlinkInBytes(), fs.getLocalNameInBytes());
+  }
+  
+  /**
+   * default constructor
+   */
+  public HdfsFileStatusWritable() { 
+    this(0, false, 0, 0, 0, 0, null, null, null, null, null); 
+  }
+  
+  /**
+   * Constructor
+   * @param length the number of bytes the file has
+   * @param isdir if the path is a directory
+   * @param block_replication the replication factor
+   * @param blocksize the block size
+   * @param modification_time modification time
+   * @param access_time access time
+   * @param permission permission
+   * @param owner the owner of the path
+   * @param group the group of the path
+   * @param symlink the symlink target encoded in java UTF8, or null if the path is not a symlink
+   * @param path the local name in java UTF8 encoding, the same as the in-memory representation
+   */
+  public HdfsFileStatusWritable(long length, boolean isdir, int block_replication,
+                    long blocksize, long modification_time, long access_time,
+                    FsPermissionWritable permission, String owner, String group, 
+                    byte[] symlink, byte[] path) {
+    this.length = length;
+    this.isdir = isdir;
+    this.block_replication = (short)block_replication;
+    this.blocksize = blocksize;
+    this.modification_time = modification_time;
+    this.access_time = access_time;
+    this.permission = (permission == null) ? FsPermissionWritable.getDefault() : permission;
+    this.owner = (owner == null) ? "" : owner;
+    this.group = (group == null) ? "" : group;
+    this.symlink = symlink;
+    this.path = path;
+  }
+
+  /**
+   * Get the length of this file, in bytes.
+   * @return the length of this file, in bytes.
+   */
+  final public long getLen() {
+    return length;
+  }
+
+  /**
+   * Is this a directory?
+   * @return true if this is a directory
+   */
+  final public boolean isDir() {
+    return isdir;
+  }
+
+  /**
+   * Is this a symbolic link?
+   * @return true if this is a symbolic link
+   */
+  public boolean isSymlink() {
+    return symlink != null;
+  }
+   
+  /**
+   * Get the block size of the file.
+   * @return the number of bytes
+   */
+  final public long getBlockSize() {
+    return blocksize;
+  }
+
+  /**
+   * Get the replication factor of a file.
+   * @return the replication factor of a file.
+   */
+  final public short getReplication() {
+    return block_replication;
+  }
+
+  /**
+   * Get the modification time of the file.
+   * @return the modification time of the file in milliseconds since January 1, 1970 UTC.
+   */
+  final public long getModificationTime() {
+    return modification_time;
+  }
+
+  /**
+   * Get the access time of the file.
+   * @return the access time of the file in milliseconds since January 1, 1970 UTC.
+   */
+  final public long getAccessTime() {
+    return access_time;
+  }
+
+  /**
+   * Get FsPermission associated with the file.
+   * @return permission
+   */
+  final public FsPermissionWritable getPermission() {
+    return permission;
+  }
+  
+  /**
+   * Get the owner of the file.
+   * @return owner of the file
+   */
+  final public String getOwner() {
+    return owner;
+  }
+  
+  /**
+   * Get the group associated with the file.
+   * @return group for the file. 
+   */
+  final public String getGroup() {
+    return group;
+  }
+  
+  /**
+   * Check if the local name is empty
+   * @return true if the name is empty
+   */
+  final public boolean isEmptyLocalName() {
+    return path.length == 0;
+  }
+
+  /**
+   * Get the string representation of the local name
+   * @return the local name in string
+   */
+  final public String getLocalName() {
+    return DFSUtil.bytes2String(path);
+  }
+  
+  /**
+   * Get the Java UTF8 representation of the local name
+   * @return the local name in java UTF8
+   */
+  final public byte[] getLocalNameInBytes() {
+    return path;
+  }
+
+  /**
+   * Get the string representation of the full path name
+   * @param parent the parent path
+   * @return the full path in string
+   */
+  final public String getFullName(final String parent) {
+    if (isEmptyLocalName()) {
+      return parent;
+    }
+    
+    StringBuilder fullName = new StringBuilder(parent);
+    if (!parent.endsWith(Path.SEPARATOR)) {
+      fullName.append(Path.SEPARATOR);
+    }
+    fullName.append(getLocalName());
+    return fullName.toString();
+  }
+
+  /**
+   * Get the full path
+   * @param parent the parent path
+   * @return the full path
+   */
+  final public Path getFullPath(final Path parent) {
+    if (isEmptyLocalName()) {
+      return parent;
+    }
+    
+    return new Path(parent, getLocalName());
+  }
+
+  /**
+   * Get the string representation of the symlink.
+   * @return the symlink as a string.
+   */
+  final public String getSymlink() {
+    return DFSUtil.bytes2String(symlink);
+  }
+  
+  final public byte[] getSymlinkInBytes() {
+    return symlink;
+  }
+  
+
+  //////////////////////////////////////////////////
+  // Writable
+  //////////////////////////////////////////////////
+  @Override
+  public void write(DataOutput out) throws IOException {
+    out.writeInt(path.length);
+    out.write(path);
+    out.writeLong(length);
+    out.writeBoolean(isdir);
+    out.writeShort(block_replication);
+    out.writeLong(blocksize);
+    out.writeLong(modification_time);
+    out.writeLong(access_time);
+    permission.write(out);
+    Text.writeString(out, owner);
+    Text.writeString(out, group);
+    out.writeBoolean(isSymlink());
+    if (isSymlink()) {
+      out.writeInt(symlink.length);
+      out.write(symlink);
+    }
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    int numOfBytes = in.readInt();
+    if (numOfBytes == 0) {
+      this.path = EMPTY_NAME;
+    } else {
+      this.path = new byte[numOfBytes];
+      in.readFully(path);
+    }
+    this.length = in.readLong();
+    this.isdir = in.readBoolean();
+    this.block_replication = in.readShort();
+    blocksize = in.readLong();
+    modification_time = in.readLong();
+    access_time = in.readLong();
+    permission.readFields(in);
+    owner = Text.readString(in);
+    group = Text.readString(in);
+    if (in.readBoolean()) {
+      numOfBytes = in.readInt();
+      this.symlink = new byte[numOfBytes];
+      in.readFully(symlink);
+    }
+  }
+
+}
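
A small sketch (not part of the patch) of the intended translation path for a single file status: current protocol type to R23 wire type, Writable serialization, and back. The "example.txt" entry, its sizes, and owner/group strings are made-up values.

import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocolR23Compatible.HdfsFileStatusWritable;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class HdfsFileStatusWritableExample {
  public static void main(String[] args) throws Exception {
    // A made-up current-protocol file status for "example.txt".
    HdfsFileStatus current = new HdfsFileStatus(
        1024L, false, 3, 64L * 1024 * 1024, 0L, 0L,
        new FsPermission((short) 0644), "hdfs", "supergroup",
        null, DFSUtil.string2Bytes("example.txt"));

    // Translate to the R23 wire form and push it through the Writable methods.
    HdfsFileStatusWritable wire = HdfsFileStatusWritable.convertHdfsFileStatus(current);
    DataOutputBuffer out = new DataOutputBuffer();
    wire.write(out);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    HdfsFileStatusWritable decoded = new HdfsFileStatusWritable();
    decoded.readFields(in);

    // Translate back to the current protocol type.
    HdfsFileStatus back = HdfsFileStatusWritable.convertHdfsFileStatus(decoded);
    System.out.println(back.getLocalName()); // example.txt
    System.out.println(back.getLen());       // 1024
  }
}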

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/HdfsLocatedFileStatusWritable.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/HdfsLocatedFileStatusWritable.java?rev=1179877&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/HdfsLocatedFileStatusWritable.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/HdfsLocatedFileStatusWritable.java Thu Oct  6 21:58:22 2011
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocolR23Compatible;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Class that represents the over-the-wire information,
+ * including block locations, for a file.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class HdfsLocatedFileStatusWritable extends HdfsFileStatusWritable {
+  private LocatedBlocksWritable locations;
+  
+  /**
+   * Default constructor
+   */
+  public HdfsLocatedFileStatusWritable() {
+  }
+  
+  /**
+   * Constructor
+   * 
+   * @param length size
+   * @param isdir if this is directory
+   * @param block_replication the file's replication factor
+   * @param blocksize the file's block size
+   * @param modification_time most recent modification time
+   * @param access_time most recent access time
+   * @param permission permission
+   * @param owner owner
+   * @param group group
+   * @param symlink symbolic link
+   * @param path local path name in java UTF8 format 
+   * @param locations block locations
+   */
+  public HdfsLocatedFileStatusWritable(long length, boolean isdir,
+      int block_replication, long blocksize, long modification_time,
+      long access_time, FsPermissionWritable permission, String owner,
+      String group, byte[] symlink, byte[] path,
+      LocatedBlocksWritable locations) {
+    super(length, isdir, block_replication, blocksize, modification_time,
+        access_time, permission, owner, group, symlink, path);
+    this.locations = locations;
+  }
+  
+  static public org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus
+      convertLocatedHdfsFileStatus(HdfsLocatedFileStatusWritable fs) {
+    if (fs == null) return null;
+    return new org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus(fs.getLen(),
+        fs.isDir(), fs.getReplication(), fs.getBlockSize(),
+        fs.getModificationTime(), fs.getAccessTime(),
+        FsPermissionWritable.convertPermission(fs.getPermission()),
+        fs.getOwner(), fs.getGroup(), fs.getSymlinkInBytes(),
+        fs.getLocalNameInBytes(),
+        LocatedBlocksWritable.convertLocatedBlocks(fs.getBlockLocations()));
+  }
+  
+  static public HdfsLocatedFileStatusWritable convertLocatedHdfsFileStatus(
+      org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus fs) {
+    if (fs == null) return null;
+    return new HdfsLocatedFileStatusWritable(fs.getLen(),
+        fs.isDir(), fs.getReplication(), fs.getBlockSize(),
+        fs.getModificationTime(), fs.getAccessTime(),
+        org.apache.hadoop.hdfs.protocolR23Compatible.FsPermissionWritable.
+            convertPermission(fs.getPermission()),
+        fs.getOwner(), fs.getGroup(), fs.getSymlinkInBytes(),
+        fs.getLocalNameInBytes(),
+        LocatedBlocksWritable.convertLocatedBlocks(fs.getBlockLocations()));
+  }
+
+  public LocatedBlocksWritable getBlockLocations() {
+    return locations;
+  }
+
+  //////////////////////////////////////////////////
+  // Writable
+  //////////////////////////////////////////////////
+  @Override
+  public void write(DataOutput out) throws IOException {
+    super.write(out);
+    if (!isDir() && !isSymlink()) {
+      locations.write(out);
+    }
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    super.readFields(in);
+    if (!isDir() && !isSymlink()) {
+      locations = new LocatedBlocksWritable();
+      locations.readFields(in);
+    }
+  }
+}
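
A final sketch (not part of the patch) illustrating that write()/readFields() skip block locations for directories and symlinks, so a located status for a directory can carry null locations; the "subdir" entry and owner/group strings are made-up values.

import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocolR23Compatible.FsPermissionWritable;
import org.apache.hadoop.hdfs.protocolR23Compatible.HdfsLocatedFileStatusWritable;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class HdfsLocatedFileStatusWritableExample {
  public static void main(String[] args) throws Exception {
    // A directory entry: the Writable methods do not serialize locations for
    // directories, so passing null locations is acceptable here.
    HdfsLocatedFileStatusWritable dir = new HdfsLocatedFileStatusWritable(
        0L, true, 0, 0L, 0L, 0L,
        FsPermissionWritable.getDefault(), "hdfs", "supergroup",
        null, DFSUtil.string2Bytes("subdir"), null);

    DataOutputBuffer out = new DataOutputBuffer();
    dir.write(out);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());

    HdfsLocatedFileStatusWritable decoded = new HdfsLocatedFileStatusWritable();
    decoded.readFields(in);
    System.out.println(decoded.isDir());             // true
    System.out.println(decoded.getBlockLocations()); // null, nothing was serialized
  }
}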


