hadoop-hdfs-commits mailing list archives

From: a..@apache.org
Subject: svn commit: r1213389 [2/3] - in /hadoop/common/branches/HDFS-1623/hadoop-hdfs-project: ./ hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/src/ hadoop-hdfs-httpfs/src/main/ hadoop-hdfs-httpfs/src/main/conf/ hadoop-hdfs-httpfs/src/main/java/ hadoop-hdfs-httpfs/sr...
Date: Mon, 12 Dec 2011 19:41:31 GMT
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java Mon Dec 12 19:41:20 2011
@@ -20,7 +20,8 @@ package org.apache.hadoop.hdfs.protocolP
 import java.io.IOException;
 
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto;
@@ -39,8 +40,6 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto;
 import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
@@ -240,14 +239,6 @@ public class NamenodeProtocolServerSideT
       throw new ServiceException(e);
     }
     return VersionResponseProto.newBuilder()
-        .setInfo(convert(info)).build();
-  }
-
-  private NamespaceInfoProto convert(NamespaceInfo info) {
-    return NamespaceInfoProto.newBuilder()
-        .setBlockPoolID(info.getBlockPoolID())
-        .setBuildVersion(info.getBuildVersion())
-        .setDistUpgradeVersion(info.getDistributedUpgradeVersion())
-        .setStorageInfo(PBHelper.convert(info)).build();
+        .setInfo(PBHelper.convert(info)).build();
   }
 }
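
For context, this first hunk drops the translator's private NamespaceInfo
converter in favor of the shared PBHelper. A minimal sketch of the resulting
response path, where 'impl' is an assumed field holding the wrapped
NamenodeProtocol (the name is illustrative, not from this diff):

    // Sketch only: how versionRequest() builds its response after this change.
    NamespaceInfo info;
    try {
      info = impl.versionRequest();   // 'impl' is an assumed delegate field
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    // Conversion is now centralized in PBHelper rather than a local convert().
    return VersionResponseProto.newBuilder()
        .setInfo(PBHelper.convert(info)).build();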

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java Mon Dec 12 19:41:20 2011
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto;
@@ -41,7 +42,6 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto;
 import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
@@ -56,6 +56,7 @@ import org.apache.hadoop.io.retry.RetryP
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.ipc.ProtobufHelper;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -91,11 +92,11 @@ public class NamenodeProtocolTranslatorP
 
   final private NamenodeProtocolPB rpcProxy;
 
-
-
   private static NamenodeProtocolPB createNamenode(
       InetSocketAddress nameNodeAddr, Configuration conf,
       UserGroupInformation ugi) throws IOException {
+    RPC.setProtocolEngine(conf, NamenodeProtocolPB.class,
+        ProtobufRpcEngine.class);
     return RPC.getProxy(NamenodeProtocolPB.class,
         RPC.getProtocolVersion(NamenodeProtocolPB.class), nameNodeAddr, ugi,
         conf, NetUtils.getSocketFactory(conf, NamenodeProtocolPB.class));
@@ -107,17 +108,20 @@ public class NamenodeProtocolTranslatorP
     RetryPolicy createPolicy = RetryPolicies
         .retryUpToMaximumCountWithFixedSleep(5,
             HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
-    Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap = new HashMap<Class<? extends Exception>, RetryPolicy>();
+    Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap = 
+        new HashMap<Class<? extends Exception>, RetryPolicy>();
     remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
         createPolicy);
 
-    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap = new HashMap<Class<? extends Exception>, RetryPolicy>();
+    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap = 
+        new HashMap<Class<? extends Exception>, RetryPolicy>();
     exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
         .retryByRemoteException(RetryPolicies.TRY_ONCE_THEN_FAIL,
             remoteExceptionToPolicyMap));
     RetryPolicy methodPolicy = RetryPolicies.retryByException(
         RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
-    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();
+    Map<String, RetryPolicy> methodNameToPolicyMap = 
+        new HashMap<String, RetryPolicy>();
 
     methodNameToPolicyMap.put("create", methodPolicy);
 
@@ -129,6 +133,10 @@ public class NamenodeProtocolTranslatorP
       Configuration conf, UserGroupInformation ugi) throws IOException {
     rpcProxy = createNamenodeWithRetry(createNamenode(nameNodeAddr, conf, ugi));
   }
+  
+  public NamenodeProtocolTranslatorPB(NamenodeProtocolPB rpcProxy) {
+    this.rpcProxy = rpcProxy;
+  }
 
   public void close() {
     RPC.stopProxy(rpcProxy);

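Two details in this file are easy to miss: RPC.setProtocolEngine() must
register ProtobufRpcEngine for NamenodeProtocolPB before RPC.getProxy() is
called, and the new one-argument constructor lets a caller hand in an
already-wrapped proxy. A hedged usage sketch, assuming nameNodeAddr, conf,
and ugi are already in scope:

    // Sketch only: build a raw PB proxy, then wrap it in the translator.
    RPC.setProtocolEngine(conf, NamenodeProtocolPB.class,
        ProtobufRpcEngine.class);
    NamenodeProtocolPB proxy = RPC.getProxy(NamenodeProtocolPB.class,
        RPC.getProtocolVersion(NamenodeProtocolPB.class), nameNodeAddr, ugi,
        conf, NetUtils.getSocketFactory(conf, NamenodeProtocolPB.class));
    NamenodeProtocolTranslatorPB translator =
        new NamenodeProtocolTranslatorPB(proxy);
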
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java Mon Dec 12 19:41:20 2011
@@ -19,14 +19,45 @@ package org.apache.hadoop.hdfs.protocolP
 
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.EnumSet;
 import java.util.List;
 
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
+import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeActionProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto;
@@ -34,13 +65,22 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto;
@@ -50,22 +90,38 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto;
 import org.apache.hadoop.hdfs.security.token.block.BlockKey;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
+import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
+import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
+import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
+import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
+import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.token.Token;
 
@@ -74,7 +130,11 @@ import com.google.protobuf.ByteString;
 /**
  * Utilities for converting protobuf classes to and from implementation classes.
  */
-class PBHelper {
+public class PBHelper {
+  private static final RegisterCommandProto REG_CMD_PROTO = 
+      RegisterCommandProto.newBuilder().build();
+  private static final RegisterCommand REG_CMD = new RegisterCommand();
+
   private PBHelper() {
     /** Hidden constructor */
   }
@@ -130,6 +190,7 @@ class PBHelper {
         convert(reg.getStorageInfo()), convert(reg.getRole()));
   }
 
+  // DatanodeID
   public static DatanodeID convert(DatanodeIDProto dn) {
     return new DatanodeID(dn.getName(), dn.getStorageID(), dn.getInfoPort(),
         dn.getIpcPort());
@@ -141,6 +202,28 @@ class PBHelper {
         .setStorageID(dn.getStorageID()).build();
   }
 
+  // Arrays of DatanodeID
+  public static DatanodeIDProto[] convert(DatanodeID[] did) {
+    if (did == null) return null;
+    final int len = did.length;
+    DatanodeIDProto[] result = new DatanodeIDProto[len];
+    for (int i = 0; i < len; ++i) {
+      result[i] = convert(did[i]);
+    }
+    return result;
+  }
+  
+  public static DatanodeID[] convert(DatanodeIDProto[] did) {
+    if (did == null) return null;
+    final int len = did.length;
+    DatanodeID[] result = new DatanodeID[len];
+    for (int i = 0; i < len; ++i) {
+      result[i] = convert(did[i]);
+    }
+    return result;
+  }
+  
+  // Block
   public static BlockProto convert(Block b) {
     return BlockProto.newBuilder().setBlockId(b.getBlockId())
         .setGenStamp(b.getGenerationStamp()).setNumBytes(b.getNumBytes())
@@ -148,7 +231,7 @@ class PBHelper {
   }
 
   public static Block convert(BlockProto b) {
-    return new Block(b.getBlockId(), b.getGenStamp(), b.getNumBytes());
+    return new Block(b.getBlockId(), b.getNumBytes(), b.getGenStamp());
   }
 
   public static BlockWithLocationsProto convert(BlockWithLocations blk) {
@@ -295,19 +378,27 @@ class PBHelper {
       return new NamenodeCommand(cmd.getAction());
     }
   }
-
-  public static ExtendedBlockProto convert(ExtendedBlock b) {
-    return ExtendedBlockProto.newBuilder().setBlockId(b.getBlockId())
-        .setGenerationStamp(b.getGenerationStamp())
-        .setNumBytes(b.getNumBytes()).setPoolId(b.getBlockPoolId()).build();
+  
+  public static ExtendedBlock convert(ExtendedBlockProto eb) {
+    if (eb == null) return null;
+    return new ExtendedBlock(eb.getPoolId(), eb.getBlockId(), eb.getNumBytes(),
+        eb.getGenerationStamp());
   }
-
-  public static ExtendedBlock convert(ExtendedBlockProto b) {
-    return new ExtendedBlock(b.getPoolId(), b.getBlockId(), b.getNumBytes(),
-        b.getGenerationStamp());
+  
+  public static ExtendedBlockProto convert(final ExtendedBlock b) {
+    if (b == null) return null;
+    return ExtendedBlockProto.newBuilder().
+        setPoolId(b.getBlockPoolId()).
+        setBlockId(b.getBlockId()).
+        setNumBytes(b.getNumBytes()).
+        setGenerationStamp(b.getGenerationStamp()).
+        build();
   }
-
+  
   public static RecoveringBlockProto convert(RecoveringBlock b) {
+    if (b == null) {
+      return null;
+    }
     LocatedBlockProto lb = PBHelper.convert((LocatedBlock)b);
     return RecoveringBlockProto.newBuilder().setBlock(lb)
         .setNewGenStamp(b.getNewGenerationStamp()).build();
@@ -318,6 +409,62 @@ class PBHelper {
     DatanodeInfo[] locs = convert(b.getBlock().getLocsList());
     return new RecoveringBlock(block, locs, b.getNewGenStamp());
   }
+  
+  public static DatanodeInfoProto.AdminState convert(
+      final DatanodeInfo.AdminStates inAs) {
+    switch (inAs) {
+    case NORMAL: return  DatanodeInfoProto.AdminState.NORMAL;
+    case DECOMMISSION_INPROGRESS: 
+        return DatanodeInfoProto.AdminState.DECOMMISSION_INPROGRESS;
+    case DECOMMISSIONED: return DatanodeInfoProto.AdminState.DECOMMISSIONED;
+    default: return DatanodeInfoProto.AdminState.NORMAL;
+    }
+  }
+  
+  static public DatanodeInfo convert(DatanodeInfoProto di) {
+    if (di == null) return null;
+    return new DatanodeInfo(
+        PBHelper.convert(di.getId()),
+        di.getLocation(), di.getHostName(),
+        di.getCapacity(), di.getDfsUsed(), di.getRemaining(),
+        di.getBlockPoolUsed(), di.getLastUpdate(), di.getXceiverCount(),
+        PBHelper.convert(di.getAdminState()));
+  }
+  
+  static public DatanodeInfoProto convertDatanodeInfo(DatanodeInfo di) {
+    if (di == null) return null;
+    return DatanodeInfoProto.newBuilder().
+        setId(PBHelper.convert((DatanodeID) di)).
+        setLocation(di.getNetworkLocation()).
+        setHostName(di.getHostName()).
+        setCapacity(di.getCapacity()).
+        setDfsUsed(di.getDfsUsed()).
+        setRemaining(di.getRemaining()).
+        setBlockPoolUsed(di.getBlockPoolUsed()).
+        setLastUpdate(di.getLastUpdate()).
+        setXceiverCount(di.getXceiverCount()).
+        setAdminState(PBHelper.convert(di.getAdminState())).
+        build();
+  }
+  
+  
+  static public DatanodeInfo[] convert(DatanodeInfoProto di[]) {
+    if (di == null) return null;
+    DatanodeInfo[] result = new DatanodeInfo[di.length];
+    for (int i = 0; i < di.length; i++) {
+      result[i] = convert(di[i]);
+    }    
+    return result;
+  }
+  
+  static public DatanodeInfoProto[] convert(DatanodeInfo[] di) {
+    if (di == null) return null;
+    DatanodeInfoProto[] result = new DatanodeInfoProto[di.length];
+    for (int i = 0; i < di.length; i++) {
+      result[i] = PBHelper.convertDatanodeInfo(di[i]);
+    }
+    return result;
+  }
 
   public static DatanodeInfo[] convert(List<DatanodeInfoProto> list) {
     DatanodeInfo[] info = new DatanodeInfo[list.size()];
@@ -326,21 +473,12 @@ class PBHelper {
     }
     return info;
   }
-
-  public static DatanodeInfo convert(DatanodeInfoProto info) {
-    DatanodeIDProto dnId = info.getId();
-    return new DatanodeInfo(dnId.getName(), dnId.getStorageID(),
-        dnId.getInfoPort(), dnId.getIpcPort(), info.getCapacity(),
-        info.getDfsUsed(), info.getRemaining(), info.getBlockPoolUsed(),
-        info.getLastUpdate(), info.getXceiverCount(), info.getLocation(),
-        info.getHostName(), convert(info.getAdminState()));
-  }
   
   public static DatanodeInfoProto convert(DatanodeInfo info) {
-    return DatanodeInfoProto.newBuilder()
-        .setAdminState(PBHelper.convert(info.getAdminState()))
-        .setBlockPoolUsed(info.getBlockPoolUsed())
-        .setCapacity(info.getCapacity())
+    DatanodeInfoProto.Builder builder = DatanodeInfoProto.newBuilder();
+    builder.setBlockPoolUsed(info.getBlockPoolUsed());
+    builder.setAdminState(PBHelper.convert(info.getAdminState()));
+    builder.setCapacity(info.getCapacity())
         .setDfsUsed(info.getDfsUsed())
         .setHostName(info.getHostName())
         .setId(PBHelper.convert((DatanodeID)info))
@@ -349,6 +487,7 @@ class PBHelper {
         .setRemaining(info.getRemaining())
         .setXceiverCount(info.getXceiverCount())
         .build();
+    return builder.build();
   }
 
   public static AdminStates convert(AdminState adminState) {
@@ -363,36 +502,37 @@ class PBHelper {
     }
   }
   
-  public static AdminState convert(AdminStates adminState) {
-    switch(adminState) {
-    case DECOMMISSION_INPROGRESS:
-      return AdminState.DECOMMISSION_INPROGRESS;
-    case DECOMMISSIONED:
-      return AdminState.DECOMMISSIONED;
-    case NORMAL:
-    default:
-      return AdminState.NORMAL;
-    }
-  }
-
   public static LocatedBlockProto convert(LocatedBlock b) {
+    if (b == null) return null;
     Builder builder = LocatedBlockProto.newBuilder();
     DatanodeInfo[] locs = b.getLocations();
-    for(DatanodeInfo loc : locs) {
-      builder.addLocs(PBHelper.convert(loc));
+    for (int i = 0; i < locs.length; i++) {
+      builder.addLocs(i, PBHelper.convert(locs[i]));
     }
     return builder.setB(PBHelper.convert(b.getBlock()))
         .setBlockToken(PBHelper.convert(b.getBlockToken()))
         .setCorrupt(b.isCorrupt()).setOffset(b.getStartOffset()).build();
   }
+  
+  public static LocatedBlock convert(LocatedBlockProto proto) {
+    if (proto == null) return null;
+    List<DatanodeInfoProto> locs = proto.getLocsList();
+    DatanodeInfo[] targets = new DatanodeInfo[locs.size()];
+    for (int i = 0; i < locs.size(); i++) {
+      targets[i] = PBHelper.convert(locs.get(i));
+    }
+    LocatedBlock lb = new LocatedBlock(PBHelper.convert(proto.getB()), targets,
+        proto.getOffset(), proto.getCorrupt());
+    lb.setBlockToken(PBHelper.convert(proto.getBlockToken()));
+    return lb;
+  }
 
-  public static BlockTokenIdentifierProto convert(
-      Token<BlockTokenIdentifier> token) {
-    ByteString tokenId = ByteString.copyFrom(token.getIdentifier());
-    ByteString password = ByteString.copyFrom(token.getPassword());
-    return BlockTokenIdentifierProto.newBuilder().setIdentifier(tokenId)
-        .setKind(token.getKind().toString()).setPassword(password)
-        .setService(token.getService().toString()).build();
+  public static BlockTokenIdentifierProto convert(Token<?> tok) {
+    return BlockTokenIdentifierProto.newBuilder().
+              setIdentifier(ByteString.copyFrom(tok.getIdentifier())).
+              setPassword(ByteString.copyFrom(tok.getPassword())).
+              setKind(tok.getKind().toString()).
+              setService(tok.getService().toString()).build(); 
   }
   
   public static Token<BlockTokenIdentifier> convert(
@@ -402,6 +542,14 @@ class PBHelper {
         blockToken.getKind()), new Text(blockToken.getService()));
   }
 
+  
+  public static Token<DelegationTokenIdentifier> convertDelegationToken(
+      BlockTokenIdentifierProto blockToken) {
+    return new Token<DelegationTokenIdentifier>(blockToken.getIdentifier()
+        .toByteArray(), blockToken.getPassword().toByteArray(), new Text(
+        blockToken.getKind()), new Text(blockToken.getService()));
+  }
+
   public static ReplicaState convert(ReplicaStateProto state) {
     switch (state) {
     case RBW:
@@ -417,4 +565,641 @@ class PBHelper {
       return ReplicaState.FINALIZED;
     }
   }
+
+  public static ReplicaStateProto convert(ReplicaState state) {
+    switch (state) {
+    case RBW:
+      return ReplicaStateProto.RBW;
+    case RUR:
+      return ReplicaStateProto.RUR;
+    case RWR:
+      return ReplicaStateProto.RWR;
+    case TEMPORARY:
+      return ReplicaStateProto.TEMPORARY;
+    case FINALIZED:
+    default:
+      return ReplicaStateProto.FINALIZED;
+    }
+  }
+  
+  public static DatanodeRegistrationProto convert(
+      DatanodeRegistration registration) {
+    DatanodeRegistrationProto.Builder builder = DatanodeRegistrationProto
+        .newBuilder();
+    return builder.setDatanodeID(PBHelper.convert((DatanodeID) registration))
+        .setStorageInfo(PBHelper.convert(registration.storageInfo))
+        .setKeys(PBHelper.convert(registration.exportedKeys)).build();
+  }
+
+  public static DatanodeRegistration convert(DatanodeRegistrationProto proto) {
+    return new DatanodeRegistration(PBHelper.convert(proto.getDatanodeID()),
+        PBHelper.convert(proto.getStorageInfo()), PBHelper.convert(proto
+            .getKeys()));
+  }
+
+  public static DatanodeCommand convert(DatanodeCommandProto proto) {
+    switch (proto.getCmdType()) {
+    case BalancerBandwidthCommand:
+      return PBHelper.convert(proto.getBalancerCmd());
+    case BlockCommand:
+      return PBHelper.convert(proto.getBlkCmd());
+    case BlockRecoveryCommand:
+      return PBHelper.convert(proto.getRecoveryCmd());
+    case FinalizeCommand:
+      return PBHelper.convert(proto.getFinalizeCmd());
+    case KeyUpdateCommand:
+      return PBHelper.convert(proto.getKeyUpdateCmd());
+    case RegisterCommand:
+      return REG_CMD;
+    case UpgradeCommand:
+      return PBHelper.convert(proto.getUpgradeCmd());
+    }
+    return null;
+  }
+  
+  public static BalancerBandwidthCommandProto convert(
+      BalancerBandwidthCommand bbCmd) {
+    return BalancerBandwidthCommandProto.newBuilder()
+        .setBandwidth(bbCmd.getBalancerBandwidthValue()).build();
+  }
+
+  public static KeyUpdateCommandProto convert(KeyUpdateCommand cmd) {
+    return KeyUpdateCommandProto.newBuilder()
+        .setKeys(PBHelper.convert(cmd.getExportedKeys())).build();
+  }
+
+  public static BlockRecoveryCommandProto convert(BlockRecoveryCommand cmd) {
+    BlockRecoveryCommandProto.Builder builder = BlockRecoveryCommandProto
+        .newBuilder();
+    for (RecoveringBlock b : cmd.getRecoveringBlocks()) {
+      builder.addBlocks(PBHelper.convert(b));
+    }
+    return builder.build();
+  }
+
+  public static FinalizeCommandProto convert(FinalizeCommand cmd) {
+    return FinalizeCommandProto.newBuilder()
+        .setBlockPoolId(cmd.getBlockPoolId()).build();
+  }
+
+  public static BlockCommandProto convert(BlockCommand cmd) {
+    BlockCommandProto.Builder builder = BlockCommandProto.newBuilder()
+        .setBlockPoolId(cmd.getBlockPoolId());
+    switch (cmd.getAction()) {
+    case DatanodeProtocol.DNA_TRANSFER:
+      builder.setAction(BlockCommandProto.Action.TRANSFER);
+      break;
+    case DatanodeProtocol.DNA_INVALIDATE:
+      builder.setAction(BlockCommandProto.Action.INVALIDATE);
+      break;
+    }
+    Block[] blocks = cmd.getBlocks();
+    for (int i = 0; i < blocks.length; i++) {
+      builder.addBlocks(PBHelper.convert(blocks[i]));
+    }
+    builder.addAllTargets(PBHelper.convert(cmd.getTargets()));
+    return builder.build();
+  }
+
+  private static List<DatanodeInfosProto> convert(DatanodeInfo[][] targets) {
+    DatanodeInfosProto[] ret = new DatanodeInfosProto[targets.length];
+    for (int i = 0; i < targets.length; i++) {
+      ret[i] = DatanodeInfosProto.newBuilder()
+          .addAllDatanodes(Arrays.asList(PBHelper.convert(targets[i]))).build();
+    }
+    return Arrays.asList(ret);
+  }
+
+  public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
+    DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
+    switch (datanodeCommand.getAction()) {
+    case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
+      builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
+          .setBalancerCmd(
+              PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
+      break;
+    case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
+      builder
+          .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
+          .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
+      break;
+    case DatanodeProtocol.DNA_RECOVERBLOCK:
+      builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
+          .setRecoveryCmd(
+              PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
+      break;
+    case DatanodeProtocol.DNA_FINALIZE:
+      builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
+          .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
+      break;
+    case DatanodeProtocol.DNA_REGISTER:
+      builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
+          .setRegisterCmd(REG_CMD_PROTO);
+      break;
+    case DatanodeProtocol.DNA_TRANSFER:
+    case DatanodeProtocol.DNA_INVALIDATE:
+      builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).setBlkCmd(
+          PBHelper.convert((BlockCommand) datanodeCommand));
+      break;
+    case DatanodeProtocol.DNA_SHUTDOWN: //Not expected
+    case DatanodeProtocol.DNA_UNKNOWN: //Not expected
+    }
+    return builder.build();
+  }
+
+  public static UpgradeCommand convert(UpgradeCommandProto upgradeCmd) {
+    int action = UpgradeCommand.UC_ACTION_UNKNOWN;
+    switch (upgradeCmd.getAction()) {
+    case REPORT_STATUS:
+      action = UpgradeCommand.UC_ACTION_REPORT_STATUS;
+      break;
+    case START_UPGRADE:
+      action = UpgradeCommand.UC_ACTION_START_UPGRADE;
+    }
+    return new UpgradeCommand(action, upgradeCmd.getVersion(),
+        (short) upgradeCmd.getUpgradeStatus());
+  }
+
+  public static KeyUpdateCommand convert(KeyUpdateCommandProto keyUpdateCmd) {
+    return new KeyUpdateCommand(PBHelper.convert(keyUpdateCmd.getKeys()));
+  }
+
+  public static FinalizeCommand convert(FinalizeCommandProto finalizeCmd) {
+    return new FinalizeCommand(finalizeCmd.getBlockPoolId());
+  }
+
+  public static BlockRecoveryCommand convert(
+      BlockRecoveryCommandProto recoveryCmd) {
+    List<RecoveringBlockProto> list = recoveryCmd.getBlocksList();
+    List<RecoveringBlock> recoveringBlocks = new ArrayList<RecoveringBlock>(
+        list.size());
+    for (int i = 0; i < list.size(); i++) {
+      recoveringBlocks.add(PBHelper.convert(list.get(i)));
+    }
+    return new BlockRecoveryCommand(recoveringBlocks);
+  }
+
+  public static BlockCommand convert(BlockCommandProto blkCmd) {
+    List<BlockProto> blockProtoList = blkCmd.getBlocksList();
+    List<DatanodeInfosProto> targetList = blkCmd.getTargetsList();
+    DatanodeInfo[][] targets = new DatanodeInfo[blockProtoList.size()][];
+    Block[] blocks = new Block[blockProtoList.size()];
+    for (int i = 0; i < blockProtoList.size(); i++) {
+      targets[i] = PBHelper.convert(targetList.get(i));
+      blocks[i] = PBHelper.convert(blockProtoList.get(i));
+    }
+    int action = DatanodeProtocol.DNA_UNKNOWN;
+    switch (blkCmd.getAction()) {
+    case TRANSFER:
+      action = DatanodeProtocol.DNA_TRANSFER;
+      break;
+    case INVALIDATE:
+      action = DatanodeProtocol.DNA_INVALIDATE;
+      break;
+    }
+    return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets);
+  }
+
+  public static DatanodeInfo[] convert(DatanodeInfosProto datanodeInfosProto) {
+    List<DatanodeInfoProto> proto = datanodeInfosProto.getDatanodesList();
+    DatanodeInfo[] infos = new DatanodeInfo[proto.size()];
+    for (int i = 0; i < infos.length; i++) {
+      infos[i] = PBHelper.convert(proto.get(i));
+    }
+    return infos;
+  }
+
+  public static BalancerBandwidthCommand convert(
+      BalancerBandwidthCommandProto balancerCmd) {
+    return new BalancerBandwidthCommand(balancerCmd.getBandwidth());
+  }
+
+  public static ReceivedDeletedBlockInfoProto convert(
+      ReceivedDeletedBlockInfo receivedDeletedBlockInfo) {
+    return ReceivedDeletedBlockInfoProto.newBuilder()
+        .setBlock(PBHelper.convert(receivedDeletedBlockInfo.getBlock()))
+        .setDeleteHint(receivedDeletedBlockInfo.getDelHints()).build();
+  }
+
+  public static UpgradeCommandProto convert(UpgradeCommand comm) {
+    UpgradeCommandProto.Builder builder = UpgradeCommandProto.newBuilder()
+        .setVersion(comm.getVersion())
+        .setUpgradeStatus(comm.getCurrentStatus());
+    switch (comm.getAction()) {
+    case UpgradeCommand.UC_ACTION_REPORT_STATUS:
+      builder.setAction(UpgradeCommandProto.Action.REPORT_STATUS);
+      break;
+    case UpgradeCommand.UC_ACTION_START_UPGRADE:
+      builder.setAction(UpgradeCommandProto.Action.START_UPGRADE);
+      break;
+    default:
+      builder.setAction(UpgradeCommandProto.Action.UNKNOWN);
+      break;
+    }
+    return builder.build();
+  }
+
+  public static ReceivedDeletedBlockInfo convert(
+      ReceivedDeletedBlockInfoProto proto) {
+    return new ReceivedDeletedBlockInfo(PBHelper.convert(proto.getBlock()),
+        proto.getDeleteHint());
+  }
+  
+  public static NamespaceInfoProto convert(NamespaceInfo info) {
+    return NamespaceInfoProto.newBuilder()
+        .setBlockPoolID(info.getBlockPoolID())
+        .setBuildVersion(info.getBuildVersion())
+        .setDistUpgradeVersion(info.getDistributedUpgradeVersion())
+        .setStorageInfo(PBHelper.convert((StorageInfo)info)).build();
+  }
+  
+  // Located Block Arrays and Lists
+  public static LocatedBlockProto[] convertLocatedBlock(LocatedBlock[] lb) {
+    if (lb == null) return null;
+    final int len = lb.length;
+    LocatedBlockProto[] result = new LocatedBlockProto[len];
+    for (int i = 0; i < len; ++i) {
+      result[i] = PBHelper.convert(lb[i]);
+    }
+    return result;
+  }
+  
+  public static LocatedBlock[] convertLocatedBlock(LocatedBlockProto[] lb) {
+    if (lb == null) return null;
+    final int len = lb.length;
+    LocatedBlock[] result = new LocatedBlock[len];
+    for (int i = 0; i < len; ++i) {
+      result[i] = new LocatedBlock(
+          PBHelper.convert(lb[i].getB()),
+          PBHelper.convert(lb[i].getLocsList()), 
+          lb[i].getOffset(), lb[i].getCorrupt());
+    }
+    return result;
+  }
+  
+  public static List<LocatedBlock> convertLocatedBlock(
+      List<LocatedBlockProto> lb) {
+    if (lb == null) return null;
+    final int len = lb.size();
+    List<LocatedBlock> result = new ArrayList<LocatedBlock>(len);
+    for (int i = 0; i < len; ++i) {
+      result.add(PBHelper.convert(lb.get(i)));
+    }
+    return result;
+  }
+  
+  public static List<LocatedBlockProto> convertLocatedBlock2(List<LocatedBlock> lb) {
+    if (lb == null) return null;
+    final int len = lb.size();
+    List<LocatedBlockProto> result = new ArrayList<LocatedBlockProto>(len);
+    for (int i = 0; i < len; ++i) {
+      result.add(PBHelper.convert(lb.get(i)));
+    }
+    return result;
+  }
+  
+  
+  // LocatedBlocks
+  public static LocatedBlocks convert(LocatedBlocksProto lb) {
+    if (lb == null) {
+      return null;
+    }
+    return new LocatedBlocks(
+        lb.getFileLength(), lb.getUnderConstruction(),
+        PBHelper.convertLocatedBlock(lb.getBlocksList()),
+        PBHelper.convert(lb.getLastBlock()),
+        lb.getIsLastBlockComplete());
+  }
+  
+  public static LocatedBlocksProto convert(LocatedBlocks lb) {
+    if (lb == null) {
+      return null;
+    }
+    return LocatedBlocksProto.newBuilder().
+      setFileLength(lb.getFileLength()).
+      setUnderConstruction(lb.isUnderConstruction()).
+      addAllBlocks(PBHelper.convertLocatedBlock2(lb.getLocatedBlocks())).
+      setLastBlock(PBHelper.convert(lb.getLastLocatedBlock())).
+      setIsLastBlockComplete(lb.isLastBlockComplete()).build();
+  }
+  
+  public static FsServerDefaults convert(FsServerDefaultsProto fs) {
+    if (fs == null) return null;
+    return new FsServerDefaults(
+        fs.getBlockSize(), fs.getBytesPerChecksum(), 
+        fs.getWritePacketSize(), (short) fs.getReplication(),
+        fs.getFileBufferSize());
+  }
+  
+  public static FsServerDefaultsProto convert(FsServerDefaults fs) {
+    if (fs == null) return null;
+    return FsServerDefaultsProto.newBuilder().
+      setBlockSize(fs.getBlockSize()).
+      setBytesPerChecksum(fs.getBytesPerChecksum()).
+      setWritePacketSize(fs.getWritePacketSize()).
+      setReplication(fs.getReplication()).
+      setFileBufferSize(fs.getFileBufferSize()).build();
+  }
+  
+  public static FsPermissionProto convert(FsPermission p) {
+    if (p == null) return null;
+    return FsPermissionProto.newBuilder().setPerm(p.toShort()).build();
+  }
+  
+  public static FsPermission convert(FsPermissionProto p) {
+    if (p == null) return null;
+    return new FsPermission((short)p.getPerm());
+  }
+  
+  
+  // The createFlag field in PB is a bitmask whose values are the same as
+  // the enum values of CreateFlag.
+  public static int convertCreateFlag(EnumSetWritable<CreateFlag> flag) {
+    int value = 0;
+    if (flag.contains(CreateFlag.APPEND)) {
+      value |= CreateFlagProto.APPEND.getNumber();
+    }
+    if (flag.contains(CreateFlag.CREATE)) {
+      value |= CreateFlagProto.CREATE.getNumber();
+    }
+    if (flag.contains(CreateFlag.OVERWRITE)) {
+      value |= CreateFlagProto.OVERWRITE.getNumber();
+    }
+    return value;
+  }
+  
+  public static EnumSetWritable<CreateFlag> convert(int flag) {
+    EnumSet<CreateFlag> result = EnumSet.noneOf(CreateFlag.class);
+    // Mirror of convertCreateFlag: decode all three flag bits.
+    if ((flag & CreateFlagProto.APPEND_VALUE) == CreateFlagProto.APPEND_VALUE) {
+      result.add(CreateFlag.APPEND);
+    }
+    if ((flag & CreateFlagProto.CREATE_VALUE) == CreateFlagProto.CREATE_VALUE) {
+      result.add(CreateFlag.CREATE);
+    }
+    if ((flag & CreateFlagProto.OVERWRITE_VALUE) == CreateFlagProto.OVERWRITE_VALUE) {
+      result.add(CreateFlag.OVERWRITE);
+    }
+    return new EnumSetWritable<CreateFlag>(result);
+  }
+  
+  
+  public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
+    if (fs == null)
+      return null;
+    if (fs.hasLocations()) {
+      return new HdfsLocatedFileStatus(
+          fs.getLength(), fs.getFileType().equals(FileType.IS_DIR), 
+          fs.getBlockReplication(), fs.getBlocksize(),
+          fs.getModificationTime(), fs.getAccessTime(),
+          PBHelper.convert(fs.getPermission()), fs.getOwner(), fs.getGroup(), 
+          fs.getFileType().equals(FileType.IS_SYMLINK) ? 
+              fs.getSymlink().toByteArray() : null,
+          fs.getPath().toByteArray(),
+          PBHelper.convert(fs.getLocations()));
+    }
+    return new HdfsFileStatus(
+      fs.getLength(), fs.getFileType().equals(FileType.IS_DIR), 
+      fs.getBlockReplication(), fs.getBlocksize(),
+      fs.getModificationTime(), fs.getAccessTime(),
+      PBHelper.convert(fs.getPermission()), fs.getOwner(), fs.getGroup(), 
+      fs.getFileType().equals(FileType.IS_SYMLINK) ? 
+          fs.getSymlink().toByteArray() : null,
+      fs.getPath().toByteArray());
+  }
+
+  public static HdfsFileStatusProto convert(HdfsFileStatus fs) {
+    if (fs == null)
+      return null;
+    FileType fType = FileType.IS_FILE;
+    if (fs.isDir()) {
+      fType = FileType.IS_DIR;
+    } else if (fs.isSymlink()) {
+      fType = FileType.IS_SYMLINK;
+    }
+
+    HdfsFileStatusProto.Builder builder = HdfsFileStatusProto.newBuilder().
+      setLength(fs.getLen()).
+      setFileType(fType).
+      setBlockReplication(fs.getReplication()).
+      setBlocksize(fs.getBlockSize()).
+      setModificationTime(fs.getModificationTime()).
+      setAccessTime(fs.getAccessTime()).
+      setPermission(PBHelper.convert(fs.getPermission())).
+      setOwner(fs.getOwner()).
+      setGroup(fs.getGroup()).
+      setPath(ByteString.copyFrom(fs.getLocalNameInBytes()));
+    if (fs.isSymlink()) {
+      // Guard: getSymlinkInBytes() is null for non-symlinks.
+      builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes()));
+    }
+    if (fs instanceof HdfsLocatedFileStatus) {
+      LocatedBlocks locations = ((HdfsLocatedFileStatus) fs).getBlockLocations();
+      if (locations != null) {
+        builder.setLocations(PBHelper.convert(locations));
+      }
+    }
+    return builder.build();
+  }
+  
+  public static HdfsFileStatusProto[] convert(HdfsFileStatus[] fs) {
+    if (fs == null) return null;
+    final int len = fs.length;
+    HdfsFileStatusProto[] result = new HdfsFileStatusProto[len];
+    for (int i = 0; i < len; ++i) {
+      result[i] = PBHelper.convert(fs[i]);
+    }
+    return result;
+  }
+  
+  public static HdfsFileStatus[] convert(HdfsFileStatusProto[] fs) {
+    if (fs == null) return null;
+    final int len = fs.length;
+    HdfsFileStatus[] result = new HdfsFileStatus[len];
+    for (int i = 0; i < len; ++i) {
+      result[i] = PBHelper.convert(fs[i]);
+    }
+    return result;
+  }
+  
+  public static DirectoryListing convert(DirectoryListingProto dl) {
+    if (dl == null)
+      return null;
+    return new DirectoryListing(
+        PBHelper.convert(dl.getPartialListingList()
+            .toArray(new HdfsFileStatusProto[0])),
+        dl.getRemainingEntries());
+  }
+
+  public static DirectoryListingProto convert(DirectoryListing d) {
+    if (d == null)
+      return null;
+    return DirectoryListingProto.newBuilder().
+        addAllPartialListing(Arrays.asList(
+            PBHelper.convert(d.getPartialListing()))).
+        setRemainingEntries(d.getRemainingEntries()).
+        build();
+  }
+
+  public static long[] convert(GetFsStatsResponseProto res) {
+    long[] result = new long[6];
+    result[ClientProtocol.GET_STATS_CAPACITY_IDX] = res.getCapacity();
+    result[ClientProtocol.GET_STATS_USED_IDX] = res.getUsed();
+    result[ClientProtocol.GET_STATS_REMAINING_IDX] = res.getRemaining();
+    result[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX] = res.getUnderReplicated();
+    result[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX] = res.getCorruptBlocks();
+    result[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX] = res.getMissingBlocks();
+    return result;
+  }
+  
+  public static GetFsStatsResponseProto convert(long[] fsStats) {
+    GetFsStatsResponseProto.Builder result = GetFsStatsResponseProto
+        .newBuilder();
+    if (fsStats.length >= ClientProtocol.GET_STATS_CAPACITY_IDX + 1)
+      result.setCapacity(fsStats[ClientProtocol.GET_STATS_CAPACITY_IDX]);
+    if (fsStats.length >= ClientProtocol.GET_STATS_USED_IDX + 1)
+      result.setUsed(fsStats[ClientProtocol.GET_STATS_USED_IDX]);
+    if (fsStats.length >= ClientProtocol.GET_STATS_REMAINING_IDX + 1)
+      result.setRemaining(fsStats[ClientProtocol.GET_STATS_REMAINING_IDX]);
+    if (fsStats.length >= ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX + 1)
+      result.setUnderReplicated(
+              fsStats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX]);
+    if (fsStats.length >= ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX + 1)
+      result.setCorruptBlocks(
+          fsStats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX]);
+    if (fsStats.length >= ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX + 1)
+      result.setMissingBlocks(
+          fsStats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX]);
+    return result.build();
+  }
+  
+  public static DatanodeReportTypeProto convert(DatanodeReportType t) {
+    switch (t) {
+    case ALL: return DatanodeReportTypeProto.ALL;
+    case LIVE: return DatanodeReportTypeProto.LIVE;
+    case DEAD: return DatanodeReportTypeProto.DEAD;
+    default: 
+      throw new IllegalArgumentException("Unexpected datanode report type: " + t);
+    }
+  }
+  
+  public static DatanodeReportType convert(DatanodeReportTypeProto t) {
+    switch (t) {
+    case ALL: return DatanodeReportType.ALL;
+    case LIVE: return DatanodeReportType.LIVE;
+    case DEAD: return DatanodeReportType.DEAD;
+    default: 
+      throw new IllegalArgumentException("Unexpected datanode report type: " + t);
+    }
+  }
+
+  public static SafeModeActionProto convert(
+      SafeModeAction a) {
+    switch (a) {
+    case SAFEMODE_LEAVE:
+      return SafeModeActionProto.SAFEMODE_LEAVE;
+    case SAFEMODE_ENTER:
+      return SafeModeActionProto.SAFEMODE_ENTER;
+    case SAFEMODE_GET:
+      return SafeModeActionProto.SAFEMODE_GET;
+    default:
+      throw new IllegalArgumentException("Unexpected SafeModeAction: " + a);
+    }
+  }
+  
+  public static SafeModeAction convert(
+      ClientNamenodeProtocolProtos.SafeModeActionProto a) {
+    switch (a) {
+    case SAFEMODE_LEAVE:
+      return SafeModeAction.SAFEMODE_LEAVE;
+    case SAFEMODE_ENTER:
+      return SafeModeAction.SAFEMODE_ENTER;
+    case SAFEMODE_GET:
+      return SafeModeAction.SAFEMODE_GET;
+    default:
+      throw new IllegalArgumentException("Unexpected SafeModeAction: " + a);
+    }
+  }
+  
+  public static UpgradeActionProto convert(
+      UpgradeAction a) {
+    switch (a) {
+    case GET_STATUS:
+      return UpgradeActionProto.GET_STATUS;
+    case DETAILED_STATUS:
+      return UpgradeActionProto.DETAILED_STATUS;
+    case FORCE_PROCEED:
+      return UpgradeActionProto.FORCE_PROCEED;
+    default:
+      throw new IllegalArgumentException("Unexpected UpgradeAction: " + a);
+    }
+  }
+  
+  
+  public static UpgradeAction convert(
+      UpgradeActionProto a) {
+    switch (a) {
+    case GET_STATUS:
+      return UpgradeAction.GET_STATUS;
+    case DETAILED_STATUS:
+      return UpgradeAction.DETAILED_STATUS;
+    case FORCE_PROCEED:
+      return UpgradeAction.FORCE_PROCEED;
+    default:
+      throw new IllegalArgumentException("Unexpected UpgradeAction: " + a);
+    }
+  }
+
+  public static UpgradeStatusReportProto convert(UpgradeStatusReport r) {
+    if (r == null)
+      return null;
+    return UpgradeStatusReportProto.newBuilder()
+        .setVersion(r.getVersion())
+        .setUpgradeStatus(r.getUpgradeStatus())
+        .setFinalized(r.isFinalized())
+        .build();
+  }
+  
+  public static UpgradeStatusReport convert(UpgradeStatusReportProto r) {
+    if (r == null) return null;
+    return new UpgradeStatusReport(r.getVersion(),
+        (short) r.getUpgradeStatus(), r.getFinalized());
+  }
+  
+  public static CorruptFileBlocks convert(CorruptFileBlocksProto c) {
+    if (c == null)
+      return null;
+    return new CorruptFileBlocks(
+        c.getFilesList().toArray(new String[0]), c.getCookie());
+  }
+
+  public static CorruptFileBlocksProto convert(CorruptFileBlocks c) {
+    if (c == null)
+      return null;
+    return CorruptFileBlocksProto.newBuilder().
+        addAllFiles(Arrays.asList(c.getFiles())).
+        setCookie(c.getCookie()).
+        build();
+  }
+  
+  public static ContentSummary convert(ContentSummaryProto cs) {
+    if (cs == null) return null;
+    return new ContentSummary(
+      cs.getLength(), cs.getFileCount(), cs.getDirectoryCount(), cs.getQuota(),
+      cs.getSpaceConsumed(), cs.getSpaceQuota());
+  }
+  
+  public static ContentSummaryProto convert(ContentSummary cs) {
+    if (cs == null) return null;
+    return ContentSummaryProto.newBuilder().
+        setLength(cs.getLength()).
+        setFileCount(cs.getFileCount()).
+        setDirectoryCount(cs.getDirectoryCount()).
+        setQuota(cs.getQuota()).
+        setSpaceConsumed(cs.getSpaceConsumed()).
+        setSpaceQuota(cs.getSpaceQuota()).
+        build();
+  }
+
+  public static NNHAStatusHeartbeat convert(NNHAStatusHeartbeatProto s) {
+    if (s == null) return null;
+    switch (s.getState()) {
+    case ACTIVE:
+      return new NNHAStatusHeartbeat(NNHAStatusHeartbeat.State.ACTIVE,
+          s.getTxid());
+    case STANDBY:
+      return new NNHAStatusHeartbeat(NNHAStatusHeartbeat.State.STANDBY,
+          s.getTxid());
+    default:
+      throw new IllegalArgumentException(
+          "Unexpected NNHAStatusHeartbeat.State: " + s.getState());
+    }
+  }
 }
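
PBHelper's converters come in to/from pairs, so a value should survive a round
trip through its proto form; the hunk above at convert(BlockProto) restores
that property for Block, whose constructor takes (blockId, numBytes, genStamp)
in that order. A minimal round-trip sketch with illustrative values:

    // Sketch only: a Block should round-trip through BlockProto unchanged.
    Block b = new Block(1L, 1024L, 100L);   // blockId, numBytes, genStamp
    BlockProto proto = PBHelper.convert(b);
    Block back = PBHelper.convert(proto);
    assert back.getBlockId() == b.getBlockId();
    assert back.getNumBytes() == b.getNumBytes();
    assert back.getGenerationStamp() == b.getGenerationStamp();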

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientNamenodeProtocolServerSideTranslatorR23.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientNamenodeProtocolServerSideTranslatorR23.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientNamenodeProtocolServerSideTranslatorR23.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientNamenodeProtocolServerSideTranslatorR23.java Mon Dec 12 19:41:20 2011
@@ -322,7 +322,8 @@ public class ClientNamenodeProtocolServe
   }
 
   @Override
-  public boolean restoreFailedStorage(String arg) throws AccessControlException {
+  public boolean restoreFailedStorage(String arg)
+      throws AccessControlException, IOException {
     return server.restoreFailedStorage(arg);
   }
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientNamenodeProtocolTranslatorR23.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientNamenodeProtocolTranslatorR23.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientNamenodeProtocolTranslatorR23.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientNamenodeProtocolTranslatorR23.java Mon Dec 12 19:41:20 2011
@@ -338,7 +338,8 @@ public class ClientNamenodeProtocolTrans
   }
 
   @Override
-  public boolean restoreFailedStorage(String arg) throws AccessControlException {
+  public boolean restoreFailedStorage(String arg)
+      throws AccessControlException, IOException {
     return rpcProxy.restoreFailedStorage(arg);
   }
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientNamenodeWireProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientNamenodeWireProtocol.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientNamenodeWireProtocol.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientNamenodeWireProtocol.java Mon Dec 12 19:41:20 2011
@@ -325,7 +325,8 @@ public interface ClientNamenodeWireProto
    * The specification of this method matches that of
    * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#restoreFailedStorage(String)}
    */
-  public boolean restoreFailedStorage(String arg) throws AccessControlException;
+  public boolean restoreFailedStorage(String arg) 
+      throws AccessControlException, IOException;
 
   /**
    * The specification of this method matches that of

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java Mon Dec 12 19:41:20 2011
@@ -38,6 +38,8 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
+import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
@@ -48,6 +50,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryProxy;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
@@ -209,14 +212,15 @@ class NameNodeConnector {
     methodNameToPolicyMap.put("getBlocks", methodPolicy);
     methodNameToPolicyMap.put("getAccessKeys", methodPolicy);
 
-    return (NamenodeProtocol) RetryProxy.create(NamenodeProtocol.class,
-        RPC.getProxy(NamenodeProtocol.class,
-            NamenodeProtocol.versionID,
-            address,
-            UserGroupInformation.getCurrentUser(),
-            conf,
-            NetUtils.getDefaultSocketFactory(conf)),
-        methodNameToPolicyMap);
+    RPC.setProtocolEngine(conf, NamenodeProtocolPB.class,
+        ProtobufRpcEngine.class);
+    NamenodeProtocolPB proxy = RPC.getProxy(NamenodeProtocolPB.class,
+            RPC.getProtocolVersion(NamenodeProtocolPB.class), address,
+            UserGroupInformation.getCurrentUser(), conf,
+            NetUtils.getDefaultSocketFactory(conf));
+    NamenodeProtocolPB retryProxy = (NamenodeProtocolPB) RetryProxy.create(
+        NamenodeProtocolPB.class, proxy, methodNameToPolicyMap);
+    return new NamenodeProtocolTranslatorPB(retryProxy);
   }
 
   /**

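The balancer's proxy stack is now layered: retry policies wrap the raw
NamenodeProtocolPB proxy, and the protobuf translator sits on top, so retries
happen beneath the translation layer. A condensed sketch of that layering,
assuming 'proxy' and 'methodNameToPolicyMap' are built as in the hunk above:

    // Sketch only: caller -> translator -> retry proxy -> PB proxy (RPC).
    NamenodeProtocolPB retryProxy = (NamenodeProtocolPB) RetryProxy.create(
        NamenodeProtocolPB.class, proxy, methodNameToPolicyMap);
    NamenodeProtocol namenode = new NamenodeProtocolTranslatorPB(retryProxy);
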
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Mon Dec 12 19:41:20 2011
@@ -88,6 +88,7 @@ import org.apache.hadoop.fs.LocalFileSys
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -104,9 +105,15 @@ import org.apache.hadoop.hdfs.protocol.R
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
-import org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeProtocolServerSideTranslatorR23;
+import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InterDatanodeProtocolService;
+import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolPB;
+import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolServerSideTranslatorPB;
+import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolPB;
+import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolServerSideTranslatorPB;
+import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
@@ -131,18 +138,15 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
-import org.apache.hadoop.hdfs.server.protocolR23Compatible.InterDatanodeProtocolServerSideTranslatorR23;
-import org.apache.hadoop.hdfs.server.protocolR23Compatible.InterDatanodeProtocolTranslatorR23;
-import org.apache.hadoop.hdfs.server.protocolR23Compatible.InterDatanodeWireProtocol;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.Param;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.Server;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.net.DNS;
@@ -167,6 +171,7 @@ import org.mortbay.util.ajax.JSON;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Sets;
+import com.google.protobuf.BlockingService;
 
 
 /**********************************************************
@@ -382,21 +387,23 @@ public class DataNode extends Configured
         conf.get(DFS_DATANODE_IPC_ADDRESS_KEY));
     
     // Add all the RPC protocols that the Datanode implements    
-    ClientDatanodeProtocolServerSideTranslatorR23 
-        clientDatanodeProtocolServerTranslator = 
-          new ClientDatanodeProtocolServerSideTranslatorR23(this);
-    ipcServer = RPC.getServer(
-      org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeWireProtocol.class,
-      clientDatanodeProtocolServerTranslator, ipcAddr.getHostName(),
-                              ipcAddr.getPort(), 
-                              conf.getInt(DFS_DATANODE_HANDLER_COUNT_KEY, 
-                                          DFS_DATANODE_HANDLER_COUNT_DEFAULT), 
-                              false, conf, blockPoolTokenSecretManager);
-    InterDatanodeProtocolServerSideTranslatorR23 
-        interDatanodeProtocolServerTranslator = 
-          new InterDatanodeProtocolServerSideTranslatorR23(this);
-    ipcServer.addProtocol(RpcKind.RPC_WRITABLE, InterDatanodeWireProtocol.class, 
-        interDatanodeProtocolServerTranslator);
+    RPC.setProtocolEngine(conf, ClientDatanodeProtocolPB.class,
+        ProtobufRpcEngine.class);
+    ClientDatanodeProtocolServerSideTranslatorPB clientDatanodeProtocolXlator = 
+          new ClientDatanodeProtocolServerSideTranslatorPB(this);
+    BlockingService service = ClientDatanodeProtocolService
+        .newReflectiveBlockingService(clientDatanodeProtocolXlator);
+    ipcServer = RPC.getServer(ClientDatanodeProtocolPB.class, service, ipcAddr
+        .getHostName(), ipcAddr.getPort(), conf.getInt(
+        DFS_DATANODE_HANDLER_COUNT_KEY, DFS_DATANODE_HANDLER_COUNT_DEFAULT),
+        false, conf, blockPoolTokenSecretManager);
+    
+    InterDatanodeProtocolServerSideTranslatorPB interDatanodeProtocolXlator = 
+        new InterDatanodeProtocolServerSideTranslatorPB(this);
+    service = InterDatanodeProtocolService
+        .newReflectiveBlockingService(interDatanodeProtocolXlator);
+    DFSUtil.addPBProtocol(conf, InterDatanodeProtocolPB.class, service,
+        ipcServer);
     
     // set service-level authorization security policy
     if (conf.getBoolean(
@@ -922,7 +929,7 @@ public class DataNode extends Configured
       return loginUgi
           .doAs(new PrivilegedExceptionAction<InterDatanodeProtocol>() {
             public InterDatanodeProtocol run() throws IOException {
-              return new InterDatanodeProtocolTranslatorR23(addr, loginUgi,
+              return new InterDatanodeProtocolTranslatorPB(addr, loginUgi,
                   conf, NetUtils.getDefaultSocketFactory(conf), socketTimeout);
             }
           });

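The server-side pattern in the first hunk is worth calling out: the first protobuf service is handed to RPC.getServer directly, and every further service shares the same listener through DFSUtil.addPBProtocol. A stripped-down sketch, where datanode, host, port, handlerCount and secretManager stand in for the configured values:

RPC.setProtocolEngine(conf, ClientDatanodeProtocolPB.class,
    ProtobufRpcEngine.class);
BlockingService clientService = ClientDatanodeProtocolService
    .newReflectiveBlockingService(
        new ClientDatanodeProtocolServerSideTranslatorPB(datanode));
// The first service defines the server and binds its port.
Server ipcServer = RPC.getServer(ClientDatanodeProtocolPB.class,
    clientService, host, port, handlerCount, false, conf, secretManager);

BlockingService interService = InterDatanodeProtocolService
    .newReflectiveBlockingService(
        new InterDatanodeProtocolServerSideTranslatorPB(datanode));
// Every additional protobuf service piggybacks on the same server.
DFSUtil.addPBProtocol(conf, InterDatanodeProtocolPB.class, interService,
    ipcServer);
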
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java Mon Dec 12 19:41:20 2011
@@ -29,8 +29,11 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocolR23Compatible.JournalProtocolServerSideTranslatorR23;
-import org.apache.hadoop.hdfs.protocolR23Compatible.JournalWireProtocol;
+import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalProtocolService;
+import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB;
+import org.apache.hadoop.hdfs.protocolPB.JournalProtocolServerSideTranslatorPB;
+import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
+import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
@@ -40,9 +43,10 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.net.NetUtils;
 
+import com.google.protobuf.BlockingService;
+
 /**
  * BackupNode.
  * <p>
@@ -210,10 +214,12 @@ public class BackupNode extends NameNode
     private BackupNodeRpcServer(Configuration conf, BackupNode nn)
         throws IOException {
       super(conf, nn);
-      JournalProtocolServerSideTranslatorR23 journalProtocolTranslator = 
-          new JournalProtocolServerSideTranslatorR23(this);
-      this.clientRpcServer.addProtocol(RpcKind.RPC_WRITABLE, JournalWireProtocol.class,
-          journalProtocolTranslator);
+      JournalProtocolServerSideTranslatorPB journalProtocolTranslator = 
+          new JournalProtocolServerSideTranslatorPB(this);
+      BlockingService service = JournalProtocolService
+          .newReflectiveBlockingService(journalProtocolTranslator);
+      DFSUtil.addPBProtocol(conf, JournalProtocolPB.class, service,
+          this.clientRpcServer);
       nnRpcAddress = nn.nnRpcAddress;
     }
 
@@ -273,9 +279,11 @@ public class BackupNode extends NameNode
   private NamespaceInfo handshake(Configuration conf) throws IOException {
     // connect to name node
     InetSocketAddress nnAddress = NameNode.getServiceAddress(conf, true);
-    this.namenode =
-      RPC.waitForProxy(NamenodeProtocol.class,
-          NamenodeProtocol.versionID, nnAddress, conf);
+    NamenodeProtocolPB proxy = 
+      RPC.waitForProxy(NamenodeProtocolPB.class,
+          RPC.getProtocolVersion(NamenodeProtocolPB.class),
+          nnAddress, conf);
+    this.namenode = new NamenodeProtocolTranslatorPB(proxy);
     this.nnRpcAddress = getHostPortString(nnAddress);
     this.nnHttpAddress = getHostPortString(super.getHttpServerAddress(conf));
     // get version and id info from the name-node

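The handshake change repeats the client-side idiom from NameNodeConnector, but with the blocking waitForProxy variant, so the BackupNode keeps waiting until the active NameNode is reachable. A sketch of the resulting flow, with versionRequest() as the first call afterwards per the surrounding code:

NamenodeProtocolPB proxy = RPC.waitForProxy(NamenodeProtocolPB.class,
    RPC.getProtocolVersion(NamenodeProtocolPB.class), nnAddress, conf);
// The translator restores the NamenodeProtocol view; the rest of the
// BackupNode never sees protobuf types.
NamenodeProtocol namenode = new NamenodeProtocolTranslatorPB(proxy);
NamespaceInfo nsInfo = namenode.versionRequest();  // version/ID handshake
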
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java Mon Dec 12 19:41:20 2011
@@ -22,7 +22,7 @@ import java.net.InetSocketAddress;
 import java.util.Arrays;
 
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.protocolR23Compatible.JournalProtocolTranslatorR23;
+import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
@@ -57,7 +57,7 @@ class EditLogBackupOutputStream extends 
       NetUtils.createSocketAddr(bnRegistration.getAddress());
     try {
       this.backupNode =
-          new JournalProtocolTranslatorR23(bnAddress, new HdfsConfiguration());
+          new JournalProtocolTranslatorPB(bnAddress, new HdfsConfiguration());
     } catch(IOException e) {
       Storage.LOG.error("Error connecting to: " + bnAddress, e);
       throw e;

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java Mon Dec 12 19:41:20 2011
@@ -199,7 +199,9 @@ public class NameNodeHttpServer {
   }
   
   public void stop() throws Exception {
-    httpServer.stop();
+    if (httpServer != null) {
+      httpServer.stop();
+    }
   }
 
   public InetSocketAddress getHttpAddress() {

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Mon Dec 12 19:41:20 2011
@@ -42,6 +42,8 @@ import static org.apache.hadoop.hdfs.DFS
 import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.ha.HealthCheckFailedException;
 import org.apache.hadoop.ha.ServiceFailedException;
+
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -60,6 +62,9 @@ import org.apache.hadoop.hdfs.protocol.U
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService;
+import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
+import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeWireProtocol;
 import org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeProtocolServerSideTranslatorR23;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
@@ -103,6 +108,8 @@ import org.apache.hadoop.security.token.
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
 
+import com.google.protobuf.BlockingService;
+
 /**
  * This class is responsible for handling all of the RPC calls to the NameNode.
  * It is created, started, and stopped by {@link NameNode}.
@@ -141,6 +148,11 @@ class NameNodeRpcServer implements Namen
     clientProtocolServerTranslator = 
         new ClientNamenodeProtocolServerSideTranslatorR23(this);
     
+    NamenodeProtocolServerSideTranslatorPB namenodeProtocolXlator = 
+        new NamenodeProtocolServerSideTranslatorPB(this);
+    BlockingService service = NamenodeProtocolService
+        .newReflectiveBlockingService(namenodeProtocolXlator);
+    
     InetSocketAddress dnSocketAddr = nn.getServiceRpcServerAddress(conf);
     if (dnSocketAddr != null) {
       int serviceHandlerCount =
@@ -156,8 +168,6 @@ class NameNodeRpcServer implements Namen
       this.serviceRpcServer.addProtocol(RpcKind.RPC_WRITABLE,
           DatanodeProtocol.class, this);
       this.serviceRpcServer.addProtocol(RpcKind.RPC_WRITABLE,
-          NamenodeProtocol.class, this);
-      this.serviceRpcServer.addProtocol(RpcKind.RPC_WRITABLE,
           RefreshAuthorizationPolicyProtocol.class, this);
       this.serviceRpcServer.addProtocol(RpcKind.RPC_WRITABLE,
           RefreshUserMappingsProtocol.class, this);
@@ -165,6 +175,8 @@ class NameNodeRpcServer implements Namen
           GetUserMappingsProtocol.class, this);
       this.serviceRpcServer.addProtocol(RpcKind.RPC_WRITABLE,
           HAServiceProtocol.class, this);
+      DFSUtil.addPBProtocol(conf, NamenodeProtocolPB.class, service,
+          serviceRpcServer);
       
       this.serviceRPCAddress = this.serviceRpcServer.getListenerAddress();
       nn.setRpcServiceServerAddress(conf, serviceRPCAddress);
@@ -182,8 +194,6 @@ class NameNodeRpcServer implements Namen
     this.clientRpcServer.addProtocol(RpcKind.RPC_WRITABLE,
         DatanodeProtocol.class, this);
     this.clientRpcServer.addProtocol(RpcKind.RPC_WRITABLE,
-        NamenodeProtocol.class, this);
-    this.clientRpcServer.addProtocol(RpcKind.RPC_WRITABLE,
         RefreshAuthorizationPolicyProtocol.class, this);
     this.clientRpcServer.addProtocol(RpcKind.RPC_WRITABLE,
         RefreshUserMappingsProtocol.class, this);
@@ -191,7 +201,8 @@ class NameNodeRpcServer implements Namen
         GetUserMappingsProtocol.class, this);
     this.clientRpcServer.addProtocol(RpcKind.RPC_WRITABLE,
         HAServiceProtocol.class, this);
-    
+    DFSUtil.addPBProtocol(conf, NamenodeProtocolPB.class, service,
+        clientRpcServer);
 
     // set service-level authorization security policy
     if (serviceAuthEnabled =

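Two details of the hunk above are easy to miss. First, a single BlockingService instance is registered on both the service and the client RPC servers. Second, during the migration one server keeps speaking both RPC kinds, mixing legacy Writable protocols with protobuf ones, roughly:

// Legacy Writable protocols stay registered the old way...
clientRpcServer.addProtocol(RpcKind.RPC_WRITABLE,
    RefreshUserMappingsProtocol.class, this);
// ...while NamenodeProtocol now rides the protobuf engine, reusing the
// one BlockingService built earlier for both endpoints.
DFSUtil.addPBProtocol(conf, NamenodeProtocolPB.class, service,
    serviceRpcServer);
DFSUtil.addPBProtocol(conf, NamenodeProtocolPB.class, service,
    clientRpcServer);
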
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Mon Dec 12 19:41:20 2011
@@ -48,6 +48,8 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
+import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
@@ -217,9 +219,10 @@ public class SecondaryNameNode implement
     nameNodeAddr = NameNode.getServiceAddress(conf, true);
 
     this.conf = conf;
-    this.namenode =
-        (NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class,
-            NamenodeProtocol.versionID, nameNodeAddr, conf);
+    NamenodeProtocolPB proxy = 
+        RPC.waitForProxy(NamenodeProtocolPB.class,
+            RPC.getProtocolVersion(NamenodeProtocolPB.class), nameNodeAddr, conf);
+    this.namenode = new NamenodeProtocolTranslatorPB(proxy);
 
     // initialize checkpoint directories
     fsName = getInfoServer();

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java Mon Dec 12 19:41:20 2011
@@ -66,6 +66,7 @@ import org.apache.hadoop.hdfs.web.WebHdf
 import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
 import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
+import org.apache.hadoop.hdfs.web.resources.CreateParentParam;
 import org.apache.hadoop.hdfs.web.resources.DelegationParam;
 import org.apache.hadoop.hdfs.web.resources.DeleteOpParam;
 import org.apache.hadoop.hdfs.web.resources.DestinationParam;
@@ -245,12 +246,14 @@ public class NamenodeWebHdfsMethods {
           final AccessTimeParam accessTime,
       @QueryParam(RenameOptionSetParam.NAME) @DefaultValue(RenameOptionSetParam.DEFAULT)
           final RenameOptionSetParam renameOptions,
+      @QueryParam(CreateParentParam.NAME) @DefaultValue(CreateParentParam.DEFAULT)
+          final CreateParentParam createParent,
       @QueryParam(TokenArgumentParam.NAME) @DefaultValue(TokenArgumentParam.DEFAULT)
           final TokenArgumentParam delegationTokenArgument
       ) throws IOException, InterruptedException {
     return put(ugi, delegation, username, doAsUser, ROOT, op, destination,
         owner, group, permission, overwrite, bufferSize, replication,
-        blockSize, modificationTime, accessTime, renameOptions,
+        blockSize, modificationTime, accessTime, renameOptions, createParent,
         delegationTokenArgument);
   }
 
@@ -292,6 +295,8 @@ public class NamenodeWebHdfsMethods {
           final AccessTimeParam accessTime,
       @QueryParam(RenameOptionSetParam.NAME) @DefaultValue(RenameOptionSetParam.DEFAULT)
           final RenameOptionSetParam renameOptions,
+      @QueryParam(CreateParentParam.NAME) @DefaultValue(CreateParentParam.DEFAULT)
+          final CreateParentParam createParent,
       @QueryParam(TokenArgumentParam.NAME) @DefaultValue(TokenArgumentParam.DEFAULT)
           final TokenArgumentParam delegationTokenArgument
       ) throws IOException, InterruptedException {
@@ -325,6 +330,12 @@ public class NamenodeWebHdfsMethods {
       final String js = JsonUtil.toJsonString("boolean", b);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
+    case CREATESYMLINK:
+    {
+      np.createSymlink(destination.getValue(), fullpath,
+          PermissionParam.getDefaultFsPermission(), createParent.getValue());
+      return Response.ok().type(MediaType.APPLICATION_JSON).build();
+    }
     case RENAME:
     {
       final EnumSet<Options.Rename> s = renameOptions.getValue();
@@ -578,6 +589,17 @@ public class NamenodeWebHdfsMethods {
       final String js = JsonUtil.toJsonString(token);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
+    case GETDELEGATIONTOKENS:
+    {
+      if (delegation.getValue() != null) {
+        throw new IllegalArgumentException(delegation.getName()
+            + " parameter is not null.");
+      }
+      final Token<? extends TokenIdentifier>[] tokens = new Token<?>[1];
+      tokens[0] = generateDelegationToken(namenode, ugi, renewer.getValue());
+      final String js = JsonUtil.toJsonString(tokens);
+      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+    }
     case GETHOMEDIRECTORY:
     {
       final String js = JsonUtil.toJsonString(

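On the wire, the two operations added above take roughly the following shape (illustrative only; the exact query-parameter spellings come from the corresponding Param.NAME constants, and host, port and paths are placeholders):

PUT http://<nn>:<port>/webhdfs/v1/<link>?op=CREATESYMLINK&destination=<target>&createparent=<true|false>
GET http://<nn>:<port>/webhdfs/v1/?op=GETDELEGATIONTOKENS&renewer=<user>

CREATESYMLINK returns an empty 200 response, while GETDELEGATIONTOKENS returns a JSON object keyed "Tokens" (see the JsonUtil changes below). Note also that GETDELEGATIONTOKENS, like GETDELEGATIONTOKEN, must not itself carry a delegation token: the handler above rejects that combination with an IllegalArgumentException.
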
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java Mon Dec 12 19:41:20 2011
@@ -66,6 +66,13 @@ implements Writable, NodeRegistration {
     this(nodeName, new StorageInfo(), new ExportedBlockKeys());
   }
   
+  public DatanodeRegistration(DatanodeID dn, StorageInfo info,
+      ExportedBlockKeys keys) {
+    super(dn);
+    this.storageInfo = info;
+    this.exportedKeys = keys;
+  }
+  
   public DatanodeRegistration(String nodeName, StorageInfo info,
       ExportedBlockKeys keys) {
     super(nodeName);

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/UpgradeCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/UpgradeCommand.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/UpgradeCommand.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/UpgradeCommand.java Mon Dec 12 19:41:20 2011
@@ -40,7 +40,7 @@ import org.apache.hadoop.io.WritableFact
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class UpgradeCommand extends DatanodeCommand {
-  final static int UC_ACTION_UNKNOWN = DatanodeProtocol.DNA_UNKNOWN;
+  public final static int UC_ACTION_UNKNOWN = DatanodeProtocol.DNA_UNKNOWN;
   public final static int UC_ACTION_REPORT_STATUS = 100; // report upgrade status
   public final static int UC_ACTION_START_UPGRADE = 101; // start upgrade
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java Mon Dec 12 19:41:20 2011
@@ -97,6 +97,59 @@ public class JsonUtil {
     return (Token<BlockTokenIdentifier>)toToken(m);
   }
 
+  /** Convert a Token[] to a JSON array. */
+  private static Object[] toJsonArray(final Token<? extends TokenIdentifier>[] array
+      ) throws IOException {
+    if (array == null) {
+      return null;
+    } else if (array.length == 0) {
+      return EMPTY_OBJECT_ARRAY;
+    } else {
+      final Object[] a = new Object[array.length];
+      for(int i = 0; i < array.length; i++) {
+        a[i] = toJsonMap(array[i]);
+      }
+      return a;
+    }
+  }
+
+  /** Convert a token object to a JSON string. */
+  public static String toJsonString(final Token<? extends TokenIdentifier>[] tokens
+      ) throws IOException {
+    if (tokens == null) {
+      return null;
+    }
+
+    final Map<String, Object> m = new TreeMap<String, Object>();
+    m.put(Token.class.getSimpleName(), toJsonArray(tokens));
+    return toJsonString(Token.class.getSimpleName() + "s", m);
+  }
+
+  /** Convert an Object[] to a List<Token<?>>.  */
+  private static List<Token<?>> toTokenList(final Object[] objects) throws IOException {
+    if (objects == null) {
+      return null;
+    } else if (objects.length == 0) {
+      return Collections.emptyList();
+    } else {
+      final List<Token<?>> list = new ArrayList<Token<?>>(objects.length);
+      for(int i = 0; i < objects.length; i++) {
+        list.add(toToken((Map<?, ?>)objects[i]));
+      }
+      return list;
+    }
+  }
+
+  /** Convert a JSON map to a List<Token<?>>. */
+  public static List<Token<?>> toTokenList(final Map<?, ?> json) throws IOException {
+    if (json == null) {
+      return null;
+    }
+
+    final Map<?, ?> m = (Map<?, ?>)json.get(Token.class.getSimpleName() + "s");
+    return toTokenList((Object[])m.get(Token.class.getSimpleName()));
+  }
+
   /** Convert an exception object to a Json string. */
   public static String toJsonString(final Exception e) {
     final Map<String, Object> m = new TreeMap<String, Object>();

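The new helpers serialize a token array under a two-level key ("Tokens" wrapping a "Token" array, per the getSimpleName() calls above) and parse it back. A round-trip sketch, assuming jsonAsMap is the Map the WebHDFS JSON parser produces from the emitted string:

final Token<? extends TokenIdentifier>[] tokens = new Token<?>[1];
tokens[0] = new Token<TokenIdentifier>();   // placeholder token
// Serializes as {"Tokens":{"Token":[...]}}; each element is the token's
// URL-string form.
final String json = JsonUtil.toJsonString(tokens);

// Client side, after the response body has been parsed into a Map:
final List<Token<?>> parsed = JsonUtil.toTokenList(jsonAsMap);
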
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Mon Dec 12 19:41:20 2011
@@ -29,7 +29,6 @@ import java.net.MalformedURLException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
-import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 import java.util.StringTokenizer;
@@ -64,6 +63,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
 import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
+import org.apache.hadoop.hdfs.web.resources.CreateParentParam;
 import org.apache.hadoop.hdfs.web.resources.DeleteOpParam;
 import org.apache.hadoop.hdfs.web.resources.DestinationParam;
 import org.apache.hadoop.hdfs.web.resources.GetOpParam;
@@ -318,8 +318,9 @@ public class WebHdfsFileSystem extends F
         + '&' + new UserParam(ugi)
         + Param.toSortedString("&", parameters);
     final URL url;
-    if (op.equals(PutOpParam.Op.RENEWDELEGATIONTOKEN)
-        || op.equals(GetOpParam.Op.GETDELEGATIONTOKEN)) {
+    if (op == PutOpParam.Op.RENEWDELEGATIONTOKEN
+        || op == GetOpParam.Op.GETDELEGATIONTOKEN
+        || op == GetOpParam.Op.GETDELEGATIONTOKENS) {
       // Skip adding delegation token for getting or renewing delegation token,
       // because these operations require kerberos authentication.
       url = getNamenodeURL(path, query);
@@ -458,6 +459,18 @@ public class WebHdfsFileSystem extends F
     return (Boolean)json.get("boolean");
   }
 
+  /**
+   * Create a symlink pointing to the destination path.
+   * @see org.apache.hadoop.fs.Hdfs#createSymlink(Path, Path, boolean) 
+   */
+  public void createSymlink(Path destination, Path f, boolean createParent
+      ) throws IOException {
+    statistics.incrementWriteOps(1);
+    final HttpOpParam.Op op = PutOpParam.Op.CREATESYMLINK;
+    run(op, f, new DestinationParam(makeQualified(destination).toUri().getPath()),
+        new CreateParentParam(createParent));
+  }
+
   @Override
   public boolean rename(final Path src, final Path dst) throws IOException {
     statistics.incrementWriteOps(1);
@@ -703,8 +716,13 @@ public class WebHdfsFileSystem extends F
   @Override
   public List<Token<?>> getDelegationTokens(final String renewer
       ) throws IOException {
-    final Token<?>[] t = {getDelegationToken(renewer)};
-    return Arrays.asList(t);
+    final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKENS;
+    final Map<?, ?> m = run(op, null, new RenewerParam(renewer));
+    final List<Token<?>> tokens = JsonUtil.toTokenList(m);
+    for(Token<?> t : tokens) {
+      SecurityUtil.setTokenService(t, nnAddr);
+    }
+    return tokens;
   }
 
   @Override

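Client-side, the two additions above are exercised as follows (a sketch: the filesystem URI and paths are placeholders, and 50070 is only the customary NameNode HTTP port):

Configuration conf = new Configuration();
WebHdfsFileSystem fs = (WebHdfsFileSystem) FileSystem.get(
    URI.create("webhdfs://namenode:50070/"), conf);

// Issues PUT ...?op=CREATESYMLINK&destination=/existing/target&...
fs.createSymlink(new Path("/existing/target"), new Path("/the/link"), true);

// Issues GET ...?op=GETDELEGATIONTOKENS&renewer=renewerUser, parses the
// "Tokens" JSON, and points each token's service at the NameNode address.
List<Token<?>> tokens = fs.getDelegationTokens("renewerUser");
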
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java Mon Dec 12 19:41:20 2011
@@ -32,6 +32,7 @@ public class GetOpParam extends HttpOpPa
 
     GETHOMEDIRECTORY(HttpURLConnection.HTTP_OK),
     GETDELEGATIONTOKEN(HttpURLConnection.HTTP_OK),
+    GETDELEGATIONTOKENS(HttpURLConnection.HTTP_OK),
 
     /** GET_BLOCK_LOCATIONS is a private unstable op. */
     GET_BLOCK_LOCATIONS(HttpURLConnection.HTTP_OK),


