hadoop-hdfs-commits mailing list archives

From: t...@apache.org
Subject: svn commit: r1242635 [2/5] - in /hadoop/common/branches/HDFS-1623/hadoop-hdfs-project: hadoop-hdfs-httpfs/ hadoop-hdfs/ hadoop-hdfs/src/main/bin/ hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/ hadoop-hdfs/src/main/java/ hadoop-hdfs/src/main...
Date: Fri, 10 Feb 2012 01:49:24 GMT
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java Fri Feb 10 01:49:08 2012
@@ -29,7 +29,6 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto;
@@ -41,6 +40,9 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
@@ -49,9 +51,13 @@ import org.apache.hadoop.hdfs.protocolR2
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
+import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
@@ -85,8 +91,12 @@ public class DatanodeProtocolServerSideT
     DatanodeRegistration registration = PBHelper.convert(request
         .getRegistration());
     DatanodeRegistration registrationResp;
+    DatanodeStorage[] storages = new DatanodeStorage[request.getStoragesCount()];
+    for (int i = 0; i < request.getStoragesCount(); i++) {
+      storages[i] = PBHelper.convert(request.getStorages(i));
+    }
     try {
-      registrationResp = impl.registerDatanode(registration);
+      registrationResp = impl.registerDatanode(registration, storages);
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -99,10 +109,17 @@ public class DatanodeProtocolServerSideT
       HeartbeatRequestProto request) throws ServiceException {
     HeartbeatResponse response;
     try {
+      List<StorageReportProto> list = request.getReportsList();
+      StorageReport[] report = new StorageReport[list.size()];
+      int i = 0;
+      for (StorageReportProto p : list) {
+        report[i++] = new StorageReport(p.getStorageID(), p.getFailed(),
+            p.getCapacity(), p.getDfsUsed(), p.getRemaining(),
+            p.getBlockPoolUsed());
+      }
       response = impl.sendHeartbeat(PBHelper.convert(request.getRegistration()),
-          request.getCapacity(), request.getDfsUsed(), request.getRemaining(),
-          request.getBlockPoolUsed(), request.getXmitsInProgress(),
-          request.getXceiverCount(), request.getFailedVolumes());
+          report, request.getXmitsInProgress(), request.getXceiverCount(),
+          request.getFailedVolumes());
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -124,14 +141,21 @@ public class DatanodeProtocolServerSideT
   public BlockReportResponseProto blockReport(RpcController controller,
       BlockReportRequestProto request) throws ServiceException {
     DatanodeCommand cmd = null;
-    List<Long> blockIds = request.getBlocksList();
-    long[] blocks = new long[blockIds.size()];
-    for (int i = 0; i < blockIds.size(); i++) {
-      blocks[i] = blockIds.get(i);
+    StorageBlockReport[] report = 
+        new StorageBlockReport[request.getReportsCount()];
+    
+    int index = 0;
+    for (StorageBlockReportProto s : request.getReportsList()) {
+      List<Long> blockIds = s.getBlocksList();
+      long[] blocks = new long[blockIds.size()];
+      for (int i = 0; i < blockIds.size(); i++) {
+        blocks[i] = blockIds.get(i);
+      }
+      report[index++] = new StorageBlockReport(s.getStorageID(), blocks);
     }
     try {
       cmd = impl.blockReport(PBHelper.convert(request.getRegistration()),
-          request.getBlockPoolId(), blocks);
+          request.getBlockPoolId(), report);
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -147,11 +171,18 @@ public class DatanodeProtocolServerSideT
   public BlockReceivedAndDeletedResponseProto blockReceivedAndDeleted(
       RpcController controller, BlockReceivedAndDeletedRequestProto request)
       throws ServiceException {
-    List<ReceivedDeletedBlockInfoProto> rdbip = request.getBlocksList();
-    ReceivedDeletedBlockInfo[] info = 
-        new ReceivedDeletedBlockInfo[rdbip.size()];
-    for (int i = 0; i < rdbip.size(); i++) {
-      info[i] = PBHelper.convert(rdbip.get(i));
+    List<StorageReceivedDeletedBlocksProto> sBlocks = request.getBlocksList();
+    StorageReceivedDeletedBlocks[] info = 
+        new StorageReceivedDeletedBlocks[sBlocks.size()];
+    for (int i = 0; i < sBlocks.size(); i++) {
+      StorageReceivedDeletedBlocksProto sBlock = sBlocks.get(i);
+      List<ReceivedDeletedBlockInfoProto> list = sBlock.getBlocksList();
+      ReceivedDeletedBlockInfo[] rdBlocks = 
+          new ReceivedDeletedBlockInfo[list.size()];
+      for (int j = 0; j < list.size(); j++) {
+        rdBlocks[j] = PBHelper.convert(list.get(j));
+      }
+      info[i] = new StorageReceivedDeletedBlocks(sBlock.getStorageID(), rdBlocks);
     }
     try {
       impl.blockReceivedAndDeleted(PBHelper.convert(request.getRegistration()),

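For reference, the per-storage DatanodeProtocol shape that this translator now dispatches to can be read off the impl.* calls above; a sketch inferred from this file only, not the authoritative interface declaration:

    DatanodeRegistration registerDatanode(DatanodeRegistration registration,
        DatanodeStorage[] storages) throws IOException;

    HeartbeatResponse sendHeartbeat(DatanodeRegistration registration,
        StorageReport[] reports, int xmitsInProgress, int xceiverCount,
        int failedVolumes) throws IOException;

    DatanodeCommand blockReport(DatanodeRegistration registration,
        String blockPoolId, StorageBlockReport[] reports) throws IOException;

    void blockReceivedAndDeleted(DatanodeRegistration registration,
        String blockPoolId, StorageReceivedDeletedBlocks[] blocks)
        throws IOException;
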
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/GetUserMappingsProtocolClientSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/GetUserMappingsProtocolClientSideTranslatorPB.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/GetUserMappingsProtocolClientSideTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/GetUserMappingsProtocolClientSideTranslatorPB.java Fri Feb 10 01:49:08 2012
@@ -29,8 +29,11 @@ import org.apache.hadoop.hdfs.protocolR2
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RpcClientUtil;
+import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
@@ -39,7 +42,7 @@ import com.google.protobuf.RpcController
 import com.google.protobuf.ServiceException;
 
 public class GetUserMappingsProtocolClientSideTranslatorPB implements
-    GetUserMappingsProtocol, Closeable {
+    ProtocolMetaInterface, GetUserMappingsProtocol, Closeable {
 
   /** RpcController is not used and hence is set to null */
   private final static RpcController NULL_CONTROLLER = null;
@@ -86,4 +89,11 @@ public class GetUserMappingsProtocolClie
     }
     return resp.getGroupsList().toArray(new String[resp.getGroupsCount()]);
   }
+
+  @Override
+  public boolean isMethodSupported(String methodName) throws IOException {
+    return RpcClientUtil.isMethodSupported(rpcProxy,
+        GetUserMappingsProtocolPB.class, RpcKind.RPC_PROTOCOL_BUFFER,
+        RPC.getProtocolVersion(GetUserMappingsProtocolPB.class), methodName);
+  }
 }

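A minimal usage sketch for the new isMethodSupported hook, assuming getGroupsForUser is the protocol method whose response is unpacked above (proxy construction omitted):

    static String[] groupsIfSupported(
        GetUserMappingsProtocolClientSideTranslatorPB proxy, String user)
        throws IOException {
      // Probe the server's protocol metadata before issuing the call;
      // fall back gracefully if the remote side predates the method.
      if (!proxy.isMethodSupported("getGroupsForUser")) {
        return new String[0];
      }
      return proxy.getGroupsForUser(user);
    }
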
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java Fri Feb 10 01:49:08 2012
@@ -37,8 +37,11 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RpcClientUtil;
+import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.security.UserGroupInformation;
 
 import com.google.protobuf.RpcController;
@@ -52,7 +55,7 @@ import com.google.protobuf.ServiceExcept
 @InterfaceAudience.Private
 @InterfaceStability.Stable
 public class InterDatanodeProtocolTranslatorPB implements
-    InterDatanodeProtocol, Closeable {
+    ProtocolMetaInterface, InterDatanodeProtocol, Closeable {
   /** RpcController is not used and hence is set to null */
   private final static RpcController NULL_CONTROLLER = null;
   final private InterDatanodeProtocolPB rpcProxy;
@@ -116,4 +119,11 @@ public class InterDatanodeProtocolTransl
       throw ProtobufHelper.getRemoteException(e);
     }
   }
+
+  @Override
+  public boolean isMethodSupported(String methodName) throws IOException {
+    return RpcClientUtil.isMethodSupported(rpcProxy,
+        InterDatanodeProtocolPB.class, RpcKind.RPC_PROTOCOL_BUFFER,
+        RPC.getProtocolVersion(InterDatanodeProtocolPB.class), methodName);
+  }
 }

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolTranslatorPB.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolTranslatorPB.java Fri Feb 10 01:49:08 2012
@@ -31,8 +31,11 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.ProtobufHelper;
+import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RpcClientUtil;
+import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
@@ -44,7 +47,8 @@ import com.google.protobuf.ServiceExcept
  */
 @InterfaceAudience.Private
 @InterfaceStability.Stable
-public class JournalProtocolTranslatorPB implements JournalProtocol, Closeable {
+public class JournalProtocolTranslatorPB implements ProtocolMetaInterface,
+    JournalProtocol, Closeable {
   /** RpcController is not used and hence is set to null */
   private final static RpcController NULL_CONTROLLER = null;
   private final JournalProtocolPB rpcProxy;
@@ -103,4 +107,11 @@ public class JournalProtocolTranslatorPB
       throw ProtobufHelper.getRemoteException(e);
     }
   }
+
+  @Override
+  public boolean isMethodSupported(String methodName) throws IOException {
+    return RpcClientUtil.isMethodSupported(rpcProxy, JournalProtocolPB.class,
+        RpcKind.RPC_PROTOCOL_BUFFER,
+        RPC.getProtocolVersion(JournalProtocolPB.class), methodName);
+  }
 }

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java Fri Feb 10 01:49:08 2012
@@ -20,17 +20,12 @@ package org.apache.hadoop.hdfs.protocolP
 import java.io.Closeable;
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
@@ -45,21 +40,19 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.RpcClientUtil;
+import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -74,7 +67,7 @@ import com.google.protobuf.ServiceExcept
 @InterfaceAudience.Private
 @InterfaceStability.Stable
 public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
-    Closeable {
+    ProtocolMetaInterface, Closeable {
   /** RpcController is not used and hence is set to null */
   private final static RpcController NULL_CONTROLLER = null;
   
@@ -241,4 +234,11 @@ public class NamenodeProtocolTranslatorP
       throw ProtobufHelper.getRemoteException(e);
     }
   }
+
+  @Override
+  public boolean isMethodSupported(String methodName) throws IOException {
+    return RpcClientUtil.isMethodSupported(rpcProxy, NamenodeProtocolPB.class,
+        RpcKind.RPC_PROTOCOL_BUFFER,
+        RPC.getProtocolVersion(NamenodeProtocolPB.class), methodName);
+  }
 }

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java Fri Feb 10 01:49:08 2012
@@ -26,6 +26,7 @@ import org.apache.hadoop.fs.ContentSumma
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
@@ -52,11 +53,14 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.StorageState;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
@@ -109,6 +113,8 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
 import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
 import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
@@ -121,6 +127,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
@@ -1301,4 +1308,41 @@ public class PBHelper {
     builder.setTxid(hb.getTxId());
     return builder.build();
   }
+
+  public static DatanodeStorageProto convert(DatanodeStorage s) {
+    return DatanodeStorageProto.newBuilder()
+        .setState(PBHelper.convert(s.getState()))
+        .setStorageID(s.getStorageID()).build();
+  }
+
+  private static StorageState convert(State state) {
+    switch(state) {
+    case READ_ONLY:
+      return StorageState.READ_ONLY;
+    case NORMAL:
+    default:
+      return StorageState.NORMAL;
+    }
+  }
+
+  public static DatanodeStorage convert(DatanodeStorageProto s) {
+    return new DatanodeStorage(s.getStorageID(), PBHelper.convert(s.getState()));
+  }
+
+  private static State convert(StorageState state) {
+    switch(state) {
+    case READ_ONLY:
+      return DatanodeStorage.State.READ_ONLY;
+    case NORMAL:
+    default:
+      return DatanodeStorage.State.NORMAL;
+    }
+  }
+
+  public static StorageReportProto convert(StorageReport r) {
+    return StorageReportProto.newBuilder()
+        .setBlockPoolUsed(r.getBlockPoolUsed()).setCapacity(r.getCapacity())
+        .setDfsUsed(r.getDfsUsed()).setRemaining(r.getRemaining())
+        .setStorageID(r.getStorageID()).build();
+  }
 }

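The new DatanodeStorage converters round-trip cleanly; a small sketch using only the constructors and accessors visible in the hunks above (the storage ID string is made up):

    DatanodeStorage storage =
        new DatanodeStorage("DS-example-1", DatanodeStorage.State.READ_ONLY);
    DatanodeStorageProto proto = PBHelper.convert(storage);   // Java -> protobuf
    DatanodeStorage back = PBHelper.convert(proto);           // protobuf -> Java
    assert back.getStorageID().equals("DS-example-1");
    assert back.getState() == DatanodeStorage.State.READ_ONLY;
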
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java Fri Feb 10 01:49:08 2012
@@ -28,8 +28,11 @@ import org.apache.hadoop.hdfs.protocolR2
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RpcClientUtil;
+import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
@@ -38,7 +41,7 @@ import com.google.protobuf.RpcController
 import com.google.protobuf.ServiceException;
 
 public class RefreshAuthorizationPolicyProtocolClientSideTranslatorPB implements
-    RefreshAuthorizationPolicyProtocol, Closeable {
+    ProtocolMetaInterface, RefreshAuthorizationPolicyProtocol, Closeable {
 
   /** RpcController is not used and hence is set to null */
   private final static RpcController NULL_CONTROLLER = null;
@@ -83,4 +86,13 @@ public class RefreshAuthorizationPolicyP
       throw ProtobufHelper.getRemoteException(se);
     }
   }
+
+  @Override
+  public boolean isMethodSupported(String methodName) throws IOException {
+    return RpcClientUtil.isMethodSupported(rpcProxy,
+        RefreshAuthorizationPolicyProtocolPB.class,
+        RpcKind.RPC_PROTOCOL_BUFFER,
+        RPC.getProtocolVersion(RefreshAuthorizationPolicyProtocolPB.class),
+        methodName);
+  }
 }

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolClientSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolClientSideTranslatorPB.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolClientSideTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolClientSideTranslatorPB.java Fri Feb 10 01:49:08 2012
@@ -29,8 +29,11 @@ import org.apache.hadoop.hdfs.protocolR2
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RpcClientUtil;
+import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.RefreshUserMappingsProtocol;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -39,7 +42,7 @@ import com.google.protobuf.RpcController
 import com.google.protobuf.ServiceException;
 
 public class RefreshUserMappingsProtocolClientSideTranslatorPB implements
-    RefreshUserMappingsProtocol, Closeable {
+    ProtocolMetaInterface, RefreshUserMappingsProtocol, Closeable {
 
   /** RpcController is not used and hence is set to null */
   private final static RpcController NULL_CONTROLLER = null;
@@ -95,4 +98,13 @@ public class RefreshUserMappingsProtocol
       throw ProtobufHelper.getRemoteException(se);
     }
   }
+
+  @Override
+  public boolean isMethodSupported(String methodName) throws IOException {
+    return RpcClientUtil
+        .isMethodSupported(rpcProxy, RefreshUserMappingsProtocolPB.class,
+            RpcKind.RPC_PROTOCOL_BUFFER,
+            RPC.getProtocolVersion(RefreshUserMappingsProtocolPB.class),
+            methodName);
+  }
 }

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java Fri Feb 10 01:49:08 2012
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
@@ -308,8 +309,7 @@ public class DelegationTokenSecretManage
     }
 
     final InetSocketAddress addr = namenode.getNameNodeAddress();
-    final String s = addr.getAddress().getHostAddress() + ":" + addr.getPort();
-    token.setService(new Text(s));
+    SecurityUtil.setTokenService(token, addr);
     final Credentials c = new Credentials();
     c.addToken(new Text(ugi.getShortUserName()), token);
     return c;

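SecurityUtil.setTokenService now derives the token's service name from the socket address instead of a hand-built "ip:port" string; an equivalent call at a client site, with a hypothetical address, looks roughly like:

    InetSocketAddress addr = new InetSocketAddress("nn.example.com", 8020);
    Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
    // fills in token.setService(...) from addr, keeping the format consistent
    SecurityUtil.setTokenService(token, addr);
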
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java Fri Feb 10 01:49:08 2012
@@ -17,27 +17,38 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import java.util.LinkedList;
+
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.util.LightWeightGSet;
 
 /**
- * Internal class for block metadata.
+ * BlockInfo class maintains for a given block
+ * the {@link INodeFile} it is part of and datanodes where the replicas of 
+ * the block are stored.
  */
-public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
+@InterfaceAudience.Private
+public class BlockInfo extends Block implements
+    LightWeightGSet.LinkedElement {
   private INodeFile inode;
 
   /** For implementing {@link LightWeightGSet.LinkedElement} interface */
   private LightWeightGSet.LinkedElement nextLinkedElement;
 
   /**
-   * This array contains triplets of references.
-   * For each i-th datanode the block belongs to
-   * triplets[3*i] is the reference to the DatanodeDescriptor
-   * and triplets[3*i+1] and triplets[3*i+2] are references 
-   * to the previous and the next blocks, respectively, in the 
-   * list of blocks belonging to this data-node.
+   * This array contains triplets of references. For each i-th datanode the
+   * block belongs to triplets[3*i] is the reference to the DatanodeDescriptor
+   * and triplets[3*i+1] and triplets[3*i+2] are references to the previous and
+   * the next blocks, respectively, in the list of blocks belonging to this
+   * data-node.
+   * 
+   * Using previous and next in Object triplets is done instead of a
+   * {@link LinkedList} list to efficiently use memory. With LinkedList the cost
+   * per replica is 42 bytes (LinkedList#Entry object per replica) versus 16
+   * bytes using the triplets.
    */
   private Object[] triplets;
 
@@ -84,7 +95,7 @@ public class BlockInfo extends Block imp
     return node;
   }
 
-  BlockInfo getPrevious(int index) {
+  private BlockInfo getPrevious(int index) {
     assert this.triplets != null : "BlockInfo is not initialized";
     assert index >= 0 && index*3+1 < triplets.length : "Index is out of bound";
     BlockInfo info = (BlockInfo)triplets[index*3+1];
@@ -104,22 +115,14 @@ public class BlockInfo extends Block imp
     return info;
   }
 
-  void setDatanode(int index, DatanodeDescriptor node) {
-    assert this.triplets != null : "BlockInfo is not initialized";
-    assert index >= 0 && index*3 < triplets.length : "Index is out of bound";
-    triplets[index*3] = node;
-  }
-
-  void setPrevious(int index, BlockInfo to) {
-    assert this.triplets != null : "BlockInfo is not initialized";
-    assert index >= 0 && index*3+1 < triplets.length : "Index is out of bound";
-    triplets[index*3+1] = to;
-  }
-
-  void setNext(int index, BlockInfo to) {
+  private void setDatanode(int index, DatanodeDescriptor node, BlockInfo previous,
+      BlockInfo next) {
     assert this.triplets != null : "BlockInfo is not initialized";
-    assert index >= 0 && index*3+2 < triplets.length : "Index is out of bound";
-    triplets[index*3+2] = to;
+    int i = index * 3;
+    assert index >= 0 && i+2 < triplets.length : "Index is out of bound";
+    triplets[i] = node;
+    triplets[i+1] = previous;
+    triplets[i+2] = next;
   }
 
   /**
@@ -130,7 +133,7 @@ public class BlockInfo extends Block imp
    * @param to - block to be set to previous on the list of blocks
    * @return current previous block on the list of blocks
    */
-  BlockInfo getSetPrevious(int index, BlockInfo to) {
+  private BlockInfo setPrevious(int index, BlockInfo to) {
 	assert this.triplets != null : "BlockInfo is not initialized";
 	assert index >= 0 && index*3+1 < triplets.length : "Index is out of bound";
     BlockInfo info = (BlockInfo)triplets[index*3+1];
@@ -146,7 +149,7 @@ public class BlockInfo extends Block imp
    * @param to - block to be set to next on the list of blocks
    *    * @return current next block on the list of blocks
    */
-  BlockInfo getSetNext(int index, BlockInfo to) {
+  private BlockInfo setNext(int index, BlockInfo to) {
 	assert this.triplets != null : "BlockInfo is not initialized";
 	assert index >= 0 && index*3+2 < triplets.length : "Index is out of bound";
     BlockInfo info = (BlockInfo)triplets[index*3+2];
@@ -198,9 +201,7 @@ public class BlockInfo extends Block imp
       return false;
     // find the last null node
     int lastNode = ensureCapacity(1);
-    setDatanode(lastNode, node);
-    setNext(lastNode, null);
-    setPrevious(lastNode, null);
+    setDatanode(lastNode, node, null, null);
     return true;
   }
 
@@ -216,13 +217,10 @@ public class BlockInfo extends Block imp
     // find the last not null node
     int lastNode = numNodes()-1; 
     // replace current node triplet by the lastNode one 
-    setDatanode(dnIndex, getDatanode(lastNode));
-    setNext(dnIndex, getNext(lastNode)); 
-    setPrevious(dnIndex, getPrevious(lastNode)); 
+    setDatanode(dnIndex, getDatanode(lastNode), getPrevious(lastNode),
+        getNext(lastNode));
     // set the last triplet to null
-    setDatanode(lastNode, null);
-    setNext(lastNode, null); 
-    setPrevious(lastNode, null); 
+    setDatanode(lastNode, null, null, null);
     return true;
   }
 
@@ -300,8 +298,8 @@ public class BlockInfo extends Block imp
     if (head == this) {
       return this;
     }
-    BlockInfo next = this.getSetNext(curIndex, head);
-    BlockInfo prev = this.getSetPrevious(curIndex, null);
+    BlockInfo next = this.setNext(curIndex, head);
+    BlockInfo prev = this.setPrevious(curIndex, null);
 
     head.setPrevious(headIndex, this);
     prev.setNext(prev.findDatanode(dn), next);
@@ -331,7 +329,6 @@ public class BlockInfo extends Block imp
 
   /**
    * Convert a complete block to an under construction block.
-   * 
    * @return BlockInfoUnderConstruction -  an under construction block.
    */
   public BlockInfoUnderConstruction convertToBlockUnderConstruction(

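The triplets layout documented in the javadoc above can be pictured as three parallel slots per replica; a toy sketch in which the replica index, replication count, and referenced objects are placeholders:

    // slot layout for replica i of a block replicated `replication` times
    Object[] triplets = new Object[3 * replication];
    triplets[3*i]     = datanodeDescriptor; // DatanodeDescriptor holding replica i
    triplets[3*i + 1] = prevBlock;          // previous BlockInfo in that datanode's list
    triplets[3*i + 2] = nextBlock;          // next BlockInfo in that datanode's list
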
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java Fri Feb 10 01:49:08 2012
@@ -157,9 +157,6 @@ public class BlockInfoUnderConstruction 
   BlockInfo convertToCompleteBlock() throws IOException {
     assert getBlockUCState() != BlockUCState.COMPLETE :
       "Trying to convert a COMPLETE block";
-    if(getBlockUCState() != BlockUCState.COMMITTED)
-      throw new IOException(
-          "Cannot complete block: block has not been COMMITTED by the client");
     return new BlockInfo(this);
   }
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Fri Feb 10 01:49:08 2012
@@ -497,6 +497,9 @@ public class BlockManager {
     if (!force && numNodes < minReplication)
       throw new IOException("Cannot complete block: " +
           "block does not satisfy minimal replication requirement.");
+    if(!force && ucBlock.getBlockUCState() != BlockUCState.COMMITTED)
+      throw new IOException(
+          "Cannot complete block: block has not been COMMITTED by the client");
     BlockInfo completeBlock = ucBlock.convertToCompleteBlock();
     // replace penultimate block in file
     fileINode.setBlock(blkIndex, completeBlock);

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java Fri Feb 10 01:49:08 2012
@@ -62,6 +62,7 @@ import org.apache.hadoop.http.HtmlQuotin
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.authentication.util.KerberosName;
@@ -492,19 +493,18 @@ public class JspHelper {
     return UserGroupInformation.createRemoteUser(strings[0]);
   }
 
-  private static String getNNServiceAddress(ServletContext context,
+  private static InetSocketAddress getNNServiceAddress(ServletContext context,
       HttpServletRequest request) {
     String namenodeAddressInUrl = request.getParameter(NAMENODE_ADDRESS);
     InetSocketAddress namenodeAddress = null;
     if (namenodeAddressInUrl != null) {
-      namenodeAddress = DFSUtil.getSocketAddress(namenodeAddressInUrl);
+      namenodeAddress = NetUtils.createSocketAddr(namenodeAddressInUrl);
     } else if (context != null) {
       namenodeAddress = NameNodeHttpServer.getNameNodeAddressFromContext(
           context); 
     }
     if (namenodeAddress != null) {
-      return (namenodeAddress.getAddress().getHostAddress() + ":" 
-          + namenodeAddress.getPort());
+      return namenodeAddress;
     }
     return null;
   }
@@ -547,9 +547,9 @@ public class JspHelper {
         Token<DelegationTokenIdentifier> token = 
           new Token<DelegationTokenIdentifier>();
         token.decodeFromUrlString(tokenString);
-        String serviceAddress = getNNServiceAddress(context, request);
+        InetSocketAddress serviceAddress = getNNServiceAddress(context, request);
         if (serviceAddress != null) {
-          token.setService(new Text(serviceAddress));
+          SecurityUtil.setTokenService(token, serviceAddress);
           token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
         }
         ByteArrayInputStream buf = new ByteArrayInputStream(token

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java Fri Feb 10 01:49:08 2012
@@ -599,8 +599,12 @@ public abstract class Storage extends St
      * @throws IOException if locking fails.
      */
     FileLock tryLock() throws IOException {
+      boolean deletionHookAdded = false;
       File lockF = new File(root, STORAGE_FILE_LOCK);
-      lockF.deleteOnExit();
+      if (!lockF.exists()) {
+        lockF.deleteOnExit();
+        deletionHookAdded = true;
+      }
       RandomAccessFile file = new RandomAccessFile(lockF, "rws");
       FileLock res = null;
       try {
@@ -613,6 +617,12 @@ public abstract class Storage extends St
         file.close();
         throw e;
       }
+      if (res != null && !deletionHookAdded) {
+        // If the file existed prior to our startup, we didn't
+        // call deleteOnExit above. But since we successfully locked
+        // the dir, we can take care of cleaning it up.
+        lockF.deleteOnExit();
+      }
       return res;
     }
 

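The tryLock() change above only registers deleteOnExit for a lock file this process created, or for a pre-existing file once the lock has actually been acquired, so a process that loses the race cannot schedule deletion of the winner's live lock file. Condensed, the pattern is (file name hypothetical):

    File lockF = new File(root, "in_use.lock");
    boolean createdByUs = !lockF.exists();
    if (createdByUs) {
      lockF.deleteOnExit();                       // we own it from the start
    }
    RandomAccessFile file = new RandomAccessFile(lockF, "rws");
    FileLock res = file.getChannel().tryLock();   // null if another process holds it
    if (res != null && !createdByUs) {
      lockF.deleteOnExit();                       // pre-existing file, but we now own it
    }
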
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java Fri Feb 10 01:49:08 2012
@@ -42,6 +42,8 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
 import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
 import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
@@ -49,6 +51,13 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
+import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
+import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java Fri Feb 10 01:49:08 2012
@@ -41,10 +41,14 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
+import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.StringUtils;
@@ -251,8 +255,10 @@ class BPServiceActor implements Runnable
       }
     }
     if (receivedAndDeletedBlockArray != null) {
+      StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
+          bpRegistration.getStorageID(), receivedAndDeletedBlockArray) };
       bpNamenode.blockReceivedAndDeleted(bpRegistration, bpos.getBlockPoolId(),
-          receivedAndDeletedBlockArray);
+          report);
       synchronized (receivedAndDeletedBlockList) {
         for (int i = 0; i < receivedAndDeletedBlockArray.length; i++) {
           receivedAndDeletedBlockList.remove(receivedAndDeletedBlockArray[i]);
@@ -354,8 +360,9 @@ class BPServiceActor implements Runnable
 
       // Send block report
       long brSendStartTime = now();
-      cmd = bpNamenode.blockReport(bpRegistration, bpos.getBlockPoolId(), bReport
-          .getBlockListAsLongs());
+      StorageBlockReport[] report = { new StorageBlockReport(
+          bpRegistration.getStorageID(), bReport.getBlockListAsLongs()) };
+      cmd = bpNamenode.blockReport(bpRegistration, bpos.getBlockPoolId(), report);
 
       // Log the block report processing stats from Datanode perspective
       long brSendCost = now() - brSendStartTime;
@@ -388,13 +395,17 @@ class BPServiceActor implements Runnable
   
   HeartbeatResponse sendHeartBeat() throws IOException {
     LOG.info("heartbeat: " + this);
-    return bpNamenode.sendHeartbeat(bpRegistration,
+    // reports number of failed volumes
+    StorageReport[] report = { new StorageReport(bpRegistration.getStorageID(),
+        false,
         dn.getFSDataset().getCapacity(),
         dn.getFSDataset().getDfsUsed(),
         dn.getFSDataset().getRemaining(),
-        dn.getFSDataset().getBlockPoolUsed(bpos.getBlockPoolId()),
+        dn.getFSDataset().getBlockPoolUsed(bpos.getBlockPoolId())) };
+    return bpNamenode.sendHeartbeat(bpRegistration, report,
         dn.getXmitsInProgress(),
-        dn.getXceiverCount(), dn.getFSDataset().getNumFailedVolumes());
+        dn.getXceiverCount(),
+        dn.getFSDataset().getNumFailedVolumes());
   }
   
   //This must be called only by BPOfferService
@@ -569,7 +580,8 @@ class BPServiceActor implements Runnable
     while (shouldRun()) {
       try {
         // Use returned registration from namenode with updated machine name.
-        bpRegistration = bpNamenode.registerDatanode(bpRegistration);
+        bpRegistration = bpNamenode.registerDatanode(bpRegistration,
+            new DatanodeStorage[0]);
         break;
       } catch(SocketTimeoutException e) {  // namenode is busy
         LOG.info("Problem connecting to server: " + nnAddr);

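The datanode still registers and heartbeats with a single logical storage here (an empty DatanodeStorage[] at registration and a one-element StorageReport[] per heartbeat). Purely as an illustration of where the per-storage API points, a multi-storage heartbeat would be assembled the same way, with hypothetical storage IDs and counters:

    StorageReport[] reports = {
        new StorageReport("DS-1", false, cap1, used1, remaining1, bpUsed1),
        new StorageReport("DS-2", false, cap2, used2, remaining2, bpUsed2)
    };
    HeartbeatResponse resp = bpNamenode.sendHeartbeat(bpRegistration, reports,
        xmitsInProgress, xceiverCount, failedVolumes);
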
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java Fri Feb 10 01:49:08 2012
@@ -42,7 +42,7 @@ import org.apache.hadoop.classification.
 @InterfaceStability.Evolving
 public class BlockMetadataHeader {
 
-  static final short METADATA_VERSION = FSDataset.METADATA_VERSION;
+  public static final short VERSION = 1;
   
   /**
    * Header includes everything except the checksum(s) themselves.
@@ -138,7 +138,7 @@ public class BlockMetadataHeader {
    */
   static void writeHeader(DataOutputStream out, DataChecksum checksum)
                          throws IOException {
-    writeHeader(out, new BlockMetadataHeader(METADATA_VERSION, checksum));
+    writeHeader(out, new BlockMetadataHeader(VERSION, checksum));
   }
 
   /**

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java Fri Feb 10 01:49:08 2012
@@ -46,15 +46,14 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.IOUtils;
 
 /**
  * Performs two types of scanning:
  * <li> Gets block files from the data directories and reconciles the
- * difference between the blocks on the disk and in memory in
- * {@link FSDataset}</li>
+ * difference between the blocks on the disk and in memory.</li>
  * <li> Scans the data directories for block files under a block pool
  * and verifies that the files are not corrupt</li>
  * This keeps track of blocks and their last verification times.
@@ -78,7 +77,7 @@ class BlockPoolSliceScanner {
 
   private long scanPeriod = DEFAULT_SCAN_PERIOD_HOURS * 3600 * 1000;
   private DataNode datanode;
-  private FSDataset dataset;
+  private final FSDatasetInterface dataset;
   
   // sorted set
   private TreeSet<BlockScanInfo> blockInfoSet;
@@ -137,8 +136,8 @@ class BlockPoolSliceScanner {
     }
   }
   
-  BlockPoolSliceScanner(DataNode datanode, FSDataset dataset, Configuration conf,
-      String bpid) {
+  BlockPoolSliceScanner(DataNode datanode, FSDatasetInterface dataset,
+      Configuration conf, String bpid) {
     this.datanode = datanode;
     this.dataset = dataset;
     this.blockPoolId  = bpid;
@@ -220,16 +219,16 @@ class BlockPoolSliceScanner {
      * otherwise, pick the first directory.
      */
     File dir = null;
-    List<FSVolume> volumes = dataset.volumes.getVolumes();
-    for (FSDataset.FSVolume vol : dataset.volumes.getVolumes()) {
-      File bpDir = vol.getBlockPoolSlice(blockPoolId).getDirectory();
+    List<FSVolumeInterface> volumes = dataset.getVolumes();
+    for (FSVolumeInterface vol : volumes) {
+      File bpDir = vol.getDirectory(blockPoolId);
       if (LogFileHandler.isFilePresent(bpDir, verificationLogFile)) {
         dir = bpDir;
         break;
       }
     }
     if (dir == null) {
-      dir = volumes.get(0).getBlockPoolSlice(blockPoolId).getDirectory();
+      dir = volumes.get(0).getDirectory(blockPoolId);
     }
     
     try {
@@ -425,9 +424,8 @@ class BlockPoolSliceScanner {
         updateScanStatus(block.getLocalBlock(), ScanType.VERIFICATION_SCAN, false);
 
         // If the block does not exists anymore, then its not an error
-        if ( dataset.getFile(block.getBlockPoolId(), block.getLocalBlock()) == null ) {
-          LOG.info("Verification failed for " + block + ". Its ok since " +
-          "it not in datanode dataset anymore.");
+        if (!dataset.contains(block)) {
+          LOG.info(block + " is no longer in the dataset.");
           deleteBlock(block.getLocalBlock());
           return;
         }
@@ -578,8 +576,8 @@ class BlockPoolSliceScanner {
     bytesLeft += len;
   }
 
-  static File getCurrentFile(FSVolume vol, String bpid) throws IOException {
-    return LogFileHandler.getCurrentFile(vol.getBlockPoolSlice(bpid).getDirectory(),
+  static File getCurrentFile(FSVolumeInterface vol, String bpid) throws IOException {
+    return LogFileHandler.getCurrentFile(vol.getDirectory(bpid),
         BlockPoolSliceScanner.verificationLogFile);
   }
   

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Fri Feb 10 01:49:08 2012
@@ -225,7 +225,7 @@ class BlockReceiver implements Closeable
       cleanupBlock();
       
       // check if there is a disk error
-      IOException cause = FSDataset.getCauseIfDiskError(ioe);
+      IOException cause = DatanodeUtil.getCauseIfDiskError(ioe);
       DataNode.LOG.warn("IOException in BlockReceiver constructor. Cause is ",
           cause);
       

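A minimal usage sketch of the call pattern above, assuming only what the hunk shows: getCauseIfDiskError takes the caught IOException and yields the underlying disk-error cause, presumably null when the failure is not disk related. DatanodeUtil may not be visible outside its package, so the sketch sits in the same one.

package org.apache.hadoop.hdfs.server.datanode;

// Sketch: mirrors the BlockReceiver catch block above. The "null means not a
// disk error" behaviour of getCauseIfDiskError is an assumption.
import java.io.IOException;

class DiskErrorCheckSketch {
  static void onReceiveFailure(IOException ioe) throws IOException {
    IOException cause = DatanodeUtil.getCauseIfDiskError(ioe);
    if (cause != null) {
      // a real datanode would escalate to its disk-error handling here
      DataNode.LOG.warn("Disk error while receiving block", cause);
    }
    throw ioe;  // propagate the failure either way
  }
}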
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java Fri Feb 10 01:49:08 2012
@@ -226,7 +226,7 @@ class BlockSender implements java.io.Clo
         // read and handle the common header here. For now just a version
         BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
         short version = header.getVersion();
-        if (version != FSDataset.METADATA_VERSION) {
+        if (version != BlockMetadataHeader.VERSION) {
           LOG.warn("Wrong version (" + version + ") for metadata file for "
               + block + " ignoring ...");
         }

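The metadata version constant moves from FSDataset.METADATA_VERSION to BlockMetadataHeader.VERSION. A small sketch of the same check outside BlockSender; opening the file directly with a DataInputStream is an assumption here, while readHeader(), getVersion() and VERSION are the calls visible above.

package org.apache.hadoop.hdfs.server.datanode;

// Sketch of the version check from BlockSender above, applied to a metadata
// file on local disk; everything beyond those three calls is illustrative.
import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;

class MetaVersionCheckSketch {
  static boolean hasExpectedVersion(String metaFile) throws IOException {
    DataInputStream in = new DataInputStream(new FileInputStream(metaFile));
    try {
      BlockMetadataHeader header = BlockMetadataHeader.readHeader(in);
      return header.getVersion() == BlockMetadataHeader.VERSION;
    } finally {
      in.close();
    }
  }
}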
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java Fri Feb 10 01:49:08 2012
@@ -22,7 +22,7 @@ import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
 
 /**************************************************
  * BlockVolumeChoosingPolicy allows a DataNode to
@@ -46,7 +46,7 @@ public interface BlockVolumeChoosingPoli
    * @return the chosen volume to store the block.
    * @throws IOException when disks are unavailable or are full.
    */
-  public FSVolume chooseVolume(List<FSVolume> volumes, long blockSize)
+  public FSVolumeInterface chooseVolume(List<FSVolumeInterface> volumes, long blockSize)
     throws IOException;
 
 }

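With chooseVolume now typed against FSVolumeInterface, a policy can be written without referencing FSDataset at all. A deliberately naive sketch, assuming chooseVolume is the interface's only method (as this hunk suggests); it ignores free-space checks entirely, since no capacity accessor appears in this hunk.

package org.apache.hadoop.hdfs.server.datanode;

// Naive sketch: plain round-robin over the given volumes. A production policy
// would also consider available space, which this sketch deliberately omits.
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;

class RoundRobinVolumeSketch implements BlockVolumeChoosingPolicy {
  private int next = 0;

  @Override
  public synchronized FSVolumeInterface chooseVolume(
      List<FSVolumeInterface> volumes, long blockSize) throws IOException {
    if (volumes == null || volumes.isEmpty()) {
      throw new IOException("No volumes available for a block of "
          + blockSize + " bytes");
    }
    next = next % volumes.size();
    return volumes.get(next++);
  }
}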
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java Fri Feb 10 01:49:08 2012
@@ -27,12 +27,12 @@ import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 
 /**
  * DataBlockScanner manages block scanning for all the block pools. For each
@@ -44,7 +44,7 @@ import org.apache.commons.logging.LogFac
 public class DataBlockScanner implements Runnable {
   public static final Log LOG = LogFactory.getLog(DataBlockScanner.class);
   private final DataNode datanode;
-  private final FSDataset dataset;
+  private final FSDatasetInterface dataset;
   private final Configuration conf;
   
   /**
@@ -55,7 +55,7 @@ public class DataBlockScanner implements
     new TreeMap<String, BlockPoolSliceScanner>();
   Thread blockScannerThread = null;
   
-  DataBlockScanner(DataNode datanode, FSDataset dataset, Configuration conf) {
+  DataBlockScanner(DataNode datanode, FSDatasetInterface dataset, Configuration conf) {
     this.datanode = datanode;
     this.dataset = dataset;
     this.conf = conf;
@@ -135,7 +135,7 @@ public class DataBlockScanner implements
               .iterator();
           while (bpidIterator.hasNext()) {
             String bpid = bpidIterator.next();
-            for (FSDataset.FSVolume vol : dataset.volumes.getVolumes()) {
+            for (FSDatasetInterface.FSVolumeInterface vol : dataset.getVolumes()) {
               try {
                 File currFile = BlockPoolSliceScanner.getCurrentFile(vol, bpid);
                 if (currFile.exists()) {

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Fri Feb 10 01:49:08 2012
@@ -126,7 +126,6 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.common.Util;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.VolumeInfo;
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.hdfs.server.datanode.web.resources.DatanodeWebHdfsMethods;
@@ -444,11 +443,11 @@ public class DataNode extends Configured
     if (conf.getInt(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,
                     DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT) < 0) {
       reason = "verification is turned off by configuration";
-    } else if (!(data instanceof FSDataset)) {
-      reason = "verifcation is supported only with FSDataset";
+    } else if ("SimulatedFSDataset".equals(data.getClass().getSimpleName())) {
+      reason = "verifcation is not supported by SimulatedFSDataset";
     } 
     if (reason == null) {
-      blockScanner = new DataBlockScanner(this, (FSDataset)data, conf);
+      blockScanner = new DataBlockScanner(this, data, conf);
       blockScanner.start();
     } else {
       LOG.info("Periodic Block Verification scan is disabled because " +
@@ -473,11 +472,11 @@ public class DataNode extends Configured
     if (conf.getInt(DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 
                     DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT) < 0) {
       reason = "verification is turned off by configuration";
-    } else if (!(data instanceof FSDataset)) {
-      reason = "verification is supported only with FSDataset";
+    } else if ("SimulatedFSDataset".equals(data.getClass().getSimpleName())) {
+      reason = "verifcation is not supported by SimulatedFSDataset";
     } 
     if (reason == null) {
-      directoryScanner = new DirectoryScanner(this, (FSDataset) data, conf);
+      directoryScanner = new DirectoryScanner(this, data, conf);
       directoryScanner.start();
     } else {
       LOG.info("Periodic Directory Tree Verification scan is disabled because " +
@@ -1203,7 +1202,8 @@ public class DataNode extends Configured
   }
     
   /** Number of concurrent xceivers per node. */
-  int getXceiverCount() {
+  @Override // DataNodeMXBean
+  public int getXceiverCount() {
     return threadGroup == null ? 0 : threadGroup.activeCount();
   }
   
@@ -2145,16 +2145,7 @@ public class DataNode extends Configured
    */
   @Override // DataNodeMXBean
   public String getVolumeInfo() {
-    final Map<String, Object> info = new HashMap<String, Object>();
-    Collection<VolumeInfo> volumes = ((FSDataset)this.data).getVolumeInfo();
-    for (VolumeInfo v : volumes) {
-      final Map<String, Object> innerInfo = new HashMap<String, Object>();
-      innerInfo.put("usedSpace", v.usedSpace);
-      innerInfo.put("freeSpace", v.freeSpace);
-      innerInfo.put("reservedSpace", v.reservedSpace);
-      info.put(v.directory, innerInfo);
-    }
-    return JSON.toString(info);
+    return JSON.toString(data.getVolumeInfoMap());
   }
   
   @Override // DataNodeMXBean

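The deleted lines in getVolumeInfo() show the JSON shape the MXBean used to emit: one entry per volume directory with usedSpace, freeSpace and reservedSpace. The sketch below reproduces that shape; whether FSDatasetInterface.getVolumeInfoMap() keeps exactly these keys, and the org.mortbay.util.ajax.JSON import (not shown in this hunk), are both assumptions drawn from the deleted code.

// Sketch of the per-volume map the removed DataNode code built before handing
// it to JSON.toString(); the key names come from those deleted lines.
import java.util.HashMap;
import java.util.Map;
import org.mortbay.util.ajax.JSON;

class VolumeInfoJsonSketch {
  static String volumeJson(String directory, long used, long free, long reserved) {
    Map<String, Object> inner = new HashMap<String, Object>();
    inner.put("usedSpace", used);
    inner.put("freeSpace", free);
    inner.put("reservedSpace", reserved);

    Map<String, Object> info = new HashMap<String, Object>();
    info.put(directory, inner);
    return JSON.toString(info);  // e.g. {"/data/1/dfs/dn":{"usedSpace":...}}
  }
}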
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java Fri Feb 10 01:49:08 2012
@@ -70,5 +70,10 @@ public interface DataNodeMXBean {
    * @return the cluster id
    */
   public String getClusterId();
-  
+
+  /**
+   * Returns an estimate of the number of Datanode threads
+   * actively transferring blocks.
+   */
+  public int getXceiverCount();
 }

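Exposing getXceiverCount() on DataNodeMXBean makes the live xceiver count readable over JMX. A sketch of polling it from the same JVM; the ObjectName string below is an assumption about how the datanode registers the bean, not something this patch states.

// Sketch: read the new XceiverCount attribute from the platform MBean server.
import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

class XceiverCountJmxSketch {
  static int readXceiverCount() throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName dn = new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
    return ((Integer) mbs.getAttribute(dn, "XceiverCount")).intValue();
  }
}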
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java Fri Feb 10 01:49:08 2012
@@ -751,7 +751,7 @@ public class DataStorage extends Storage
     Matcher matcher = PRE_GENSTAMP_META_FILE_PATTERN.matcher(oldFileName); 
     if (matcher.matches()) {
       //return the current metadata file name
-      return FSDataset.getMetaFileName(matcher.group(1),
+      return DatanodeUtil.getMetaFileName(matcher.group(1),
           GenerationStamp.GRANDFATHER_GENERATION_STAMP); 
     }
     return oldFileName;

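getMetaFileName() also moves from FSDataset to DatanodeUtil. A sketch of the upgrade call as it appears above; the helper is assumed to be package-local, so the sketch shares the package, and the exact file-name format it returns is not asserted here.

package org.apache.hadoop.hdfs.server.datanode;

// Sketch of the DataStorage call above: derive the current-format metadata
// file name for a pre-generation-stamp block file name.
import org.apache.hadoop.hdfs.server.common.GenerationStamp;

class MetaFileNameSketch {
  static String upgradeMetaName(String preGenstampBlockName) {
    return DatanodeUtil.getMetaFileName(preGenstampBlockName,
        GenerationStamp.GRANDFATHER_GENERATION_STAMP);
  }
}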
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java Fri Feb 10 01:49:08 2012
@@ -53,18 +53,30 @@ import org.apache.hadoop.util.StringUtil
 @InterfaceAudience.Private
 public class DatanodeJspHelper {
   private static DFSClient getDFSClient(final UserGroupInformation user,
-                                        final InetSocketAddress addr,
+                                        final String addr,
                                         final Configuration conf
                                         ) throws IOException,
                                                  InterruptedException {
     return
       user.doAs(new PrivilegedExceptionAction<DFSClient>() {
         public DFSClient run() throws IOException {
-          return new DFSClient(addr, conf);
+          return new DFSClient(NetUtils.createSocketAddr(addr), conf);
         }
       });
   }
 
+  /**
+   * Internal convenience method for canonicalizing host name.
+   * @param addr name:port or name 
+   * @return canonicalized host name
+   */
+  private static String canonicalize(String addr) {
+    // default port 1 is supplied to allow addr without port.
+    // the port will be ignored.
+    return NetUtils.createSocketAddr(addr, 1).getAddress()
+           .getCanonicalHostName();
+  }
+
   private static final SimpleDateFormat lsDateFormat =
     new SimpleDateFormat("yyyy-MM-dd HH:mm");
 
@@ -102,8 +114,7 @@ public class DatanodeJspHelper {
       return;
     }
     
-    InetSocketAddress namenodeAddress = DFSUtil.getSocketAddress(nnAddr);
-    DFSClient dfs = getDFSClient(ugi, namenodeAddress, conf);
+    DFSClient dfs = getDFSClient(ugi, nnAddr, conf);
     String target = dir;
     final HdfsFileStatus targetStatus = dfs.getFileInfo(target);
     if (targetStatus == null) { // not exists
@@ -125,8 +136,7 @@ public class DatanodeJspHelper {
           out.print("Empty file");
         } else {
           DatanodeInfo chosenNode = JspHelper.bestNode(firstBlock, conf);
-          String fqdn = InetAddress.getByName(chosenNode.getHost())
-              .getCanonicalHostName();
+          String fqdn = canonicalize(chosenNode.getHost());
           String datanodeAddr = chosenNode.getName();
           int datanodePort = Integer.parseInt(datanodeAddr.substring(
               datanodeAddr.indexOf(':') + 1, datanodeAddr.length()));
@@ -210,9 +220,8 @@ public class DatanodeJspHelper {
         JspHelper.addTableFooter(out);
       }
     }
-    String namenodeHost = namenodeAddress.getHostName();
     out.print("<br><a href=\"http://"
-        + InetAddress.getByName(namenodeHost).getCanonicalHostName() + ":"
+        + canonicalize(nnAddr) + ":"
         + namenodeInfoPort + "/dfshealth.jsp\">Go back to DFS home</a>");
     dfs.close();
   }
@@ -282,8 +291,7 @@ public class DatanodeJspHelper {
     }
     long blockSize = Long.parseLong(blockSizeStr);
 
-    final InetSocketAddress namenodeAddress = DFSUtil.getSocketAddress(nnAddr);
-    final DFSClient dfs = getDFSClient(ugi, namenodeAddress, conf);
+    final DFSClient dfs = getDFSClient(ugi, nnAddr, conf);
     List<LocatedBlock> blocks = dfs.getNamenode().getBlockLocations(filename, 0,
         Long.MAX_VALUE).getLocatedBlocks();
     // Add the various links for looking at the file contents
@@ -305,8 +313,7 @@ public class DatanodeJspHelper {
       dfs.close();
       return;
     }
-    String fqdn = InetAddress.getByName(chosenNode.getHost())
-        .getCanonicalHostName();
+    String fqdn = canonicalize(chosenNode.getHost());
     String tailUrl = "http://" + fqdn + ":" + chosenNode.getInfoPort()
         + "/tail.jsp?filename=" + URLEncoder.encode(filename, "UTF-8")
         + "&namenodeInfoPort=" + namenodeInfoPort
@@ -345,9 +352,7 @@ public class DatanodeJspHelper {
     // generate a table and dump the info
     out.println("\n<table>");
     
-    String namenodeHost = namenodeAddress.getHostName();
-    String namenodeHostName = InetAddress.getByName(namenodeHost).getCanonicalHostName();
-    
+    String nnCanonicalName = canonicalize(nnAddr);
     for (LocatedBlock cur : blocks) {
       out.print("<tr>");
       final String blockidstring = Long.toString(cur.getBlock().getBlockId());
@@ -358,7 +363,7 @@ public class DatanodeJspHelper {
         String datanodeAddr = locs[j].getName();
         datanodePort = Integer.parseInt(datanodeAddr.substring(datanodeAddr
             .indexOf(':') + 1, datanodeAddr.length()));
-        fqdn = InetAddress.getByName(locs[j].getHost()).getCanonicalHostName();
+        fqdn = canonicalize(locs[j].getHost());
         String blockUrl = "http://" + fqdn + ":" + locs[j].getInfoPort()
             + "/browseBlock.jsp?blockId=" + blockidstring
             + "&blockSize=" + blockSize
@@ -370,7 +375,7 @@ public class DatanodeJspHelper {
             + JspHelper.getDelegationTokenUrlParam(tokenString)
             + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr);
 
-        String blockInfoUrl = "http://" + namenodeHostName + ":"
+        String blockInfoUrl = "http://" + nnCanonicalName + ":"
             + namenodeInfoPort
             + "/block_info_xml.jsp?blockId=" + blockidstring;
         out.print("<td>&nbsp</td><td><a href=\"" + blockUrl + "\">"
@@ -382,7 +387,7 @@ public class DatanodeJspHelper {
     out.println("</table>");
     out.print("<hr>");
     out.print("<br><a href=\"http://"
-        + InetAddress.getByName(namenodeHost).getCanonicalHostName() + ":"
+        + nnCanonicalName + ":"
         + namenodeInfoPort + "/dfshealth.jsp\">Go back to DFS home</a>");
     dfs.close();
   }
@@ -419,8 +424,7 @@ public class DatanodeJspHelper {
       return;
     }
     
-    final DFSClient dfs = getDFSClient(ugi, 
-        DFSUtil.getSocketAddress(nnAddr), conf);
+    final DFSClient dfs = getDFSClient(ugi, nnAddr, conf);
 
     String bpid = null;
     Token<BlockTokenIdentifier> blockToken = BlockTokenSecretManager.DUMMY_TOKEN;
@@ -518,8 +522,7 @@ public class DatanodeJspHelper {
             String datanodeAddr = d.getName();
             nextDatanodePort = Integer.parseInt(datanodeAddr.substring(
                 datanodeAddr.indexOf(':') + 1, datanodeAddr.length()));
-            nextHost = InetAddress.getByName(d.getHost())
-                .getCanonicalHostName();
+            nextHost = d.getHost();
             nextPort = d.getInfoPort();
           }
         }
@@ -533,7 +536,7 @@ public class DatanodeJspHelper {
     }
     String nextUrl = null;
     if (nextBlockIdStr != null) {
-      nextUrl = "http://" + nextHost + ":" + nextPort
+      nextUrl = "http://" + canonicalize(nextHost) + ":" + nextPort
           + "/browseBlock.jsp?blockId=" + nextBlockIdStr
           + "&blockSize=" + nextBlockSize
           + "&startOffset=" + nextStartOffset
@@ -573,8 +576,7 @@ public class DatanodeJspHelper {
             String datanodeAddr = d.getName();
             prevDatanodePort = Integer.parseInt(datanodeAddr.substring(
                 datanodeAddr.indexOf(':') + 1, datanodeAddr.length()));
-            prevHost = InetAddress.getByName(d.getHost())
-                .getCanonicalHostName();
+            prevHost = d.getHost();
             prevPort = d.getInfoPort();
           }
         }
@@ -591,7 +593,7 @@ public class DatanodeJspHelper {
 
     String prevUrl = null;
     if (prevBlockIdStr != null) {
-      prevUrl = "http://" + prevHost + ":" + prevPort
+      prevUrl = "http://" + canonicalize(prevHost) + ":" + prevPort
           + "/browseBlock.jsp?blockId=" + prevBlockIdStr
           + "&blockSize=" + prevBlockSize
           + "&startOffset=" + prevStartOffset
@@ -669,8 +671,7 @@ public class DatanodeJspHelper {
           + "\">");
 
     // fetch the block from the datanode that has the last block for this file
-    final DFSClient dfs = getDFSClient(ugi, DFSUtil.getSocketAddress(nnAddr),
-        conf);
+    final DFSClient dfs = getDFSClient(ugi, nnAddr, conf);
     List<LocatedBlock> blocks = dfs.getNamenode().getBlockLocations(filename, 0,
         Long.MAX_VALUE).getLocatedBlocks();
     if (blocks == null || blocks.size() == 0) {
@@ -710,6 +711,6 @@ public class DatanodeJspHelper {
       final DataNode datanode, final Configuration conf,
       final UserGroupInformation ugi) throws IOException, InterruptedException {
     final String nnAddr = request.getParameter(JspHelper.NAMENODE_ADDRESS);
-    return getDFSClient(ugi, DFSUtil.getSocketAddress(nnAddr), conf);
+    return getDFSClient(ugi, nnAddr, conf);
   }
 }

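The new canonicalize() helper lets the JSP code pass around plain "host" or "host:port" strings and resolve them to a canonical host name in one place. A usage sketch that mirrors its body; the example addresses are placeholders and the printed result depends on local DNS.

// Sketch mirroring the private canonicalize() helper added above.
import org.apache.hadoop.net.NetUtils;

class CanonicalizeSketch {
  static String canonicalize(String addr) {
    // dummy port 1 lets "host" (no port) parse; the port is never used
    return NetUtils.createSocketAddr(addr, 1).getAddress()
        .getCanonicalHostName();
  }

  public static void main(String[] args) {
    System.out.println(canonicalize("dn1.example.com:50010"));
    System.out.println(canonicalize("dn1.example.com"));  // same result, port ignored
  }
}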
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java Fri Feb 10 01:49:08 2012
@@ -43,20 +43,19 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
 import org.apache.hadoop.util.Daemon;
 
 /**
  * Periodically scans the data directories for block and block metadata files.
- * Reconciles the differences with block information maintained in
- * {@link FSDataset}
+ * Reconciles the differences with block information maintained in the dataset.
  */
 @InterfaceAudience.Private
 public class DirectoryScanner implements Runnable {
   private static final Log LOG = LogFactory.getLog(DirectoryScanner.class);
 
   private final DataNode datanode;
-  private final FSDataset dataset;
+  private final FSDatasetInterface dataset;
   private final ExecutorService reportCompileThreadPool;
   private final ScheduledExecutorService masterThread;
   private final long scanPeriodMsecs;
@@ -158,13 +157,13 @@ public class DirectoryScanner implements
     private final long blockId;
     private final File metaFile;
     private final File blockFile;
-    private final FSVolume volume;
+    private final FSVolumeInterface volume;
 
     ScanInfo(long blockId) {
       this(blockId, null, null, null);
     }
 
-    ScanInfo(long blockId, File blockFile, File metaFile, FSVolume vol) {
+    ScanInfo(long blockId, File blockFile, File metaFile, FSVolumeInterface vol) {
       this.blockId = blockId;
       this.metaFile = metaFile;
       this.blockFile = blockFile;
@@ -183,7 +182,7 @@ public class DirectoryScanner implements
       return blockId;
     }
 
-    FSVolume getVolume() {
+    FSVolumeInterface getVolume() {
       return volume;
     }
 
@@ -220,7 +219,7 @@ public class DirectoryScanner implements
     }
   }
 
-  DirectoryScanner(DataNode dn, FSDataset dataset, Configuration conf) {
+  DirectoryScanner(DataNode dn, FSDatasetInterface dataset, Configuration conf) {
     this.datanode = dn;
     this.dataset = dataset;
     int interval = conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
@@ -269,7 +268,7 @@ public class DirectoryScanner implements
         return;
       }
 
-      String[] bpids = dataset.getBPIdlist();
+      String[] bpids = dataset.getBlockPoolList();
       for(String bpid : bpids) {
         UpgradeManagerDatanode um = 
           datanode.getUpgradeManagerDatanode(bpid);
@@ -411,17 +410,29 @@ public class DirectoryScanner implements
     diffRecord.add(new ScanInfo(blockId));
   }
 
+  /** Is the given volume still valid in the dataset? */
+  private static boolean isValid(final FSDatasetInterface dataset,
+      final FSVolumeInterface volume) {
+    for (FSVolumeInterface vol : dataset.getVolumes()) {
+      if (vol == volume) {
+        return true;
+      }
+    }
+    return false;
+  }
+
   /** Get lists of blocks on the disk sorted by blockId, per blockpool */
   private Map<String, ScanInfo[]> getDiskReport() {
     // First get list of data directories
-    List<FSVolume> volumes = dataset.volumes.getVolumes();
+    final List<FSVolumeInterface> volumes = dataset.getVolumes();
     ArrayList<ScanInfoPerBlockPool> dirReports =
       new ArrayList<ScanInfoPerBlockPool>(volumes.size());
     
     Map<Integer, Future<ScanInfoPerBlockPool>> compilersInProgress =
       new HashMap<Integer, Future<ScanInfoPerBlockPool>>();
     for (int i = 0; i < volumes.size(); i++) {
-      if (!dataset.volumes.isValid(volumes.get(i))) { // volume is still valid
+      if (!isValid(dataset, volumes.get(i))) {
+        // volume is invalid
         dirReports.add(i, null);
       } else {
         ReportCompiler reportCompiler =
@@ -446,7 +457,8 @@ public class DirectoryScanner implements
     // Compile consolidated report for all the volumes
     ScanInfoPerBlockPool list = new ScanInfoPerBlockPool();
     for (int i = 0; i < volumes.size(); i++) {
-      if (dataset.volumes.isValid(volumes.get(i))) { // volume is still valid
+      if (isValid(dataset, volumes.get(i))) {
+        // volume is still valid
         list.addAll(dirReports.get(i));
       }
     }
@@ -461,9 +473,9 @@ public class DirectoryScanner implements
 
   private static class ReportCompiler 
   implements Callable<ScanInfoPerBlockPool> {
-    private FSVolume volume;
+    private FSVolumeInterface volume;
 
-    public ReportCompiler(FSVolume volume) {
+    public ReportCompiler(FSVolumeInterface volume) {
       this.volume = volume;
     }
 
@@ -473,14 +485,14 @@ public class DirectoryScanner implements
       ScanInfoPerBlockPool result = new ScanInfoPerBlockPool(bpList.length);
       for (String bpid : bpList) {
         LinkedList<ScanInfo> report = new LinkedList<ScanInfo>();
-        File bpFinalizedDir = volume.getBlockPoolSlice(bpid).getFinalizedDir();
+        File bpFinalizedDir = volume.getFinalizedDir(bpid);
         result.put(bpid, compileReport(volume, bpFinalizedDir, report));
       }
       return result;
     }
 
     /** Compile list {@link ScanInfo} for the blocks in the directory <dir> */
-    private LinkedList<ScanInfo> compileReport(FSVolume vol, File dir,
+    private LinkedList<ScanInfo> compileReport(FSVolumeInterface vol, File dir,
         LinkedList<ScanInfo> report) {
       File[] files;
       try {


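The surrounding hunks keep DirectoryScanner's pattern of compiling one report per valid volume on a thread pool and joining the futures by index. A reduced, self-contained sketch of that pattern with the HDFS types replaced by strings, purely for illustration:

// Sketch of the fork/join shape used by getDiskReport() above: submit one
// compiler task per volume, then join the futures in volume order.
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

class PerVolumeReportSketch {
  static List<String> compileAll(final List<String> volumes) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(Math.max(1, volumes.size()));
    List<Future<String>> futures = new ArrayList<Future<String>>();
    for (final String vol : volumes) {
      futures.add(pool.submit(new Callable<String>() {
        public String call() {
          return "report for " + vol;   // stands in for ReportCompiler.call()
        }
      }));
    }
    List<String> reports = new ArrayList<String>();
    for (Future<String> f : futures) {
      reports.add(f.get());             // join in submission (volume) order
    }
    pool.shutdown();
    return reports;
  }
}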
