hadoop-hdfs-commits mailing list archives

From: a..@apache.org
Subject: svn commit: r1211749 [1/3] - in /hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/org/apache/hadoop/hdfs/protocol/prot...
Date: Thu, 08 Dec 2011 02:57:51 GMT
Author: atm
Date: Thu Dec  8 02:57:47 2011
New Revision: 1211749

URL: http://svn.apache.org/viewvc?rev=1211749&view=rev
Log:
Merge trunk into HA branch.

Added:
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/ClientDatanodeProtocolProtos.java
      - copied unchanged from r1211747, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/ClientDatanodeProtocolProtos.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolPB.java
      - copied unchanged from r1211747, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolPB.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java
      - copied unchanged from r1211747, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
      - copied unchanged from r1211747, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolPB.java
      - copied unchanged from r1211747, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolPB.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolServerSideTranslatorPB.java
      - copied unchanged from r1211747, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolServerSideTranslatorPB.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java
      - copied unchanged from r1211747, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolPB.java
      - copied unchanged from r1211747, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolPB.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
      - copied unchanged from r1211747, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
      - copied unchanged from r1211747, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckableNameNodeResource.java
      - copied unchanged from r1211747, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckableNameNodeResource.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourcePolicy.java
      - copied unchanged from r1211747, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourcePolicy.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/proto/ClientDatanodeProtocol.proto
      - copied unchanged from r1211747, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/proto/ClientDatanodeProtocol.proto
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
      - copied unchanged from r1211747, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java
      - copied unchanged from r1211747, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java
Modified:
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/HdfsProtos.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/InterDatanodeProtocolProtos.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/NamenodeProtocolProtos.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolServerSideTranslatorPB.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolTranslatorPB.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockKey.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/native/   (props changed)
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/   (props changed)
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/proto/InterDatanodeProtocol.proto
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/proto/NamenodeProtocol.proto
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/proto/hdfs.proto
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/   (props changed)
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Dec  8 02:57:47 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:1152502-1210663
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:1152502-1211747
 /hadoop/core/branches/branch-0.19/hdfs:713112
 /hadoop/hdfs/branches/HDFS-1052:987665-1095512
 /hadoop/hdfs/branches/HDFS-265:796829-820463

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1211749&r1=1211748&r2=1211749&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Dec  8 02:57:47 2011
@@ -15,6 +15,15 @@ Trunk (unreleased changes)
 
     HDFS-2581. Implement protobuf service for JournalProtocol. (suresh)
 
+    HDFS-2618. Implement protobuf service for NamenodeProtocol. (suresh)
+
+    HDFS-2629. Implement protobuf service for InterDatanodeProtocol. (suresh)
+
+    HDFS-2636. Implement protobuf service for ClientDatanodeProtocol. (suresh)
+
+    HDFS-2430. The number of failed or low-resource volumes the NN can tolerate
+               should be configurable. (atm)
+
   IMPROVEMENTS
 
     HADOOP-7524 Change RPC to allow multiple protocols including multiple

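The HDFS-2618/2629/2636 entries above correspond to the new *TranslatorPB classes added in this merge. As a rough illustration of the translator pattern those classes follow, here is a minimal, self-contained Java sketch; every type name in it is a simplified stand-in invented for this example, not an actual HDFS interface:

    import java.io.IOException;

    // Stand-in for an existing Java protocol interface (hypothetical).
    interface GetBlockKeysProtocol {
      long getBlockKeyUpdateInterval() throws IOException;
    }

    // Stand-in for the protobuf-generated RPC stub (hypothetical).
    interface GetBlockKeysProtocolPB {
      long getBlockKeyUpdateInterval() throws IOException;
    }

    // Client-side translator: implements the Java interface and delegates to
    // the protobuf stub, converting types and exceptions at the boundary.
    class GetBlockKeysTranslatorPB implements GetBlockKeysProtocol {
      private final GetBlockKeysProtocolPB rpcProxy;

      GetBlockKeysTranslatorPB(GetBlockKeysProtocolPB rpcProxy) {
        this.rpcProxy = rpcProxy;
      }

      @Override
      public long getBlockKeyUpdateInterval() throws IOException {
        // Real translators build a protobuf request, invoke the stub, and
        // unwrap ServiceException; this sketch shows only the delegation shape.
        return rpcProxy.getBlockKeyUpdateInterval();
      }
    }

The server side mirrors this shape: a *ServerSideTranslatorPB implements the generated protobuf service interface and delegates to the existing Java implementation.
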
Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Dec  8 02:57:47 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1159757-1210663
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1159757-1211747
 /hadoop/core/branches/branch-0.19/hdfs/src/java:713112
 /hadoop/core/branches/branch-0.19/hdfs/src/main/java:713112
 /hadoop/core/trunk/src/hdfs:776175-785643,785929-786278

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1211749&r1=1211748&r2=1211749&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Thu Dec  8 02:57:47 2011
@@ -130,6 +130,8 @@ public class DFSConfigKeys extends Commo
   public static final boolean DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT = true;
   public static final String  DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY = "dfs.namenode.num.checkpoints.retained";
   public static final int     DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_DEFAULT = 2;
+  public static final String  DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY = "dfs.namenode.edits.dir.minimum";
+  public static final int     DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT = 1;
   
   public static final String  DFS_LIST_LIMIT = "dfs.ls.limit";
   public static final int     DFS_LIST_LIMIT_DEFAULT = 1000;
@@ -164,6 +166,7 @@ public class DFSConfigKeys extends Commo
   public static final String  DFS_NAMENODE_EDITS_DIR_KEY = "dfs.namenode.edits.dir";
   public static final String  DFS_NAMENODE_SHARED_EDITS_DIR_KEY = "dfs.namenode.shared.edits.dir";
   public static final String  DFS_NAMENODE_EDITS_PLUGIN_PREFIX = "dfs.namenode.edits.journal-plugin";
+  public static final String  DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY = "dfs.namenode.edits.dir.required";
   public static final String  DFS_CLIENT_READ_PREFETCH_SIZE_KEY = "dfs.client.read.prefetch.size";

   public static final String  DFS_CLIENT_RETRY_WINDOW_BASE= "dfs.client.retry.window.base";
   public static final String  DFS_METRICS_SESSION_ID_KEY = "dfs.metrics.session-id";
@@ -306,6 +309,8 @@ public class DFSConfigKeys extends Commo
   public static final String  DFS_NAMENODE_DU_RESERVED_KEY = "dfs.namenode.resource.du.reserved";
   public static final long    DFS_NAMENODE_DU_RESERVED_DEFAULT = 1024 * 1024 * 100; // 100 MB
   public static final String  DFS_NAMENODE_CHECKED_VOLUMES_KEY = "dfs.namenode.resource.checked.volumes";
+  public static final String  DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY = "dfs.namenode.resource.checked.volumes.minimum";
+  public static final int     DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_DEFAULT = 1;
   public static final String  DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY = "dfs.web.authentication.kerberos.principal";
   public static final String  DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY = "dfs.web.authentication.kerberos.keytab";
   

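The hunks above introduce dfs.namenode.edits.dir.minimum and dfs.namenode.resource.checked.volumes.minimum (each defaulting to 1), plus dfs.namenode.edits.dir.required. A hedged sketch of setting them through the standard Configuration API; the values chosen here are illustrative only:

    import org.apache.hadoop.conf.Configuration;

    public class ResourceKeysExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Require at least one functioning edits directory (the default per the diff).
        conf.setInt("dfs.namenode.edits.dir.minimum", 1);
        // Tolerate low resources on all but two of the checked volumes.
        conf.setInt("dfs.namenode.resource.checked.volumes.minimum", 2);
        System.out.println(
            conf.getInt("dfs.namenode.resource.checked.volumes.minimum", 1));
      }
    }
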
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java?rev=1211749&r1=1211748&r2=1211749&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java Thu Dec  8 02:57:47 2011
@@ -67,6 +67,11 @@ public class BlockLocalPathInfo implemen
   public String getBlockPath() {return localBlockPath;}
   
   /**
+   * @return the Block
+   */
+  public ExtendedBlock getBlock() { return block;}
+  
+  /**
    * Get the Block metadata file.
    * @return Block metadata file.
    */

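A hedged usage sketch for the new getBlock() accessor added above; the helper method is hypothetical, and the ExtendedBlock getters are assumed to be the standard ones:

    import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

    public class BlockLocalPathInfoExample {
      // Hypothetical helper: reads the accessors visible in the diff above.
      static String describe(BlockLocalPathInfo info) {
        ExtendedBlock block = info.getBlock(); // new accessor from this merge
        return block.getBlockPoolId() + ":" + block.getBlockId()
            + " -> " + info.getBlockPath();
      }
    }
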
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/HdfsProtos.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/HdfsProtos.java?rev=1211749&r1=1211748&r2=1211749&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/HdfsProtos.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/HdfsProtos.java Thu Dec  8 02:57:47 2011
@@ -8,7 +8,7 @@ public final class HdfsProtos {
   public static void registerAllExtensions(
       com.google.protobuf.ExtensionRegistry registry) {
   }
-  public enum ReplicaState
+  public enum ReplicaStateProto
       implements com.google.protobuf.ProtocolMessageEnum {
     FINALIZED(0, 0),
     RBW(1, 1),
@@ -26,7 +26,7 @@ public final class HdfsProtos {
     
     public final int getNumber() { return value; }
     
-    public static ReplicaState valueOf(int value) {
+    public static ReplicaStateProto valueOf(int value) {
       switch (value) {
         case 0: return FINALIZED;
         case 1: return RBW;
@@ -37,15 +37,15 @@ public final class HdfsProtos {
       }
     }
     
-    public static com.google.protobuf.Internal.EnumLiteMap<ReplicaState>
+    public static com.google.protobuf.Internal.EnumLiteMap<ReplicaStateProto>
         internalGetValueMap() {
       return internalValueMap;
     }
-    private static com.google.protobuf.Internal.EnumLiteMap<ReplicaState>
+    private static com.google.protobuf.Internal.EnumLiteMap<ReplicaStateProto>
         internalValueMap =
-          new com.google.protobuf.Internal.EnumLiteMap<ReplicaState>() {
-            public ReplicaState findValueByNumber(int number) {
-              return ReplicaState.valueOf(number);
+          new com.google.protobuf.Internal.EnumLiteMap<ReplicaStateProto>() {
+            public ReplicaStateProto findValueByNumber(int number) {
+              return ReplicaStateProto.valueOf(number);
             }
           };
     
@@ -62,11 +62,11 @@ public final class HdfsProtos {
       return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(0);
     }
     
-    private static final ReplicaState[] VALUES = {
+    private static final ReplicaStateProto[] VALUES = {
       FINALIZED, RBW, RWR, RUR, TEMPORARY, 
     };
     
-    public static ReplicaState valueOf(
+    public static ReplicaStateProto valueOf(
         com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
       if (desc.getType() != getDescriptor()) {
         throw new java.lang.IllegalArgumentException(
@@ -78,12 +78,12 @@ public final class HdfsProtos {
     private final int index;
     private final int value;
     
-    private ReplicaState(int index, int value) {
+    private ReplicaStateProto(int index, int value) {
       this.index = index;
       this.value = value;
     }
     
-    // @@protoc_insertion_point(enum_scope:ReplicaState)
+    // @@protoc_insertion_point(enum_scope:ReplicaStateProto)
   }
   
   public interface ExtendedBlockProtoOrBuilder
@@ -14903,15 +14903,10 @@ public final class HdfsProtos {
     org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock();
     org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder();
     
-    // repeated .DatanodeIDProto datanodeIDs = 2;
-    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto>
-        getDatanodeIDsList();
-    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeIDs(int index);
+    // repeated string datanodeIDs = 2;
+    java.util.List<String> getDatanodeIDsList();
     int getDatanodeIDsCount();
-    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>
-        getDatanodeIDsOrBuilderList();
-    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDsOrBuilder(
-        int index);
+    String getDatanodeIDs(int index);
   }
   public static final class BlockWithLocationsProto extends
       com.google.protobuf.GeneratedMessage
@@ -14955,30 +14950,23 @@ public final class HdfsProtos {
       return block_;
     }
     
-    // repeated .DatanodeIDProto datanodeIDs = 2;
+    // repeated string datanodeIDs = 2;
     public static final int DATANODEIDS_FIELD_NUMBER = 2;
-    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> datanodeIDs_;
-    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> getDatanodeIDsList() {
-      return datanodeIDs_;
-    }
-    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>
-        getDatanodeIDsOrBuilderList() {
+    private com.google.protobuf.LazyStringList datanodeIDs_;
+    public java.util.List<String>
+        getDatanodeIDsList() {
       return datanodeIDs_;
     }
     public int getDatanodeIDsCount() {
       return datanodeIDs_.size();
     }
-    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeIDs(int index) {
-      return datanodeIDs_.get(index);
-    }
-    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDsOrBuilder(
-        int index) {
+    public String getDatanodeIDs(int index) {
       return datanodeIDs_.get(index);
     }
     
     private void initFields() {
       block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
-      datanodeIDs_ = java.util.Collections.emptyList();
+      datanodeIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -14993,12 +14981,6 @@ public final class HdfsProtos {
         memoizedIsInitialized = 0;
         return false;
       }
-      for (int i = 0; i < getDatanodeIDsCount(); i++) {
-        if (!getDatanodeIDs(i).isInitialized()) {
-          memoizedIsInitialized = 0;
-          return false;
-        }
-      }
       memoizedIsInitialized = 1;
       return true;
     }
@@ -15010,7 +14992,7 @@ public final class HdfsProtos {
         output.writeMessage(1, block_);
       }
       for (int i = 0; i < datanodeIDs_.size(); i++) {
-        output.writeMessage(2, datanodeIDs_.get(i));
+        output.writeBytes(2, datanodeIDs_.getByteString(i));
       }
       getUnknownFields().writeTo(output);
     }
@@ -15025,9 +15007,14 @@ public final class HdfsProtos {
         size += com.google.protobuf.CodedOutputStream
           .computeMessageSize(1, block_);
       }
-      for (int i = 0; i < datanodeIDs_.size(); i++) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeMessageSize(2, datanodeIDs_.get(i));
+      {
+        int dataSize = 0;
+        for (int i = 0; i < datanodeIDs_.size(); i++) {
+          dataSize += com.google.protobuf.CodedOutputStream
+            .computeBytesSizeNoTag(datanodeIDs_.getByteString(i));
+        }
+        size += dataSize;
+        size += 1 * getDatanodeIDsList().size();
       }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
@@ -15185,7 +15172,6 @@ public final class HdfsProtos {
       private void maybeForceBuilderInitialization() {
         if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
           getBlockFieldBuilder();
-          getDatanodeIDsFieldBuilder();
         }
       }
       private static Builder create() {
@@ -15200,12 +15186,8 @@ public final class HdfsProtos {
           blockBuilder_.clear();
         }
         bitField0_ = (bitField0_ & ~0x00000001);
-        if (datanodeIDsBuilder_ == null) {
-          datanodeIDs_ = java.util.Collections.emptyList();
-          bitField0_ = (bitField0_ & ~0x00000002);
-        } else {
-          datanodeIDsBuilder_.clear();
-        }
+        datanodeIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000002);
         return this;
       }
       
@@ -15252,15 +15234,12 @@ public final class HdfsProtos {
         } else {
           result.block_ = blockBuilder_.build();
         }
-        if (datanodeIDsBuilder_ == null) {
-          if (((bitField0_ & 0x00000002) == 0x00000002)) {
-            datanodeIDs_ = java.util.Collections.unmodifiableList(datanodeIDs_);
-            bitField0_ = (bitField0_ & ~0x00000002);
-          }
-          result.datanodeIDs_ = datanodeIDs_;
-        } else {
-          result.datanodeIDs_ = datanodeIDsBuilder_.build();
+        if (((bitField0_ & 0x00000002) == 0x00000002)) {
+          datanodeIDs_ = new com.google.protobuf.UnmodifiableLazyStringList(
+              datanodeIDs_);
+          bitField0_ = (bitField0_ & ~0x00000002);
         }
+        result.datanodeIDs_ = datanodeIDs_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -15280,31 +15259,15 @@ public final class HdfsProtos {
         if (other.hasBlock()) {
           mergeBlock(other.getBlock());
         }
-        if (datanodeIDsBuilder_ == null) {
-          if (!other.datanodeIDs_.isEmpty()) {
-            if (datanodeIDs_.isEmpty()) {
-              datanodeIDs_ = other.datanodeIDs_;
-              bitField0_ = (bitField0_ & ~0x00000002);
-            } else {
-              ensureDatanodeIDsIsMutable();
-              datanodeIDs_.addAll(other.datanodeIDs_);
-            }
-            onChanged();
-          }
-        } else {
-          if (!other.datanodeIDs_.isEmpty()) {
-            if (datanodeIDsBuilder_.isEmpty()) {
-              datanodeIDsBuilder_.dispose();
-              datanodeIDsBuilder_ = null;
-              datanodeIDs_ = other.datanodeIDs_;
-              bitField0_ = (bitField0_ & ~0x00000002);
-              datanodeIDsBuilder_ = 
-                com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
-                   getDatanodeIDsFieldBuilder() : null;
-            } else {
-              datanodeIDsBuilder_.addAllMessages(other.datanodeIDs_);
-            }
+        if (!other.datanodeIDs_.isEmpty()) {
+          if (datanodeIDs_.isEmpty()) {
+            datanodeIDs_ = other.datanodeIDs_;
+            bitField0_ = (bitField0_ & ~0x00000002);
+          } else {
+            ensureDatanodeIDsIsMutable();
+            datanodeIDs_.addAll(other.datanodeIDs_);
           }
+          onChanged();
         }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
@@ -15319,12 +15282,6 @@ public final class HdfsProtos {
           
           return false;
         }
-        for (int i = 0; i < getDatanodeIDsCount(); i++) {
-          if (!getDatanodeIDs(i).isInitialized()) {
-            
-            return false;
-          }
-        }
         return true;
       }
       
@@ -15361,9 +15318,8 @@ public final class HdfsProtos {
               break;
             }
             case 18: {
-              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder();
-              input.readMessage(subBuilder, extensionRegistry);
-              addDatanodeIDs(subBuilder.buildPartial());
+              ensureDatanodeIDsIsMutable();
+              datanodeIDs_.add(input.readBytes());
               break;
             }
           }
@@ -15462,190 +15418,60 @@ public final class HdfsProtos {
         return blockBuilder_;
       }
       
-      // repeated .DatanodeIDProto datanodeIDs = 2;
-      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> datanodeIDs_ =
-        java.util.Collections.emptyList();
+      // repeated string datanodeIDs = 2;
+      private com.google.protobuf.LazyStringList datanodeIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY;
       private void ensureDatanodeIDsIsMutable() {
         if (!((bitField0_ & 0x00000002) == 0x00000002)) {
-          datanodeIDs_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto>(datanodeIDs_);
+          datanodeIDs_ = new com.google.protobuf.LazyStringArrayList(datanodeIDs_);
           bitField0_ |= 0x00000002;
          }
       }
-      
-      private com.google.protobuf.RepeatedFieldBuilder<
-          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> datanodeIDsBuilder_;
-      
-      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> getDatanodeIDsList() {
-        if (datanodeIDsBuilder_ == null) {
-          return java.util.Collections.unmodifiableList(datanodeIDs_);
-        } else {
-          return datanodeIDsBuilder_.getMessageList();
-        }
+      public java.util.List<String>
+          getDatanodeIDsList() {
+        return java.util.Collections.unmodifiableList(datanodeIDs_);
       }
       public int getDatanodeIDsCount() {
-        if (datanodeIDsBuilder_ == null) {
-          return datanodeIDs_.size();
-        } else {
-          return datanodeIDsBuilder_.getCount();
-        }
+        return datanodeIDs_.size();
       }
-      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeIDs(int index) {
-        if (datanodeIDsBuilder_ == null) {
-          return datanodeIDs_.get(index);
-        } else {
-          return datanodeIDsBuilder_.getMessage(index);
-        }
+      public String getDatanodeIDs(int index) {
+        return datanodeIDs_.get(index);
       }
       public Builder setDatanodeIDs(
-          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
-        if (datanodeIDsBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensureDatanodeIDsIsMutable();
-          datanodeIDs_.set(index, value);
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.setMessage(index, value);
-        }
-        return this;
-      }
-      public Builder setDatanodeIDs(
-          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
-        if (datanodeIDsBuilder_ == null) {
-          ensureDatanodeIDsIsMutable();
-          datanodeIDs_.set(index, builderForValue.build());
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.setMessage(index, builderForValue.build());
-        }
-        return this;
-      }
-      public Builder addDatanodeIDs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
-        if (datanodeIDsBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensureDatanodeIDsIsMutable();
-          datanodeIDs_.add(value);
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.addMessage(value);
-        }
-        return this;
-      }
-      public Builder addDatanodeIDs(
-          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
-        if (datanodeIDsBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensureDatanodeIDsIsMutable();
-          datanodeIDs_.add(index, value);
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.addMessage(index, value);
-        }
-        return this;
-      }
-      public Builder addDatanodeIDs(
-          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
-        if (datanodeIDsBuilder_ == null) {
-          ensureDatanodeIDsIsMutable();
-          datanodeIDs_.add(builderForValue.build());
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.addMessage(builderForValue.build());
-        }
+          int index, String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  ensureDatanodeIDsIsMutable();
+        datanodeIDs_.set(index, value);
+        onChanged();
         return this;
       }
-      public Builder addDatanodeIDs(
-          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
-        if (datanodeIDsBuilder_ == null) {
-          ensureDatanodeIDsIsMutable();
-          datanodeIDs_.add(index, builderForValue.build());
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.addMessage(index, builderForValue.build());
-        }
+      public Builder addDatanodeIDs(String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  ensureDatanodeIDsIsMutable();
+        datanodeIDs_.add(value);
+        onChanged();
         return this;
       }
       public Builder addAllDatanodeIDs(
-          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> values) {
-        if (datanodeIDsBuilder_ == null) {
-          ensureDatanodeIDsIsMutable();
-          super.addAll(values, datanodeIDs_);
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.addAllMessages(values);
-        }
+          java.lang.Iterable<String> values) {
+        ensureDatanodeIDsIsMutable();
+        super.addAll(values, datanodeIDs_);
+        onChanged();
         return this;
       }
       public Builder clearDatanodeIDs() {
-        if (datanodeIDsBuilder_ == null) {
-          datanodeIDs_ = java.util.Collections.emptyList();
-          bitField0_ = (bitField0_ & ~0x00000002);
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.clear();
-        }
-        return this;
-      }
-      public Builder removeDatanodeIDs(int index) {
-        if (datanodeIDsBuilder_ == null) {
-          ensureDatanodeIDsIsMutable();
-          datanodeIDs_.remove(index);
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.remove(index);
-        }
+        datanodeIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        onChanged();
         return this;
       }
-      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder getDatanodeIDsBuilder(
-          int index) {
-        return getDatanodeIDsFieldBuilder().getBuilder(index);
-      }
-      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDsOrBuilder(
-          int index) {
-        if (datanodeIDsBuilder_ == null) {
-          return datanodeIDs_.get(index);  } else {
-          return datanodeIDsBuilder_.getMessageOrBuilder(index);
-        }
-      }
-      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>
-           getDatanodeIDsOrBuilderList() {
-        if (datanodeIDsBuilder_ != null) {
-          return datanodeIDsBuilder_.getMessageOrBuilderList();
-        } else {
-          return java.util.Collections.unmodifiableList(datanodeIDs_);
-        }
-      }
-      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder addDatanodeIDsBuilder() {
-        return getDatanodeIDsFieldBuilder().addBuilder(
-            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance());
-      }
-      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder addDatanodeIDsBuilder(
-          int index) {
-        return getDatanodeIDsFieldBuilder().addBuilder(
-            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance());
-      }
-      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder>
-           getDatanodeIDsBuilderList() {
-        return getDatanodeIDsFieldBuilder().getBuilderList();
-      }
-      private com.google.protobuf.RepeatedFieldBuilder<
-          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>
-          getDatanodeIDsFieldBuilder() {
-        if (datanodeIDsBuilder_ == null) {
-          datanodeIDsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
-              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>(
-                  datanodeIDs_,
-                  ((bitField0_ & 0x00000002) == 0x00000002),
-                  getParentForChildren(),
-                  isClean());
-          datanodeIDs_ = null;
-        }
-        return datanodeIDsBuilder_;
+      void addDatanodeIDs(com.google.protobuf.ByteString value) {
+        ensureDatanodeIDsIsMutable();
+        datanodeIDs_.add(value);
+        onChanged();
       }
       
       // @@protoc_insertion_point(builder_scope:BlockWithLocationsProto)
@@ -20348,29 +20174,28 @@ public final class HdfsProtos {
      "ature\030\001 \002(\0132\031.CheckpointSignatureProto\022\031" +
      "\n\021needToReturnImage\030\002 \002(\010\"A\n\nBlockProto\022" +
      "\017\n\007blockId\030\001 \002(\004\022\020\n\010genStamp\030\002 \002(\004\022\020\n\010nu" +
-      "mBytes\030\003 \001(\004\"\\\n\027BlockWithLocationsProto\022",
-      "\032\n\005block\030\001 \002(\0132\013.BlockProto\022%\n\013datanodeI" +
-      "Ds\030\002 \003(\0132\020.DatanodeIDProto\"D\n\030BlocksWith" +
-      "LocationsProto\022(\n\006blocks\030\001 \003(\0132\030.BlockWi" +
-      "thLocationsProto\"8\n\022RemoteEditLogProto\022\021" +
-      "\n\tstartTxId\030\001 \002(\004\022\017\n\007endTxId\030\002 \002(\004\"?\n\032Re" +
-      "moteEditLogManifestProto\022!\n\004logs\030\001 \003(\0132\023" +
-      ".RemoteEditLogProto\"\203\001\n\022NamespaceInfoPro" +
-      "to\022\024\n\014buildVersion\030\001 \002(\t\022\032\n\022distUpgradeV" +
-      "ersion\030\002 \002(\r\022\023\n\013blockPoolID\030\003 \002(\t\022&\n\013sto" +
-      "rageInfo\030\004 \002(\0132\021.StorageInfoProto\"D\n\rBlo",
-      "ckKeyProto\022\r\n\005keyId\030\001 \002(\r\022\022\n\nexpiryDate\030" +
-      "\002 \002(\004\022\020\n\010keyBytes\030\003 \002(\014\"\254\001\n\026ExportedBloc" +
-      "kKeysProto\022\033\n\023isBlockTokenEnabled\030\001 \002(\010\022" +
-      "\031\n\021keyUpdateInterval\030\002 \002(\004\022\025\n\rtokenLifeT" +
-      "ime\030\003 \002(\004\022\"\n\ncurrentKey\030\004 \002(\0132\016.BlockKey" +
-      "Proto\022\037\n\007allKeys\030\005 \003(\0132\016.BlockKeyProto\"N" +
-      "\n\024RecoveringBlockProto\022\023\n\013newGenStamp\030\001 " +
-      "\002(\004\022!\n\005block\030\002 \002(\0132\022.LocatedBlockProto*G" +
-      "\n\014ReplicaState\022\r\n\tFINALIZED\020\000\022\007\n\003RBW\020\001\022\007" +
-      "\n\003RWR\020\002\022\007\n\003RUR\020\003\022\r\n\tTEMPORARY\020\004B6\n%org.a",
-      "pache.hadoop.hdfs.protocol.protoB\nHdfsPr" +
-      "otos\240\001\001"
+      "mBytes\030\003 \001(\004\"J\n\027BlockWithLocationsProto\022",
+      "\032\n\005block\030\001 \002(\0132\013.BlockProto\022\023\n\013datanodeI" +
+      "Ds\030\002 \003(\t\"D\n\030BlocksWithLocationsProto\022(\n\006" +
+      "blocks\030\001 \003(\0132\030.BlockWithLocationsProto\"8" +
+      "\n\022RemoteEditLogProto\022\021\n\tstartTxId\030\001 \002(\004\022" +
+      "\017\n\007endTxId\030\002 \002(\004\"?\n\032RemoteEditLogManifes" +
+      "tProto\022!\n\004logs\030\001 \003(\0132\023.RemoteEditLogProt" +
+      "o\"\203\001\n\022NamespaceInfoProto\022\024\n\014buildVersion" +
+      "\030\001 \002(\t\022\032\n\022distUpgradeVersion\030\002 \002(\r\022\023\n\013bl" +
+      "ockPoolID\030\003 \002(\t\022&\n\013storageInfo\030\004 \002(\0132\021.S" +
+      "torageInfoProto\"D\n\rBlockKeyProto\022\r\n\005keyI",
+      "d\030\001 \002(\r\022\022\n\nexpiryDate\030\002 \002(\004\022\020\n\010keyBytes\030" +
+      "\003 \002(\014\"\254\001\n\026ExportedBlockKeysProto\022\033\n\023isBl" +
+      "ockTokenEnabled\030\001 \002(\010\022\031\n\021keyUpdateInterv" +
+      "al\030\002 \002(\004\022\025\n\rtokenLifeTime\030\003 \002(\004\022\"\n\ncurre" +
+      "ntKey\030\004 \002(\0132\016.BlockKeyProto\022\037\n\007allKeys\030\005" +
+      " \003(\0132\016.BlockKeyProto\"N\n\024RecoveringBlockP" +
+      "roto\022\023\n\013newGenStamp\030\001 \002(\004\022!\n\005block\030\002 \002(\013" +
+      "2\022.LocatedBlockProto*L\n\021ReplicaStateProt" +
+      "o\022\r\n\tFINALIZED\020\000\022\007\n\003RBW\020\001\022\007\n\003RWR\020\002\022\007\n\003RU" +
+      "R\020\003\022\r\n\tTEMPORARY\020\004B6\n%org.apache.hadoop.",
+      "hdfs.protocol.protoB\nHdfsProtos\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {

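The net effect of the hunks above is that BlockWithLocationsProto now carries datanode IDs as plain strings rather than nested DatanodeIDProto messages. A minimal hedged sketch of building one with the new generated API; the field values are illustrative, and the builder methods follow the setters and adders visible in the diff:

    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;

    public class BlockWithLocationsExample {
      public static void main(String[] args) {
        HdfsProtos.BlockProto block = HdfsProtos.BlockProto.newBuilder()
            .setBlockId(1L)
            .setGenStamp(1001L)
            .setNumBytes(512L)
            .build();
        HdfsProtos.BlockWithLocationsProto proto =
            HdfsProtos.BlockWithLocationsProto.newBuilder()
                .setBlock(block)
                .addDatanodeIDs("dn-1:50010") // plain strings after this change
                .addDatanodeIDs("dn-2:50010")
                .build();
        System.out.println(proto.getDatanodeIDsCount()); // 2
      }
    }
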
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/InterDatanodeProtocolProtos.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/InterDatanodeProtocolProtos.java?rev=1211749&r1=1211748&r2=1211749&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/InterDatanodeProtocolProtos.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/InterDatanodeProtocolProtos.java Thu Dec  8 02:57:47 2011
@@ -484,9 +484,9 @@ public final class InterDatanodeProtocol
   public interface InitReplicaRecoveryResponseProtoOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
     
-    // required .ReplicaState state = 1;
+    // required .ReplicaStateProto state = 1;
     boolean hasState();
-    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState getState();
+    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto getState();
     
     // required .BlockProto block = 2;
     boolean hasBlock();
@@ -522,13 +522,13 @@ public final class InterDatanodeProtocol
     }
     
     private int bitField0_;
-    // required .ReplicaState state = 1;
+    // required .ReplicaStateProto state = 1;
     public static final int STATE_FIELD_NUMBER = 1;
-    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState state_;
+    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto state_;
     public boolean hasState() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
     }
-    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState getState() {
+    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto getState() {
       return state_;
     }
     
@@ -546,7 +546,7 @@ public final class InterDatanodeProtocol
     }
     
     private void initFields() {
-      state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState.FINALIZED;
+      state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto.FINALIZED;
       block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
     }
     private byte memoizedIsInitialized = -1;
@@ -763,7 +763,7 @@ public final class InterDatanodeProtocol
       
       public Builder clear() {
         super.clear();
-        state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState.FINALIZED;
+        state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto.FINALIZED;
         bitField0_ = (bitField0_ & ~0x00000001);
         if (blockBuilder_ == null) {
           block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
@@ -888,7 +888,7 @@ public final class InterDatanodeProtocol
             }
             case 8: {
               int rawValue = input.readEnum();
-              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState.valueOf(rawValue);
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto.valueOf(rawValue);
               if (value == null) {
                 unknownFields.mergeVarintField(1, rawValue);
               } else {
@@ -912,15 +912,15 @@ public final class InterDatanodeProtocol
       
       private int bitField0_;
       
-      // required .ReplicaState state = 1;
-      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState.FINALIZED;
+      // required .ReplicaStateProto state = 1;
+      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto.FINALIZED;
       public boolean hasState() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
       }
-      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState getState() {
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto getState() {
         return state_;
       }
-      public Builder setState(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState value) {
+      public Builder setState(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto value) {
         if (value == null) {
           throw new NullPointerException();
         }
@@ -931,7 +931,7 @@ public final class InterDatanodeProtocol
       }
       public Builder clearState() {
         bitField0_ = (bitField0_ & ~0x00000001);
-        state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState.FINALIZED;
+        state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto.FINALIZED;
         onChanged();
         return this;
       }
@@ -2448,22 +2448,23 @@ public final class InterDatanodeProtocol
     java.lang.String[] descriptorData = {
       "\n\033InterDatanodeProtocol.proto\032\nhdfs.prot" +
       "o\"G\n\037InitReplicaRecoveryRequestProto\022$\n\005" +
-      "block\030\001 \002(\0132\025.RecoveringBlockProto\"\\\n In" +
-      "itReplicaRecoveryResponseProto\022\034\n\005state\030" +
-      "\001 \002(\0162\r.ReplicaState\022\032\n\005block\030\002 \002(\0132\013.Bl" +
-      "ockProto\"s\n&UpdateReplicaUnderRecoveryRe" +
-      "questProto\022\"\n\005block\030\001 \002(\0132\023.ExtendedBloc" +
-      "kProto\022\022\n\nrecoveryId\030\002 \002(\004\022\021\n\tnewLength\030" +
-      "\003 \002(\004\"M\n\'UpdateReplicaUnderRecoveryRespo" +
-      "nseProto\022\"\n\005block\030\001 \002(\0132\023.ExtendedBlockP",
-      "roto2\353\001\n\034InterDatanodeProtocolService\022Z\n" +
-      "\023initReplicaRecovery\022 .InitReplicaRecove" +
-      "ryRequestProto\032!.InitReplicaRecoveryResp" +
-      "onseProto\022o\n\032updateReplicaUnderRecovery\022" +
-      "\'.UpdateReplicaUnderRecoveryRequestProto" +
-      "\032(.UpdateReplicaUnderRecoveryResponsePro" +
-      "toBJ\n%org.apache.hadoop.hdfs.protocol.pr" +
-      "otoB\033InterDatanodeProtocolProtos\210\001\001\240\001\001"
+      "block\030\001 \002(\0132\025.RecoveringBlockProto\"a\n In" +
+      "itReplicaRecoveryResponseProto\022!\n\005state\030" +
+      "\001 \002(\0162\022.ReplicaStateProto\022\032\n\005block\030\002 \002(\013" +
+      "2\013.BlockProto\"s\n&UpdateReplicaUnderRecov" +
+      "eryRequestProto\022\"\n\005block\030\001 \002(\0132\023.Extende" +
+      "dBlockProto\022\022\n\nrecoveryId\030\002 \002(\004\022\021\n\tnewLe" +
+      "ngth\030\003 \002(\004\"M\n\'UpdateReplicaUnderRecovery" +
+      "ResponseProto\022\"\n\005block\030\001 \002(\0132\023.ExtendedB",
+      "lockProto2\353\001\n\034InterDatanodeProtocolServi" +
+      "ce\022Z\n\023initReplicaRecovery\022 .InitReplicaR" +
+      "ecoveryRequestProto\032!.InitReplicaRecover" +
+      "yResponseProto\022o\n\032updateReplicaUnderReco" +
+      "very\022\'.UpdateReplicaUnderRecoveryRequest" +
+      "Proto\032(.UpdateReplicaUnderRecoveryRespon" +
+      "seProtoBJ\n%org.apache.hadoop.hdfs.protoc" +
+      "ol.protoB\033InterDatanodeProtocolProtos\210\001\001" +
+      "\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {

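With the ReplicaState enum renamed to ReplicaStateProto, callers construct recovery responses as in this hedged sketch; the values are illustrative, and setState/setBlock are the builder methods visible in the diff above:

    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
    import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos;

    public class ReplicaStateProtoExample {
      public static void main(String[] args) {
        HdfsProtos.BlockProto block = HdfsProtos.BlockProto.newBuilder()
            .setBlockId(1L)
            .setGenStamp(1001L)
            .build();
        InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto resp =
            InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.newBuilder()
                .setState(HdfsProtos.ReplicaStateProto.FINALIZED) // renamed enum
                .setBlock(block)
                .build();
        System.out.println(resp.getState()); // FINALIZED
      }
    }
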

