hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From sur...@apache.org
Subject svn commit: r1204544 [2/4] - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/protocol/proto/ src/proto/
Date Mon, 21 Nov 2011 15:39:33 GMT
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/HdfsProtos.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/HdfsProtos.java?rev=1204544&r1=1204543&r2=1204544&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/HdfsProtos.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/HdfsProtos.java Mon Nov 21 15:39:33 2011
@@ -1,20 +1,3 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
 // Generated by the protocol buffer compiler.  DO NOT EDIT!
 // source: hdfs.proto
 
@@ -25,6 +8,84 @@ public final class HdfsProtos {
   public static void registerAllExtensions(
       com.google.protobuf.ExtensionRegistry registry) {
   }
+  public enum ReplicaState
+      implements com.google.protobuf.ProtocolMessageEnum {
+    FINALIZED(0, 0),
+    RBW(1, 1),
+    RWR(2, 2),
+    RUR(3, 3),
+    TEMPORARY(4, 4),
+    ;
+    
+    public static final int FINALIZED_VALUE = 0;
+    public static final int RBW_VALUE = 1;
+    public static final int RWR_VALUE = 2;
+    public static final int RUR_VALUE = 3;
+    public static final int TEMPORARY_VALUE = 4;
+    
+    
+    public final int getNumber() { return value; }
+    
+    public static ReplicaState valueOf(int value) {
+      switch (value) {
+        case 0: return FINALIZED;
+        case 1: return RBW;
+        case 2: return RWR;
+        case 3: return RUR;
+        case 4: return TEMPORARY;
+        default: return null;
+      }
+    }
+    
+    public static com.google.protobuf.Internal.EnumLiteMap<ReplicaState>
+        internalGetValueMap() {
+      return internalValueMap;
+    }
+    private static com.google.protobuf.Internal.EnumLiteMap<ReplicaState>
+        internalValueMap =
+          new com.google.protobuf.Internal.EnumLiteMap<ReplicaState>() {
+            public ReplicaState findValueByNumber(int number) {
+              return ReplicaState.valueOf(number);
+            }
+          };
+    
+    public final com.google.protobuf.Descriptors.EnumValueDescriptor
+        getValueDescriptor() {
+      return getDescriptor().getValues().get(index);
+    }
+    public final com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptorForType() {
+      return getDescriptor();
+    }
+    public static final com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptor() {
+      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(0);
+    }
+    
+    private static final ReplicaState[] VALUES = {
+      FINALIZED, RBW, RWR, RUR, TEMPORARY, 
+    };
+    
+    public static ReplicaState valueOf(
+        com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+      if (desc.getType() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "EnumValueDescriptor is not for this type.");
+      }
+      return VALUES[desc.getIndex()];
+    }
+    
+    private final int index;
+    private final int value;
+    
+    private ReplicaState(int index, int value) {
+      this.index = index;
+      this.value = value;
+    }
+    
+    // @@protoc_insertion_point(enum_scope:ReplicaState)
+  }
+  
   public interface ExtendedBlockProtoOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
     
@@ -2011,1378 +2072,11845 @@ public final class HdfsProtos {
     // @@protoc_insertion_point(class_scope:DatanodeIDProto)
   }
   
-  public interface DatanodeInfoProtoOrBuilder
+  public interface DatanodeIDsProtoOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
     
-    // required .DatanodeIDProto id = 1;
-    boolean hasId();
-    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getId();
-    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getIdOrBuilder();
-    
-    // optional uint64 capacity = 2;
-    boolean hasCapacity();
-    long getCapacity();
-    
-    // optional uint64 dfsUsed = 3;
-    boolean hasDfsUsed();
-    long getDfsUsed();
-    
-    // optional uint64 remaining = 4;
-    boolean hasRemaining();
-    long getRemaining();
-    
-    // optional uint64 blockPoolUsed = 5;
-    boolean hasBlockPoolUsed();
-    long getBlockPoolUsed();
-    
-    // optional uint64 lastUpdate = 6;
-    boolean hasLastUpdate();
-    long getLastUpdate();
-    
-    // optional uint32 xceiverCount = 7;
-    boolean hasXceiverCount();
-    int getXceiverCount();
-    
-    // optional string location = 8;
-    boolean hasLocation();
-    String getLocation();
-    
-    // optional string hostName = 9;
-    boolean hasHostName();
-    String getHostName();
-    
-    // optional .DatanodeInfoProto.AdminState adminState = 10;
-    boolean hasAdminState();
-    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState getAdminState();
+    // repeated .DatanodeIDProto datanodes = 1;
+    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> 
+        getDatanodesList();
+    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodes(int index);
+    int getDatanodesCount();
+    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> 
+        getDatanodesOrBuilderList();
+    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodesOrBuilder(
+        int index);
   }
-  public static final class DatanodeInfoProto extends
+  public static final class DatanodeIDsProto extends
       com.google.protobuf.GeneratedMessage
-      implements DatanodeInfoProtoOrBuilder {
-    // Use DatanodeInfoProto.newBuilder() to construct.
-    private DatanodeInfoProto(Builder builder) {
+      implements DatanodeIDsProtoOrBuilder {
+    // Use DatanodeIDsProto.newBuilder() to construct.
+    private DatanodeIDsProto(Builder builder) {
       super(builder);
     }
-    private DatanodeInfoProto(boolean noInit) {}
+    private DatanodeIDsProto(boolean noInit) {}
     
-    private static final DatanodeInfoProto defaultInstance;
-    public static DatanodeInfoProto getDefaultInstance() {
+    private static final DatanodeIDsProto defaultInstance;
+    public static DatanodeIDsProto getDefaultInstance() {
       return defaultInstance;
     }
     
-    public DatanodeInfoProto getDefaultInstanceForType() {
+    public DatanodeIDsProto getDefaultInstanceForType() {
       return defaultInstance;
     }
     
     public static final com.google.protobuf.Descriptors.Descriptor
         getDescriptor() {
-      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeInfoProto_descriptor;
+      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeIDsProto_descriptor;
     }
     
     protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
         internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeInfoProto_fieldAccessorTable;
-    }
-    
-    public enum AdminState
-        implements com.google.protobuf.ProtocolMessageEnum {
-      NORMAL(0, 0),
-      DECOMMISSION_INPROGRESS(1, 1),
-      DECOMMISSIONED(2, 2),
-      ;
-      
-      public static final int NORMAL_VALUE = 0;
-      public static final int DECOMMISSION_INPROGRESS_VALUE = 1;
-      public static final int DECOMMISSIONED_VALUE = 2;
-      
-      
-      public final int getNumber() { return value; }
-      
-      public static AdminState valueOf(int value) {
-        switch (value) {
-          case 0: return NORMAL;
-          case 1: return DECOMMISSION_INPROGRESS;
-          case 2: return DECOMMISSIONED;
-          default: return null;
-        }
-      }
-      
-      public static com.google.protobuf.Internal.EnumLiteMap<AdminState>
-          internalGetValueMap() {
-        return internalValueMap;
-      }
-      private static com.google.protobuf.Internal.EnumLiteMap<AdminState>
-          internalValueMap =
-            new com.google.protobuf.Internal.EnumLiteMap<AdminState>() {
-              public AdminState findValueByNumber(int number) {
-                return AdminState.valueOf(number);
-              }
-            };
-      
-      public final com.google.protobuf.Descriptors.EnumValueDescriptor
-          getValueDescriptor() {
-        return getDescriptor().getValues().get(index);
-      }
-      public final com.google.protobuf.Descriptors.EnumDescriptor
-          getDescriptorForType() {
-        return getDescriptor();
-      }
-      public static final com.google.protobuf.Descriptors.EnumDescriptor
-          getDescriptor() {
-        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDescriptor().getEnumTypes().get(0);
-      }
-      
-      private static final AdminState[] VALUES = {
-        NORMAL, DECOMMISSION_INPROGRESS, DECOMMISSIONED, 
-      };
-      
-      public static AdminState valueOf(
-          com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
-        if (desc.getType() != getDescriptor()) {
-          throw new java.lang.IllegalArgumentException(
-            "EnumValueDescriptor is not for this type.");
-        }
-        return VALUES[desc.getIndex()];
-      }
-      
-      private final int index;
-      private final int value;
-      
-      private AdminState(int index, int value) {
-        this.index = index;
-        this.value = value;
-      }
-      
-      // @@protoc_insertion_point(enum_scope:DatanodeInfoProto.AdminState)
+      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeIDsProto_fieldAccessorTable;
     }
     
-    private int bitField0_;
-    // required .DatanodeIDProto id = 1;
-    public static final int ID_FIELD_NUMBER = 1;
-    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto id_;
-    public boolean hasId() {
-      return ((bitField0_ & 0x00000001) == 0x00000001);
+    // repeated .DatanodeIDProto datanodes = 1;
+    public static final int DATANODES_FIELD_NUMBER = 1;
+    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> datanodes_;
+    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> getDatanodesList() {
+      return datanodes_;
     }
-    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getId() {
-      return id_;
+    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> 
+        getDatanodesOrBuilderList() {
+      return datanodes_;
     }
-    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getIdOrBuilder() {
-      return id_;
+    public int getDatanodesCount() {
+      return datanodes_.size();
     }
-    
-    // optional uint64 capacity = 2;
-    public static final int CAPACITY_FIELD_NUMBER = 2;
-    private long capacity_;
-    public boolean hasCapacity() {
-      return ((bitField0_ & 0x00000002) == 0x00000002);
+    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodes(int index) {
+      return datanodes_.get(index);
     }
-    public long getCapacity() {
-      return capacity_;
+    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodesOrBuilder(
+        int index) {
+      return datanodes_.get(index);
     }
     
-    // optional uint64 dfsUsed = 3;
-    public static final int DFSUSED_FIELD_NUMBER = 3;
-    private long dfsUsed_;
-    public boolean hasDfsUsed() {
-      return ((bitField0_ & 0x00000004) == 0x00000004);
+    private void initFields() {
+      datanodes_ = java.util.Collections.emptyList();
     }
-    public long getDfsUsed() {
-      return dfsUsed_;
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+      
+      for (int i = 0; i < getDatanodesCount(); i++) {
+        if (!getDatanodes(i).isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      memoizedIsInitialized = 1;
+      return true;
     }
     
-    // optional uint64 remaining = 4;
-    public static final int REMAINING_FIELD_NUMBER = 4;
-    private long remaining_;
-    public boolean hasRemaining() {
-      return ((bitField0_ & 0x00000008) == 0x00000008);
-    }
-    public long getRemaining() {
-      return remaining_;
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      for (int i = 0; i < datanodes_.size(); i++) {
+        output.writeMessage(1, datanodes_.get(i));
+      }
+      getUnknownFields().writeTo(output);
     }
     
-    // optional uint64 blockPoolUsed = 5;
-    public static final int BLOCKPOOLUSED_FIELD_NUMBER = 5;
-    private long blockPoolUsed_;
-    public boolean hasBlockPoolUsed() {
-      return ((bitField0_ & 0x00000010) == 0x00000010);
-    }
-    public long getBlockPoolUsed() {
-      return blockPoolUsed_;
-    }
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
     
-    // optional uint64 lastUpdate = 6;
-    public static final int LASTUPDATE_FIELD_NUMBER = 6;
-    private long lastUpdate_;
-    public boolean hasLastUpdate() {
-      return ((bitField0_ & 0x00000020) == 0x00000020);
-    }
-    public long getLastUpdate() {
-      return lastUpdate_;
+      size = 0;
+      for (int i = 0; i < datanodes_.size(); i++) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, datanodes_.get(i));
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
     }
     
-    // optional uint32 xceiverCount = 7;
-    public static final int XCEIVERCOUNT_FIELD_NUMBER = 7;
-    private int xceiverCount_;
-    public boolean hasXceiverCount() {
-      return ((bitField0_ & 0x00000040) == 0x00000040);
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
     }
-    public int getXceiverCount() {
-      return xceiverCount_;
+    
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto) obj;
+      
+      boolean result = true;
+      result = result && getDatanodesList()
+          .equals(other.getDatanodesList());
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
     }
     
-    // optional string location = 8;
-    public static final int LOCATION_FIELD_NUMBER = 8;
-    private java.lang.Object location_;
-    public boolean hasLocation() {
-      return ((bitField0_ & 0x00000080) == 0x00000080);
+    @java.lang.Override
+    public int hashCode() {
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (getDatanodesCount() > 0) {
+        hash = (37 * hash) + DATANODES_FIELD_NUMBER;
+        hash = (53 * hash) + getDatanodesList().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      return hash;
     }
-    public String getLocation() {
-      java.lang.Object ref = location_;
-      if (ref instanceof String) {
-        return (String) ref;
-      } else {
-        com.google.protobuf.ByteString bs = 
-            (com.google.protobuf.ByteString) ref;
-        String s = bs.toStringUtf8();
-        if (com.google.protobuf.Internal.isValidUtf8(bs)) {
-          location_ = s;
-        }
-        return s;
-      }
+    
+    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
     }
-    private com.google.protobuf.ByteString getLocationBytes() {
-      java.lang.Object ref = location_;
-      if (ref instanceof String) {
-        com.google.protobuf.ByteString b = 
-            com.google.protobuf.ByteString.copyFromUtf8((String) ref);
-        location_ = b;
-        return b;
-      } else {
-        return (com.google.protobuf.ByteString) ref;
-      }
+    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
     }
-    
-    // optional string hostName = 9;
-    public static final int HOSTNAME_FIELD_NUMBER = 9;
-    private java.lang.Object hostName_;
-    public boolean hasHostName() {
-      return ((bitField0_ & 0x00000100) == 0x00000100);
+    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
     }
-    public String getHostName() {
-      java.lang.Object ref = hostName_;
-      if (ref instanceof String) {
-        return (String) ref;
+    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input)) {
+        return builder.buildParsed();
       } else {
-        com.google.protobuf.ByteString bs = 
-            (com.google.protobuf.ByteString) ref;
-        String s = bs.toStringUtf8();
-        if (com.google.protobuf.Internal.isValidUtf8(bs)) {
-          hostName_ = s;
-        }
-        return s;
+        return null;
       }
     }
-    private com.google.protobuf.ByteString getHostNameBytes() {
-      java.lang.Object ref = hostName_;
-      if (ref instanceof String) {
-        com.google.protobuf.ByteString b = 
-            com.google.protobuf.ByteString.copyFromUtf8((String) ref);
-        hostName_ = b;
-        return b;
+    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+        return builder.buildParsed();
       } else {
-        return (com.google.protobuf.ByteString) ref;
+        return null;
       }
     }
-    
-    // optional .DatanodeInfoProto.AdminState adminState = 10;
-    public static final int ADMINSTATE_FIELD_NUMBER = 10;
-    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState adminState_;
-    public boolean hasAdminState() {
-      return ((bitField0_ & 0x00000200) == 0x00000200);
+    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
     }
-    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState getAdminState() {
-      return adminState_;
+    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
     }
     
-    private void initFields() {
-      id_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
-      capacity_ = 0L;
-      dfsUsed_ = 0L;
-      remaining_ = 0L;
-      blockPoolUsed_ = 0L;
-      lastUpdate_ = 0L;
-      xceiverCount_ = 0;
-      location_ = "";
-      hostName_ = "";
-      adminState_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.NORMAL;
-    }
-    private byte memoizedIsInitialized = -1;
-    public final boolean isInitialized() {
-      byte isInitialized = memoizedIsInitialized;
-      if (isInitialized != -1) return isInitialized == 1;
-      
-      if (!hasId()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
-      if (!getId().isInitialized()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
-      memoizedIsInitialized = 1;
-      return true;
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto prototype) {
+      return newBuilder().mergeFrom(prototype);
     }
+    public Builder toBuilder() { return newBuilder(this); }
     
-    public void writeTo(com.google.protobuf.CodedOutputStream output)
-                        throws java.io.IOException {
-      getSerializedSize();
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeMessage(1, id_);
-      }
-      if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        output.writeUInt64(2, capacity_);
-      }
-      if (((bitField0_ & 0x00000004) == 0x00000004)) {
-        output.writeUInt64(3, dfsUsed_);
-      }
-      if (((bitField0_ & 0x00000008) == 0x00000008)) {
-        output.writeUInt64(4, remaining_);
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeIDsProto_descriptor;
       }
-      if (((bitField0_ & 0x00000010) == 0x00000010)) {
-        output.writeUInt64(5, blockPoolUsed_);
+      
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeIDsProto_fieldAccessorTable;
       }
-      if (((bitField0_ & 0x00000020) == 0x00000020)) {
-        output.writeUInt64(6, lastUpdate_);
+      
+      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
       }
-      if (((bitField0_ & 0x00000040) == 0x00000040)) {
-        output.writeUInt32(7, xceiverCount_);
+      
+      private Builder(BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
       }
-      if (((bitField0_ & 0x00000080) == 0x00000080)) {
-        output.writeBytes(8, getLocationBytes());
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getDatanodesFieldBuilder();
+        }
       }
-      if (((bitField0_ & 0x00000100) == 0x00000100)) {
-        output.writeBytes(9, getHostNameBytes());
+      private static Builder create() {
+        return new Builder();
       }
-      if (((bitField0_ & 0x00000200) == 0x00000200)) {
-        output.writeEnum(10, adminState_.getNumber());
+      
+      public Builder clear() {
+        super.clear();
+        if (datanodesBuilder_ == null) {
+          datanodes_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000001);
+        } else {
+          datanodesBuilder_.clear();
+        }
+        return this;
       }
-      getUnknownFields().writeTo(output);
-    }
-    
-    private int memoizedSerializedSize = -1;
-    public int getSerializedSize() {
-      int size = memoizedSerializedSize;
-      if (size != -1) return size;
-    
-      size = 0;
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeMessageSize(1, id_);
+      
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
       }
-      if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeUInt64Size(2, capacity_);
+      
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto.getDescriptor();
       }
-      if (((bitField0_ & 0x00000004) == 0x00000004)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeUInt64Size(3, dfsUsed_);
+      
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto getDefaultInstanceForType() {
+        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto.getDefaultInstance();
       }
-      if (((bitField0_ & 0x00000008) == 0x00000008)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeUInt64Size(4, remaining_);
+      
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto build() {
+        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
       }
-      if (((bitField0_ & 0x00000010) == 0x00000010)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeUInt64Size(5, blockPoolUsed_);
+      
+      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto buildParsed()
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(
+            result).asInvalidProtocolBufferException();
+        }
+        return result;
       }
-      if (((bitField0_ & 0x00000020) == 0x00000020)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeUInt64Size(6, lastUpdate_);
+      
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto buildPartial() {
+        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto(this);
+        int from_bitField0_ = bitField0_;
+        if (datanodesBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001)) {
+            datanodes_ = java.util.Collections.unmodifiableList(datanodes_);
+            bitField0_ = (bitField0_ & ~0x00000001);
+          }
+          result.datanodes_ = datanodes_;
+        } else {
+          result.datanodes_ = datanodesBuilder_.build();
+        }
+        onBuilt();
+        return result;
       }
-      if (((bitField0_ & 0x00000040) == 0x00000040)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeUInt32Size(7, xceiverCount_);
+      
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto) {
+          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
       }
-      if (((bitField0_ & 0x00000080) == 0x00000080)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeBytesSize(8, getLocationBytes());
-      }
-      if (((bitField0_ & 0x00000100) == 0x00000100)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeBytesSize(9, getHostNameBytes());
-      }
-      if (((bitField0_ & 0x00000200) == 0x00000200)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeEnumSize(10, adminState_.getNumber());
+      
+      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto other) {
+        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto.getDefaultInstance()) return this;
+        if (datanodesBuilder_ == null) {
+          if (!other.datanodes_.isEmpty()) {
+            if (datanodes_.isEmpty()) {
+              datanodes_ = other.datanodes_;
+              bitField0_ = (bitField0_ & ~0x00000001);
+            } else {
+              ensureDatanodesIsMutable();
+              datanodes_.addAll(other.datanodes_);
+            }
+            onChanged();
+          }
+        } else {
+          if (!other.datanodes_.isEmpty()) {
+            if (datanodesBuilder_.isEmpty()) {
+              datanodesBuilder_.dispose();
+              datanodesBuilder_ = null;
+              datanodes_ = other.datanodes_;
+              bitField0_ = (bitField0_ & ~0x00000001);
+              datanodesBuilder_ = 
+                com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+                   getDatanodesFieldBuilder() : null;
+            } else {
+              datanodesBuilder_.addAllMessages(other.datanodes_);
+            }
+          }
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
       }
-      size += getUnknownFields().getSerializedSize();
-      memoizedSerializedSize = size;
-      return size;
-    }
-    
-    private static final long serialVersionUID = 0L;
-    @java.lang.Override
-    protected java.lang.Object writeReplace()
-        throws java.io.ObjectStreamException {
-      return super.writeReplace();
-    }
-    
-    @java.lang.Override
-    public boolean equals(final java.lang.Object obj) {
-      if (obj == this) {
-       return true;
+      
+      public final boolean isInitialized() {
+        for (int i = 0; i < getDatanodesCount(); i++) {
+          if (!getDatanodes(i).isInitialized()) {
+            
+            return false;
+          }
+        }
+        return true;
       }
-      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto)) {
-        return super.equals(obj);
+      
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder(
+            this.getUnknownFields());
+        while (true) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              this.setUnknownFields(unknownFields.build());
+              onChanged();
+              return this;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                this.setUnknownFields(unknownFields.build());
+                onChanged();
+                return this;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder();
+              input.readMessage(subBuilder, extensionRegistry);
+              addDatanodes(subBuilder.buildPartial());
+              break;
+            }
+          }
+        }
       }
-      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto) obj;
       
-      boolean result = true;
-      result = result && (hasId() == other.hasId());
-      if (hasId()) {
-        result = result && getId()
-            .equals(other.getId());
+      private int bitField0_;
+      
+      // repeated .DatanodeIDProto datanodes = 1;
+      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> datanodes_ =
+        java.util.Collections.emptyList();
+      private void ensureDatanodesIsMutable() {
+        if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+          datanodes_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto>(datanodes_);
+          bitField0_ |= 0x00000001;
+         }
       }
-      result = result && (hasCapacity() == other.hasCapacity());
-      if (hasCapacity()) {
-        result = result && (getCapacity()
-            == other.getCapacity());
+      
+      private com.google.protobuf.RepeatedFieldBuilder<
+          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> datanodesBuilder_;
+      
+      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> getDatanodesList() {
+        if (datanodesBuilder_ == null) {
+          return java.util.Collections.unmodifiableList(datanodes_);
+        } else {
+          return datanodesBuilder_.getMessageList();
+        }
       }
-      result = result && (hasDfsUsed() == other.hasDfsUsed());
-      if (hasDfsUsed()) {
-        result = result && (getDfsUsed()
-            == other.getDfsUsed());
+      public int getDatanodesCount() {
+        if (datanodesBuilder_ == null) {
+          return datanodes_.size();
+        } else {
+          return datanodesBuilder_.getCount();
+        }
       }
-      result = result && (hasRemaining() == other.hasRemaining());
-      if (hasRemaining()) {
-        result = result && (getRemaining()
-            == other.getRemaining());
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodes(int index) {
+        if (datanodesBuilder_ == null) {
+          return datanodes_.get(index);
+        } else {
+          return datanodesBuilder_.getMessage(index);
+        }
       }
-      result = result && (hasBlockPoolUsed() == other.hasBlockPoolUsed());
-      if (hasBlockPoolUsed()) {
-        result = result && (getBlockPoolUsed()
-            == other.getBlockPoolUsed());
+      public Builder setDatanodes(
+          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
+        if (datanodesBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureDatanodesIsMutable();
+          datanodes_.set(index, value);
+          onChanged();
+        } else {
+          datanodesBuilder_.setMessage(index, value);
+        }
+        return this;
       }
-      result = result && (hasLastUpdate() == other.hasLastUpdate());
-      if (hasLastUpdate()) {
-        result = result && (getLastUpdate()
-            == other.getLastUpdate());
+      public Builder setDatanodes(
+          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
+        if (datanodesBuilder_ == null) {
+          ensureDatanodesIsMutable();
+          datanodes_.set(index, builderForValue.build());
+          onChanged();
+        } else {
+          datanodesBuilder_.setMessage(index, builderForValue.build());
+        }
+        return this;
       }
-      result = result && (hasXceiverCount() == other.hasXceiverCount());
-      if (hasXceiverCount()) {
-        result = result && (getXceiverCount()
-            == other.getXceiverCount());
+      public Builder addDatanodes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
+        if (datanodesBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureDatanodesIsMutable();
+          datanodes_.add(value);
+          onChanged();
+        } else {
+          datanodesBuilder_.addMessage(value);
+        }
+        return this;
       }
-      result = result && (hasLocation() == other.hasLocation());
-      if (hasLocation()) {
-        result = result && getLocation()
-            .equals(other.getLocation());
+      public Builder addDatanodes(
+          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
+        if (datanodesBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureDatanodesIsMutable();
+          datanodes_.add(index, value);
+          onChanged();
+        } else {
+          datanodesBuilder_.addMessage(index, value);
+        }
+        return this;
       }
-      result = result && (hasHostName() == other.hasHostName());
-      if (hasHostName()) {
-        result = result && getHostName()
-            .equals(other.getHostName());
+      public Builder addDatanodes(
+          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
+        if (datanodesBuilder_ == null) {
+          ensureDatanodesIsMutable();
+          datanodes_.add(builderForValue.build());
+          onChanged();
+        } else {
+          datanodesBuilder_.addMessage(builderForValue.build());
+        }
+        return this;
       }
-      result = result && (hasAdminState() == other.hasAdminState());
-      if (hasAdminState()) {
-        result = result &&
-            (getAdminState() == other.getAdminState());
+      public Builder addDatanodes(
+          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
+        if (datanodesBuilder_ == null) {
+          ensureDatanodesIsMutable();
+          datanodes_.add(index, builderForValue.build());
+          onChanged();
+        } else {
+          datanodesBuilder_.addMessage(index, builderForValue.build());
+        }
+        return this;
       }
-      result = result &&
-          getUnknownFields().equals(other.getUnknownFields());
-      return result;
-    }
-    
-    @java.lang.Override
-    public int hashCode() {
-      int hash = 41;
-      hash = (19 * hash) + getDescriptorForType().hashCode();
-      if (hasId()) {
-        hash = (37 * hash) + ID_FIELD_NUMBER;
-        hash = (53 * hash) + getId().hashCode();
+      public Builder addAllDatanodes(
+          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> values) {
+        if (datanodesBuilder_ == null) {
+          ensureDatanodesIsMutable();
+          super.addAll(values, datanodes_);
+          onChanged();
+        } else {
+          datanodesBuilder_.addAllMessages(values);
+        }
+        return this;
       }
-      if (hasCapacity()) {
-        hash = (37 * hash) + CAPACITY_FIELD_NUMBER;
-        hash = (53 * hash) + hashLong(getCapacity());
+      public Builder clearDatanodes() {
+        if (datanodesBuilder_ == null) {
+          datanodes_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000001);
+          onChanged();
+        } else {
+          datanodesBuilder_.clear();
+        }
+        return this;
       }
-      if (hasDfsUsed()) {
-        hash = (37 * hash) + DFSUSED_FIELD_NUMBER;
-        hash = (53 * hash) + hashLong(getDfsUsed());
+      public Builder removeDatanodes(int index) {
+        if (datanodesBuilder_ == null) {
+          ensureDatanodesIsMutable();
+          datanodes_.remove(index);
+          onChanged();
+        } else {
+          datanodesBuilder_.remove(index);
+        }
+        return this;
       }
-      if (hasRemaining()) {
-        hash = (37 * hash) + REMAINING_FIELD_NUMBER;
-        hash = (53 * hash) + hashLong(getRemaining());
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder getDatanodesBuilder(
+          int index) {
+        return getDatanodesFieldBuilder().getBuilder(index);
       }
-      if (hasBlockPoolUsed()) {
-        hash = (37 * hash) + BLOCKPOOLUSED_FIELD_NUMBER;
-        hash = (53 * hash) + hashLong(getBlockPoolUsed());
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodesOrBuilder(
+          int index) {
+        if (datanodesBuilder_ == null) {
+          return datanodes_.get(index);  } else {
+          return datanodesBuilder_.getMessageOrBuilder(index);
+        }
       }
-      if (hasLastUpdate()) {
-        hash = (37 * hash) + LASTUPDATE_FIELD_NUMBER;
-        hash = (53 * hash) + hashLong(getLastUpdate());
+      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> 
+           getDatanodesOrBuilderList() {
+        if (datanodesBuilder_ != null) {
+          return datanodesBuilder_.getMessageOrBuilderList();
+        } else {
+          return java.util.Collections.unmodifiableList(datanodes_);
+        }
       }
-      if (hasXceiverCount()) {
-        hash = (37 * hash) + XCEIVERCOUNT_FIELD_NUMBER;
-        hash = (53 * hash) + getXceiverCount();
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder addDatanodesBuilder() {
+        return getDatanodesFieldBuilder().addBuilder(
+            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance());
       }
-      if (hasLocation()) {
-        hash = (37 * hash) + LOCATION_FIELD_NUMBER;
-        hash = (53 * hash) + getLocation().hashCode();
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder addDatanodesBuilder(
+          int index) {
+        return getDatanodesFieldBuilder().addBuilder(
+            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance());
       }
-      if (hasHostName()) {
-        hash = (37 * hash) + HOSTNAME_FIELD_NUMBER;
-        hash = (53 * hash) + getHostName().hashCode();
+      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder> 
+           getDatanodesBuilderList() {
+        return getDatanodesFieldBuilder().getBuilderList();
       }
-      if (hasAdminState()) {
-        hash = (37 * hash) + ADMINSTATE_FIELD_NUMBER;
-        hash = (53 * hash) + hashEnum(getAdminState());
+      private com.google.protobuf.RepeatedFieldBuilder<
+          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> 
+          getDatanodesFieldBuilder() {
+        if (datanodesBuilder_ == null) {
+          datanodesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>(
+                  datanodes_,
+                  ((bitField0_ & 0x00000001) == 0x00000001),
+                  getParentForChildren(),
+                  isClean());
+          datanodes_ = null;
+        }
+        return datanodesBuilder_;
       }
-      hash = (29 * hash) + getUnknownFields().hashCode();
-      return hash;
+      
+      // @@protoc_insertion_point(builder_scope:DatanodeIDsProto)
     }
     
-    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
-        com.google.protobuf.ByteString data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return newBuilder().mergeFrom(data).buildParsed();
-    }
-    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
-        com.google.protobuf.ByteString data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return newBuilder().mergeFrom(data, extensionRegistry)
-               .buildParsed();
+    static {
+      defaultInstance = new DatanodeIDsProto(true);
+      defaultInstance.initFields();
     }
-    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(byte[] data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return newBuilder().mergeFrom(data).buildParsed();
-    }
-    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
-        byte[] data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return newBuilder().mergeFrom(data, extensionRegistry)
-               .buildParsed();
-    }
-    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return newBuilder().mergeFrom(input).buildParsed();
-    }
-    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return newBuilder().mergeFrom(input, extensionRegistry)
-               .buildParsed();
-    }
-    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseDelimitedFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      Builder builder = newBuilder();
-      if (builder.mergeDelimitedFrom(input)) {
-        return builder.buildParsed();
-      } else {
-        return null;
-      }
-    }
-    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseDelimitedFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      Builder builder = newBuilder();
-      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
-        return builder.buildParsed();
-      } else {
-        return null;
-      }
+    
+    // @@protoc_insertion_point(class_scope:DatanodeIDsProto)
+  }
+  
+  public interface DatanodeInfoProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+    
+    // required .DatanodeIDProto id = 1;
+    boolean hasId();
+    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getId();
+    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getIdOrBuilder();
+    
+    // optional uint64 capacity = 2;
+    boolean hasCapacity();
+    long getCapacity();
+    
+    // optional uint64 dfsUsed = 3;
+    boolean hasDfsUsed();
+    long getDfsUsed();
+    
+    // optional uint64 remaining = 4;
+    boolean hasRemaining();
+    long getRemaining();
+    
+    // optional uint64 blockPoolUsed = 5;
+    boolean hasBlockPoolUsed();
+    long getBlockPoolUsed();
+    
+    // optional uint64 lastUpdate = 6;
+    boolean hasLastUpdate();
+    long getLastUpdate();
+    
+    // optional uint32 xceiverCount = 7;
+    boolean hasXceiverCount();
+    int getXceiverCount();
+    
+    // optional string location = 8;
+    boolean hasLocation();
+    String getLocation();
+    
+    // optional string hostName = 9;
+    boolean hasHostName();
+    String getHostName();
+    
+    // optional .DatanodeInfoProto.AdminState adminState = 10;
+    boolean hasAdminState();
+    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState getAdminState();
+  }
+  public static final class DatanodeInfoProto extends
+      com.google.protobuf.GeneratedMessage
+      implements DatanodeInfoProtoOrBuilder {
+    // Use DatanodeInfoProto.newBuilder() to construct.
+    private DatanodeInfoProto(Builder builder) {
+      super(builder);
     }
-    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
-        com.google.protobuf.CodedInputStream input)
-        throws java.io.IOException {
-      return newBuilder().mergeFrom(input).buildParsed();
+    private DatanodeInfoProto(boolean noInit) {}
+    
+    private static final DatanodeInfoProto defaultInstance;
+    public static DatanodeInfoProto getDefaultInstance() {
+      return defaultInstance;
     }
-    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return newBuilder().mergeFrom(input, extensionRegistry)
-               .buildParsed();
+    
+    public DatanodeInfoProto getDefaultInstanceForType() {
+      return defaultInstance;
     }
     
-    public static Builder newBuilder() { return Builder.create(); }
-    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto prototype) {
-      return newBuilder().mergeFrom(prototype);
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeInfoProto_descriptor;
     }
-    public Builder toBuilder() { return newBuilder(this); }
     
-    @java.lang.Override
-    protected Builder newBuilderForType(
-        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-      Builder builder = new Builder(parent);
-      return builder;
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeInfoProto_fieldAccessorTable;
     }
-    public static final class Builder extends
-        com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder {
-      public static final com.google.protobuf.Descriptors.Descriptor
-          getDescriptor() {
-        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeInfoProto_descriptor;
-      }
+    
+    public enum AdminState
+        implements com.google.protobuf.ProtocolMessageEnum {
+      NORMAL(0, 0),
+      DECOMMISSION_INPROGRESS(1, 1),
+      DECOMMISSIONED(2, 2),
+      ;
       
-      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-          internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeInfoProto_fieldAccessorTable;
-      }
+      public static final int NORMAL_VALUE = 0;
+      public static final int DECOMMISSION_INPROGRESS_VALUE = 1;
+      public static final int DECOMMISSIONED_VALUE = 2;
       
-      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder()
-      private Builder() {
-        maybeForceBuilderInitialization();
-      }
       
-      private Builder(BuilderParent parent) {
-        super(parent);
-        maybeForceBuilderInitialization();
-      }
-      private void maybeForceBuilderInitialization() {
-        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
-          getIdFieldBuilder();
-        }
-      }
-      private static Builder create() {
-        return new Builder();
-      }
+      public final int getNumber() { return value; }
       
-      public Builder clear() {
-        super.clear();
-        if (idBuilder_ == null) {
-          id_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
-        } else {
-          idBuilder_.clear();
+      public static AdminState valueOf(int value) {
+        switch (value) {
+          case 0: return NORMAL;
+          case 1: return DECOMMISSION_INPROGRESS;
+          case 2: return DECOMMISSIONED;
+          default: return null;
         }
-        bitField0_ = (bitField0_ & ~0x00000001);
-        capacity_ = 0L;
-        bitField0_ = (bitField0_ & ~0x00000002);
-        dfsUsed_ = 0L;
-        bitField0_ = (bitField0_ & ~0x00000004);
-        remaining_ = 0L;
-        bitField0_ = (bitField0_ & ~0x00000008);
-        blockPoolUsed_ = 0L;
-        bitField0_ = (bitField0_ & ~0x00000010);
-        lastUpdate_ = 0L;
-        bitField0_ = (bitField0_ & ~0x00000020);
-        xceiverCount_ = 0;
-        bitField0_ = (bitField0_ & ~0x00000040);
-        location_ = "";
-        bitField0_ = (bitField0_ & ~0x00000080);
-        hostName_ = "";
-        bitField0_ = (bitField0_ & ~0x00000100);
-        adminState_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.NORMAL;
-        bitField0_ = (bitField0_ & ~0x00000200);
-        return this;
       }
       
-      public Builder clone() {
-        return create().mergeFrom(buildPartial());
+      public static com.google.protobuf.Internal.EnumLiteMap<AdminState>
+          internalGetValueMap() {
+        return internalValueMap;
       }
+      private static com.google.protobuf.Internal.EnumLiteMap<AdminState>
+          internalValueMap =
+            new com.google.protobuf.Internal.EnumLiteMap<AdminState>() {
+              public AdminState findValueByNumber(int number) {
+                return AdminState.valueOf(number);
+              }
+            };
       
-      public com.google.protobuf.Descriptors.Descriptor
+      public final com.google.protobuf.Descriptors.EnumValueDescriptor
+          getValueDescriptor() {
+        return getDescriptor().getValues().get(index);
+      }
+      public final com.google.protobuf.Descriptors.EnumDescriptor
           getDescriptorForType() {
-        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDescriptor();
+        return getDescriptor();
       }
-      
-      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDefaultInstanceForType() {
-        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance();
+      public static final com.google.protobuf.Descriptors.EnumDescriptor
+          getDescriptor() {
+        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDescriptor().getEnumTypes().get(0);
       }
       
-      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto build() {
-        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto result = buildPartial();
-        if (!result.isInitialized()) {
-          throw newUninitializedMessageException(result);
+      private static final AdminState[] VALUES = {
+        NORMAL, DECOMMISSION_INPROGRESS, DECOMMISSIONED, 
+      };
+      
+      public static AdminState valueOf(
+          com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+        if (desc.getType() != getDescriptor()) {
+          throw new java.lang.IllegalArgumentException(
+            "EnumValueDescriptor is not for this type.");
         }
-        return result;
+        return VALUES[desc.getIndex()];
       }
       
-      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto buildParsed()
-          throws com.google.protobuf.InvalidProtocolBufferException {
-        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto result = buildPartial();
-        if (!result.isInitialized()) {
-          throw newUninitializedMessageException(
-            result).asInvalidProtocolBufferException();
-        }
-        return result;
+      private final int index;
+      private final int value;
+      
+      private AdminState(int index, int value) {
+        this.index = index;
+        this.value = value;
       }
       
-      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto buildPartial() {
-        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto(this);
-        int from_bitField0_ = bitField0_;
-        int to_bitField0_ = 0;
-        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
-          to_bitField0_ |= 0x00000001;
+      // @@protoc_insertion_point(enum_scope:DatanodeInfoProto.AdminState)
+    }
+    
+    private int bitField0_;
+    // required .DatanodeIDProto id = 1;
+    public static final int ID_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto id_;
+    public boolean hasId() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getId() {
+      return id_;
+    }
+    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getIdOrBuilder() {
+      return id_;
+    }
+    
+    // optional uint64 capacity = 2;
+    public static final int CAPACITY_FIELD_NUMBER = 2;
+    private long capacity_;
+    public boolean hasCapacity() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    public long getCapacity() {
+      return capacity_;
+    }
+    
+    // optional uint64 dfsUsed = 3;
+    public static final int DFSUSED_FIELD_NUMBER = 3;
+    private long dfsUsed_;
+    public boolean hasDfsUsed() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    public long getDfsUsed() {
+      return dfsUsed_;
+    }
+    
+    // optional uint64 remaining = 4;
+    public static final int REMAINING_FIELD_NUMBER = 4;
+    private long remaining_;
+    public boolean hasRemaining() {
+      return ((bitField0_ & 0x00000008) == 0x00000008);
+    }
+    public long getRemaining() {
+      return remaining_;
+    }
+    
+    // optional uint64 blockPoolUsed = 5;
+    public static final int BLOCKPOOLUSED_FIELD_NUMBER = 5;
+    private long blockPoolUsed_;
+    public boolean hasBlockPoolUsed() {
+      return ((bitField0_ & 0x00000010) == 0x00000010);
+    }
+    public long getBlockPoolUsed() {
+      return blockPoolUsed_;
+    }
+    
+    // optional uint64 lastUpdate = 6;
+    public static final int LASTUPDATE_FIELD_NUMBER = 6;
+    private long lastUpdate_;
+    public boolean hasLastUpdate() {
+      return ((bitField0_ & 0x00000020) == 0x00000020);
+    }
+    public long getLastUpdate() {
+      return lastUpdate_;
+    }
+    
+    // optional uint32 xceiverCount = 7;
+    public static final int XCEIVERCOUNT_FIELD_NUMBER = 7;
+    private int xceiverCount_;
+    public boolean hasXceiverCount() {
+      return ((bitField0_ & 0x00000040) == 0x00000040);
+    }
+    public int getXceiverCount() {
+      return xceiverCount_;
+    }
+    
+    // optional string location = 8;
+    public static final int LOCATION_FIELD_NUMBER = 8;
+    private java.lang.Object location_;
+    public boolean hasLocation() {
+      return ((bitField0_ & 0x00000080) == 0x00000080);
+    }
+    public String getLocation() {
+      java.lang.Object ref = location_;
+      if (ref instanceof String) {
+        return (String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        String s = bs.toStringUtf8();
+        if (com.google.protobuf.Internal.isValidUtf8(bs)) {
+          location_ = s;
+        }
+        return s;
+      }
+    }
+    private com.google.protobuf.ByteString getLocationBytes() {
+      java.lang.Object ref = location_;
+      if (ref instanceof String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8((String) ref);
+        location_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+    
+    // optional string hostName = 9;
+    public static final int HOSTNAME_FIELD_NUMBER = 9;
+    private java.lang.Object hostName_;
+    public boolean hasHostName() {
+      return ((bitField0_ & 0x00000100) == 0x00000100);
+    }
+    public String getHostName() {
+      java.lang.Object ref = hostName_;
+      if (ref instanceof String) {
+        return (String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        String s = bs.toStringUtf8();
+        if (com.google.protobuf.Internal.isValidUtf8(bs)) {
+          hostName_ = s;
+        }
+        return s;
+      }
+    }
+    private com.google.protobuf.ByteString getHostNameBytes() {
+      java.lang.Object ref = hostName_;
+      if (ref instanceof String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8((String) ref);
+        hostName_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+    
+    // optional .DatanodeInfoProto.AdminState adminState = 10;
+    public static final int ADMINSTATE_FIELD_NUMBER = 10;
+    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState adminState_;
+    // Presence is tracked by bit 0x200 of bitField0_.
+    public boolean hasAdminState() {
+      return ((bitField0_ & 0x00000200) == 0x00000200);
+    }
+    // Returns the datanode admin state; defaults to NORMAL when unset (see initFields()).
+    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState getAdminState() {
+      return adminState_;
+    }
+    
+    // Resets every field to its proto default: default sub-message for id,
+    // zero for numeric fields, empty string for strings, NORMAL for adminState.
+    private void initFields() {
+      id_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
+      capacity_ = 0L;
+      dfsUsed_ = 0L;
+      remaining_ = 0L;
+      blockPoolUsed_ = 0L;
+      lastUpdate_ = 0L;
+      xceiverCount_ = 0;
+      location_ = "";
+      hostName_ = "";
+      adminState_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.NORMAL;
+    }
+    // Tri-state memo: -1 = not yet computed, 0 = not initialized, 1 = initialized.
+    private byte memoizedIsInitialized = -1;
+    // A DatanodeInfoProto is initialized iff its only required field, 'id',
+    // is present and itself initialized. The result is memoized.
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+      
+      if (!hasId()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getId().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+    
+    // Serializes only the fields whose presence bit is set, in ascending
+    // field-number order (1..10), then any unknown fields preserved from parsing.
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();  // ensures the size memo is populated before writing
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeMessage(1, id_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeUInt64(2, capacity_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeUInt64(3, dfsUsed_);
+      }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        output.writeUInt64(4, remaining_);
+      }
+      if (((bitField0_ & 0x00000010) == 0x00000010)) {
+        output.writeUInt64(5, blockPoolUsed_);
+      }
+      if (((bitField0_ & 0x00000020) == 0x00000020)) {
+        output.writeUInt64(6, lastUpdate_);
+      }
+      if (((bitField0_ & 0x00000040) == 0x00000040)) {
+        output.writeUInt32(7, xceiverCount_);
+      }
+      if (((bitField0_ & 0x00000080) == 0x00000080)) {
+        output.writeBytes(8, getLocationBytes());
+      }
+      if (((bitField0_ & 0x00000100) == 0x00000100)) {
+        output.writeBytes(9, getHostNameBytes());
+      }
+      if (((bitField0_ & 0x00000200) == 0x00000200)) {
+        output.writeEnum(10, adminState_.getNumber());
+      }
+      getUnknownFields().writeTo(output);
+    }
+    
+    // -1 means the serialized size has not been computed yet.
+    private int memoizedSerializedSize = -1;
+    // Computes (and memoizes) the exact wire size: the sum of each present
+    // field's encoded size plus the size of any preserved unknown fields.
+    // Mirrors the field order used by writeTo().
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+    
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, id_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(2, capacity_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(3, dfsUsed_);
+      }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(4, remaining_);
+      }
+      if (((bitField0_ & 0x00000010) == 0x00000010)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(5, blockPoolUsed_);
+      }
+      if (((bitField0_ & 0x00000020) == 0x00000020)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(6, lastUpdate_);
+      }
+      if (((bitField0_ & 0x00000040) == 0x00000040)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt32Size(7, xceiverCount_);
+      }
+      if (((bitField0_ & 0x00000080) == 0x00000080)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(8, getLocationBytes());
+      }
+      if (((bitField0_ & 0x00000100) == 0x00000100)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(9, getHostNameBytes());
+      }
+      if (((bitField0_ & 0x00000200) == 0x00000200)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeEnumSize(10, adminState_.getNumber());
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+    
+    private static final long serialVersionUID = 0L;
+    // Java serialization hook; delegates to the superclass so the message is
+    // serialized via its protobuf representation rather than field-by-field.
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+    
+    // Value equality: two messages are equal iff each field has the same
+    // presence and, when present, the same value — plus equal unknown fields.
+    // Non-DatanodeInfoProto arguments fall back to the superclass comparison.
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto) obj;
+      
+      boolean result = true;
+      result = result && (hasId() == other.hasId());
+      if (hasId()) {
+        result = result && getId()
+            .equals(other.getId());
+      }
+      result = result && (hasCapacity() == other.hasCapacity());
+      if (hasCapacity()) {
+        result = result && (getCapacity()
+            == other.getCapacity());
+      }
+      result = result && (hasDfsUsed() == other.hasDfsUsed());
+      if (hasDfsUsed()) {
+        result = result && (getDfsUsed()
+            == other.getDfsUsed());
+      }
+      result = result && (hasRemaining() == other.hasRemaining());
+      if (hasRemaining()) {
+        result = result && (getRemaining()
+            == other.getRemaining());
+      }
+      result = result && (hasBlockPoolUsed() == other.hasBlockPoolUsed());
+      if (hasBlockPoolUsed()) {
+        result = result && (getBlockPoolUsed()
+            == other.getBlockPoolUsed());
+      }
+      result = result && (hasLastUpdate() == other.hasLastUpdate());
+      if (hasLastUpdate()) {
+        result = result && (getLastUpdate()
+            == other.getLastUpdate());
+      }
+      result = result && (hasXceiverCount() == other.hasXceiverCount());
+      if (hasXceiverCount()) {
+        result = result && (getXceiverCount()
+            == other.getXceiverCount());
+      }
+      result = result && (hasLocation() == other.hasLocation());
+      if (hasLocation()) {
+        result = result && getLocation()
+            .equals(other.getLocation());
+      }
+      result = result && (hasHostName() == other.hasHostName());
+      if (hasHostName()) {
+        result = result && getHostName()
+            .equals(other.getHostName());
+      }
+      result = result && (hasAdminState() == other.hasAdminState());
+      if (hasAdminState()) {
+        result = result &&
+            (getAdminState() == other.getAdminState());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+    
+    // Hash consistent with equals(): folds in the field number and value of
+    // every present field (hashLong/hashEnum helpers come from the generated
+    // message superclass), plus the descriptor and unknown fields.
+    @java.lang.Override
+    public int hashCode() {
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasId()) {
+        hash = (37 * hash) + ID_FIELD_NUMBER;
+        hash = (53 * hash) + getId().hashCode();
+      }
+      if (hasCapacity()) {
+        hash = (37 * hash) + CAPACITY_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getCapacity());
+      }
+      if (hasDfsUsed()) {
+        hash = (37 * hash) + DFSUSED_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getDfsUsed());
+      }
+      if (hasRemaining()) {
+        hash = (37 * hash) + REMAINING_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getRemaining());
+      }
+      if (hasBlockPoolUsed()) {
+        hash = (37 * hash) + BLOCKPOOLUSED_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getBlockPoolUsed());
+      }
+      if (hasLastUpdate()) {
+        hash = (37 * hash) + LASTUPDATE_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getLastUpdate());
+      }
+      if (hasXceiverCount()) {
+        hash = (37 * hash) + XCEIVERCOUNT_FIELD_NUMBER;
+        hash = (53 * hash) + getXceiverCount();
+      }
+      if (hasLocation()) {
+        hash = (37 * hash) + LOCATION_FIELD_NUMBER;
+        hash = (53 * hash) + getLocation().hashCode();
+      }
+      if (hasHostName()) {
+        hash = (37 * hash) + HOSTNAME_FIELD_NUMBER;
+        hash = (53 * hash) + getHostName().hashCode();
+      }
+      if (hasAdminState()) {
+        hash = (37 * hash) + ADMINSTATE_FIELD_NUMBER;
+        hash = (53 * hash) + hashEnum(getAdminState());
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      return hash;
+    }
+    
+    // Static parsing entry points. All overloads delegate to a fresh Builder and
+    // buildParsed(), which converts a missing required field (id) into an
+    // InvalidProtocolBufferException. The *delimitedFrom variants read a
+    // varint-length-prefixed message and return null on clean end-of-stream.
+    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    
+    // Builder factories: empty builder, builder pre-populated from a prototype
+    // message, and toBuilder() for round-tripping this instance back to a builder.
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+    
+    // Creates a child builder attached to a parent for change notification.
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder {
+      // Descriptor and reflection accessor table shared with the outer message,
+      // both resolved from the file-level static initializers in HdfsProtos.
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeInfoProto_descriptor;
+      }
+      
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeInfoProto_fieldAccessorTable;
+      }
+      
+      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+      
+      // Parent-attached variant used by newBuilderForType(BuilderParent).
+      private Builder(BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      // Eagerly creates the nested 'id' field builder, but only when the
+      // runtime's alwaysUseFieldBuilders flag is set (e.g. for nested builders).
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getIdFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+      
+      // Resets every field to its proto default and clears all presence bits.
+      // The 'id' sub-message is cleared through its field builder when one exists.
+      public Builder clear() {
+        super.clear();
+        if (idBuilder_ == null) {
+          id_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
+        } else {
+          idBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        capacity_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        dfsUsed_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000004);
+        remaining_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000008);
+        blockPoolUsed_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000010);
+        lastUpdate_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000020);
+        xceiverCount_ = 0;
+        bitField0_ = (bitField0_ & ~0x00000040);
+        location_ = "";
+        bitField0_ = (bitField0_ & ~0x00000080);
+        hostName_ = "";
+        bitField0_ = (bitField0_ & ~0x00000100);
+        adminState_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.NORMAL;
+        bitField0_ = (bitField0_ & ~0x00000200);
+        return this;
+      }
+      
+      // Deep copy via a partial build of the current state.
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+      
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDescriptor();
+      }
+      
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDefaultInstanceForType() {
+        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance();
+      }
+      
+      // Builds the message, throwing UninitializedMessageException (unchecked)
+      // if the required 'id' field is missing or incomplete.
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto build() {
+        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+      
+      // Same as build(), but used on the parsing path: reports a missing
+      // required field as a checked InvalidProtocolBufferException instead.
+      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto buildParsed()
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(
+            result).asInvalidProtocolBufferException();
+        }
+        return result;
+      }
+      
+      // Copies the builder state into a new message without checking required
+      // fields. Field values are always copied; the presence bits are translated
+      // from the builder's bitField0_ into the message's bitField0_.
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto buildPartial() {
+        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        if (idBuilder_ == null) {
+          result.id_ = id_;
+        } else {
+          result.id_ = idBuilder_.build();
+        }
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.capacity_ = capacity_;
+        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+          to_bitField0_ |= 0x00000004;
+        }
+        result.dfsUsed_ = dfsUsed_;
+        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+          to_bitField0_ |= 0x00000008;
+        }
+        result.remaining_ = remaining_;
+        if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+          to_bitField0_ |= 0x00000010;
+        }
+        result.blockPoolUsed_ = blockPoolUsed_;
+        if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
+          to_bitField0_ |= 0x00000020;
+        }
+        result.lastUpdate_ = lastUpdate_;
+        if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
+          to_bitField0_ |= 0x00000040;
+        }
+        result.xceiverCount_ = xceiverCount_;
+        if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
+          to_bitField0_ |= 0x00000080;
+        }
+        result.location_ = location_;
+        if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
+          to_bitField0_ |= 0x00000100;
+        }
+        result.hostName_ = hostName_;
+        if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
+          to_bitField0_ |= 0x00000200;
+        }
+        result.adminState_ = adminState_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+      
+      // Generic merge entry point: dispatches to the typed overload when the
+      // argument is a DatanodeInfoProto, otherwise defers to reflective merge.
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto) {
+          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+      
+      // Field-by-field merge: only fields present in 'other' overwrite this
+      // builder ('id' is deep-merged via mergeId). Merging the default instance
+      // is a no-op. Unknown fields are merged as well.
+      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto other) {
+        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()) return this;
+        if (other.hasId()) {
+          mergeId(other.getId());
+        }
+        if (other.hasCapacity()) {
+          setCapacity(other.getCapacity());
+        }
+        if (other.hasDfsUsed()) {
+          setDfsUsed(other.getDfsUsed());
+        }
+        if (other.hasRemaining()) {
+          setRemaining(other.getRemaining());
+        }
+        if (other.hasBlockPoolUsed()) {
+          setBlockPoolUsed(other.getBlockPoolUsed());
+        }
+        if (other.hasLastUpdate()) {
+          setLastUpdate(other.getLastUpdate());
+        }
+        if (other.hasXceiverCount()) {
+          setXceiverCount(other.getXceiverCount());
+        }
+        if (other.hasLocation()) {
+          setLocation(other.getLocation());
+        }
+        if (other.hasHostName()) {
+          setHostName(other.getHostName());
+        }
+        if (other.hasAdminState()) {
+          setAdminState(other.getAdminState());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+      
+      // Builder-side initialization check: the required 'id' field must be set
+      // and itself initialized. Unlike the message version, the result is not memoized.
+      public final boolean isInitialized() {
+        if (!hasId()) {
+          
+          return false;
+        }
+        if (!getId().isInitialized()) {
+          
+          return false;
+        }
+        return true;
+      }
+      
+      // Parses fields from the wire until tag 0 (end of message/stream).
+      // Each case label is the field's wire tag: (field_number << 3) | wire_type,
+      // e.g. 10 = field 1 length-delimited, 16 = field 2 varint, 80 = field 10 varint.
+      // Unrecognized tags — and unrecognized enum values for adminState — are
+      // preserved in the unknown-field set rather than dropped.
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder(
+            this.getUnknownFields());
+        while (true) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              this.setUnknownFields(unknownFields.build());
+              onChanged();
+              return this;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                this.setUnknownFields(unknownFields.build());
+                onChanged();
+                return this;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder();
+              if (hasId()) {
+                subBuilder.mergeFrom(getId());
+              }
+              input.readMessage(subBuilder, extensionRegistry);
+              setId(subBuilder.buildPartial());
+              break;
+            }
+            case 16: {
+              bitField0_ |= 0x00000002;
+              capacity_ = input.readUInt64();
+              break;
+            }
+            case 24: {
+              bitField0_ |= 0x00000004;
+              dfsUsed_ = input.readUInt64();
+              break;
+            }
+            case 32: {
+              bitField0_ |= 0x00000008;
+              remaining_ = input.readUInt64();
+              break;
+            }
+            case 40: {
+              bitField0_ |= 0x00000010;
+              blockPoolUsed_ = input.readUInt64();
+              break;
+            }
+            case 48: {
+              bitField0_ |= 0x00000020;
+              lastUpdate_ = input.readUInt64();
+              break;
+            }
+            case 56: {
+              bitField0_ |= 0x00000040;
+              xceiverCount_ = input.readUInt32();
+              break;
+            }
+            case 66: {
+              bitField0_ |= 0x00000080;
+              location_ = input.readBytes();
+              break;
+            }
+            case 74: {
+              bitField0_ |= 0x00000100;
+              hostName_ = input.readBytes();
+              break;
+            }
+            case 80: {
+              int rawValue = input.readEnum();
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.valueOf(rawValue);
+              if (value == null) {
+                unknownFields.mergeVarintField(10, rawValue);
+              } else {
+                bitField0_ |= 0x00000200;
+                adminState_ = value;
+              }
+              break;
+            }
+          }
+        }
+      }
+      
+      // Presence bits for the builder's own fields (independent of the message's).
+      private int bitField0_;
+      
+      // required .DatanodeIDProto id = 1;
+      // The raw field value is used until idBuilder_ is created; after that,
+      // the SingleFieldBuilder is the source of truth for the 'id' sub-message.
+      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto id_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> idBuilder_;
+      public boolean hasId() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getId() {
+        if (idBuilder_ == null) {
+          return id_;
+        } else {
+          return idBuilder_.getMessage();
+        }
+      }
+      // Replaces the 'id' sub-message; rejects null and sets the presence bit.
+      public Builder setId(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
+        if (idBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          id_ = value;
+          onChanged();
+        } else {
+          idBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      // Convenience overload: builds the given sub-builder and stores the result.
+      public Builder setId(
+          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
+        if (idBuilder_ == null) {
+          id_ = builderForValue.build();
+          onChanged();
+        } else {
+          idBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      // Merges into the existing 'id' when one is already set (and is not the
+      // default instance); otherwise behaves like setId(value).
+      public Builder mergeId(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
+        if (idBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              id_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()) {
+            id_ =
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder(id_).mergeFrom(value).buildPartial();
+          } else {
+            id_ = value;
+          }
+          onChanged();
+        } else {
+          idBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      public Builder clearId() {
+        if (idBuilder_ == null) {

[... 20681 lines stripped ...]


Mime
View raw message