hadoop-hdfs-commits mailing list archives

From jiten...@apache.org
Subject svn commit: r1212606 [4/5] - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/protocol/proto/ src/main/java/org/apache/hadoop/hdfs/protocolPB/ src/main/java/org/apache/hadoop/hdfs/server/protocol/ src/pro...
Date Fri, 09 Dec 2011 20:02:35 GMT
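
This part of r1212606 deletes the generated VersionRequestProto and VersionResponseProto messages from NamenodeProtocolProtos and repoints every NamenodeProtocolService signature at the equivalent messages in HdfsProtos. Per the removed code, VersionRequestProto has no fields, and VersionResponseProto carries a single required NamespaceInfoProto info = 1 field. A minimal sketch of the caller-side effect follows; the class name VersionRequestExample is hypothetical, while the builder methods are the standard protobuf-generated API shown in the diff:

// Hypothetical illustration; only the types moved, the generated API is unchanged.
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;

public class VersionRequestExample {
  public static void main(String[] args) {
    // Before r1212606 this type was NamenodeProtocolProtos.VersionRequestProto;
    // after the move it is shared from HdfsProtos.
    HdfsProtos.VersionRequestProto request =
        HdfsProtos.VersionRequestProto.newBuilder().build();
    System.out.println(request.isInitialized()); // message has no fields, so always true
  }
}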
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/NamenodeProtocolProtos.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/NamenodeProtocolProtos.java?rev=1212606&r1=1212605&r2=1212606&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/NamenodeProtocolProtos.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/NamenodeProtocolProtos.java Fri Dec  9 20:02:33 2011
@@ -3253,779 +3253,6 @@ public final class NamenodeProtocolProto
     // @@protoc_insertion_point(class_scope:RollEditLogResponseProto)
   }
   
-  public interface VersionRequestProtoOrBuilder
-      extends com.google.protobuf.MessageOrBuilder {
-  }
-  public static final class VersionRequestProto extends
-      com.google.protobuf.GeneratedMessage
-      implements VersionRequestProtoOrBuilder {
-    // Use VersionRequestProto.newBuilder() to construct.
-    private VersionRequestProto(Builder builder) {
-      super(builder);
-    }
-    private VersionRequestProto(boolean noInit) {}
-    
-    private static final VersionRequestProto defaultInstance;
-    public static VersionRequestProto getDefaultInstance() {
-      return defaultInstance;
-    }
-    
-    public VersionRequestProto getDefaultInstanceForType() {
-      return defaultInstance;
-    }
-    
-    public static final com.google.protobuf.Descriptors.Descriptor
-        getDescriptor() {
-      return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_VersionRequestProto_descriptor;
-    }
-    
-    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-        internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_VersionRequestProto_fieldAccessorTable;
-    }
-    
-    private void initFields() {
-    }
-    private byte memoizedIsInitialized = -1;
-    public final boolean isInitialized() {
-      byte isInitialized = memoizedIsInitialized;
-      if (isInitialized != -1) return isInitialized == 1;
-      
-      memoizedIsInitialized = 1;
-      return true;
-    }
-    
-    public void writeTo(com.google.protobuf.CodedOutputStream output)
-                        throws java.io.IOException {
-      getSerializedSize();
-      getUnknownFields().writeTo(output);
-    }
-    
-    private int memoizedSerializedSize = -1;
-    public int getSerializedSize() {
-      int size = memoizedSerializedSize;
-      if (size != -1) return size;
-    
-      size = 0;
-      size += getUnknownFields().getSerializedSize();
-      memoizedSerializedSize = size;
-      return size;
-    }
-    
-    private static final long serialVersionUID = 0L;
-    @java.lang.Override
-    protected java.lang.Object writeReplace()
-        throws java.io.ObjectStreamException {
-      return super.writeReplace();
-    }
-    
-    @java.lang.Override
-    public boolean equals(final java.lang.Object obj) {
-      if (obj == this) {
-       return true;
-      }
-      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto)) {
-        return super.equals(obj);
-      }
-      org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto) obj;
-      
-      boolean result = true;
-      result = result &&
-          getUnknownFields().equals(other.getUnknownFields());
-      return result;
-    }
-    
-    @java.lang.Override
-    public int hashCode() {
-      int hash = 41;
-      hash = (19 * hash) + getDescriptorForType().hashCode();
-      hash = (29 * hash) + getUnknownFields().hashCode();
-      return hash;
-    }
-    
-    public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto parseFrom(
-        com.google.protobuf.ByteString data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return newBuilder().mergeFrom(data).buildParsed();
-    }
-    public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto parseFrom(
-        com.google.protobuf.ByteString data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return newBuilder().mergeFrom(data, extensionRegistry)
-               .buildParsed();
-    }
-    public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto parseFrom(byte[] data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return newBuilder().mergeFrom(data).buildParsed();
-    }
-    public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto parseFrom(
-        byte[] data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return newBuilder().mergeFrom(data, extensionRegistry)
-               .buildParsed();
-    }
-    public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto parseFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return newBuilder().mergeFrom(input).buildParsed();
-    }
-    public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto parseFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return newBuilder().mergeFrom(input, extensionRegistry)
-               .buildParsed();
-    }
-    public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto parseDelimitedFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      Builder builder = newBuilder();
-      if (builder.mergeDelimitedFrom(input)) {
-        return builder.buildParsed();
-      } else {
-        return null;
-      }
-    }
-    public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto parseDelimitedFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      Builder builder = newBuilder();
-      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
-        return builder.buildParsed();
-      } else {
-        return null;
-      }
-    }
-    public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto parseFrom(
-        com.google.protobuf.CodedInputStream input)
-        throws java.io.IOException {
-      return newBuilder().mergeFrom(input).buildParsed();
-    }
-    public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto parseFrom(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return newBuilder().mergeFrom(input, extensionRegistry)
-               .buildParsed();
-    }
-    
-    public static Builder newBuilder() { return Builder.create(); }
-    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto prototype) {
-      return newBuilder().mergeFrom(prototype);
-    }
-    public Builder toBuilder() { return newBuilder(this); }
-    
-    @java.lang.Override
-    protected Builder newBuilderForType(
-        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-      Builder builder = new Builder(parent);
-      return builder;
-    }
-    public static final class Builder extends
-        com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProtoOrBuilder {
-      public static final com.google.protobuf.Descriptors.Descriptor
-          getDescriptor() {
-        return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_VersionRequestProto_descriptor;
-      }
-      
-      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-          internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_VersionRequestProto_fieldAccessorTable;
-      }
-      
-      // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto.newBuilder()
-      private Builder() {
-        maybeForceBuilderInitialization();
-      }
-      
-      private Builder(BuilderParent parent) {
-        super(parent);
-        maybeForceBuilderInitialization();
-      }
-      private void maybeForceBuilderInitialization() {
-        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
-        }
-      }
-      private static Builder create() {
-        return new Builder();
-      }
-      
-      public Builder clear() {
-        super.clear();
-        return this;
-      }
-      
-      public Builder clone() {
-        return create().mergeFrom(buildPartial());
-      }
-      
-      public com.google.protobuf.Descriptors.Descriptor
-          getDescriptorForType() {
-        return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto.getDescriptor();
-      }
-      
-      public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto getDefaultInstanceForType() {
-        return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto.getDefaultInstance();
-      }
-      
-      public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto build() {
-        org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto result = buildPartial();
-        if (!result.isInitialized()) {
-          throw newUninitializedMessageException(result);
-        }
-        return result;
-      }
-      
-      private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto buildParsed()
-          throws com.google.protobuf.InvalidProtocolBufferException {
-        org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto result = buildPartial();
-        if (!result.isInitialized()) {
-          throw newUninitializedMessageException(
-            result).asInvalidProtocolBufferException();
-        }
-        return result;
-      }
-      
-      public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto buildPartial() {
-        org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto(this);
-        onBuilt();
-        return result;
-      }
-      
-      public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto) {
-          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto)other);
-        } else {
-          super.mergeFrom(other);
-          return this;
-        }
-      }
-      
-      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto other) {
-        if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto.getDefaultInstance()) return this;
-        this.mergeUnknownFields(other.getUnknownFields());
-        return this;
-      }
-      
-      public final boolean isInitialized() {
-        return true;
-      }
-      
-      public Builder mergeFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws java.io.IOException {
-        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
-          com.google.protobuf.UnknownFieldSet.newBuilder(
-            this.getUnknownFields());
-        while (true) {
-          int tag = input.readTag();
-          switch (tag) {
-            case 0:
-              this.setUnknownFields(unknownFields.build());
-              onChanged();
-              return this;
-            default: {
-              if (!parseUnknownField(input, unknownFields,
-                                     extensionRegistry, tag)) {
-                this.setUnknownFields(unknownFields.build());
-                onChanged();
-                return this;
-              }
-              break;
-            }
-          }
-        }
-      }
-      
-      
-      // @@protoc_insertion_point(builder_scope:VersionRequestProto)
-    }
-    
-    static {
-      defaultInstance = new VersionRequestProto(true);
-      defaultInstance.initFields();
-    }
-    
-    // @@protoc_insertion_point(class_scope:VersionRequestProto)
-  }
-  
-  public interface VersionResponseProtoOrBuilder
-      extends com.google.protobuf.MessageOrBuilder {
-    
-    // required .NamespaceInfoProto info = 1;
-    boolean hasInfo();
-    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getInfo();
-    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getInfoOrBuilder();
-  }
-  public static final class VersionResponseProto extends
-      com.google.protobuf.GeneratedMessage
-      implements VersionResponseProtoOrBuilder {
-    // Use VersionResponseProto.newBuilder() to construct.
-    private VersionResponseProto(Builder builder) {
-      super(builder);
-    }
-    private VersionResponseProto(boolean noInit) {}
-    
-    private static final VersionResponseProto defaultInstance;
-    public static VersionResponseProto getDefaultInstance() {
-      return defaultInstance;
-    }
-    
-    public VersionResponseProto getDefaultInstanceForType() {
-      return defaultInstance;
-    }
-    
-    public static final com.google.protobuf.Descriptors.Descriptor
-        getDescriptor() {
-      return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_VersionResponseProto_descriptor;
-    }
-    
-    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-        internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_VersionResponseProto_fieldAccessorTable;
-    }
-    
-    private int bitField0_;
-    // required .NamespaceInfoProto info = 1;
-    public static final int INFO_FIELD_NUMBER = 1;
-    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto info_;
-    public boolean hasInfo() {
-      return ((bitField0_ & 0x00000001) == 0x00000001);
-    }
-    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getInfo() {
-      return info_;
-    }
-    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getInfoOrBuilder() {
-      return info_;
-    }
-    
-    private void initFields() {
-      info_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
-    }
-    private byte memoizedIsInitialized = -1;
-    public final boolean isInitialized() {
-      byte isInitialized = memoizedIsInitialized;
-      if (isInitialized != -1) return isInitialized == 1;
-      
-      if (!hasInfo()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
-      if (!getInfo().isInitialized()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
-      memoizedIsInitialized = 1;
-      return true;
-    }
-    
-    public void writeTo(com.google.protobuf.CodedOutputStream output)
-                        throws java.io.IOException {
-      getSerializedSize();
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeMessage(1, info_);
-      }
-      getUnknownFields().writeTo(output);
-    }
-    
-    private int memoizedSerializedSize = -1;
-    public int getSerializedSize() {
-      int size = memoizedSerializedSize;
-      if (size != -1) return size;
-    
-      size = 0;
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeMessageSize(1, info_);
-      }
-      size += getUnknownFields().getSerializedSize();
-      memoizedSerializedSize = size;
-      return size;
-    }
-    
-    private static final long serialVersionUID = 0L;
-    @java.lang.Override
-    protected java.lang.Object writeReplace()
-        throws java.io.ObjectStreamException {
-      return super.writeReplace();
-    }
-    
-    @java.lang.Override
-    public boolean equals(final java.lang.Object obj) {
-      if (obj == this) {
-       return true;
-      }
-      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto)) {
-        return super.equals(obj);
-      }
-      org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto) obj;
-      
-      boolean result = true;
-      result = result && (hasInfo() == other.hasInfo());
-      if (hasInfo()) {
-        result = result && getInfo()
-            .equals(other.getInfo());
-      }
-      result = result &&
-          getUnknownFields().equals(other.getUnknownFields());
-      return result;
-    }
-    
-    @java.lang.Override
-    public int hashCode() {
-      int hash = 41;
-      hash = (19 * hash) + getDescriptorForType().hashCode();
-      if (hasInfo()) {
-        hash = (37 * hash) + INFO_FIELD_NUMBER;
-        hash = (53 * hash) + getInfo().hashCode();
-      }
-      hash = (29 * hash) + getUnknownFields().hashCode();
-      return hash;
-    }
-    
-    public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto parseFrom(
-        com.google.protobuf.ByteString data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return newBuilder().mergeFrom(data).buildParsed();
-    }
-    public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto parseFrom(
-        com.google.protobuf.ByteString data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return newBuilder().mergeFrom(data, extensionRegistry)
-               .buildParsed();
-    }
-    public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto parseFrom(byte[] data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return newBuilder().mergeFrom(data).buildParsed();
-    }
-    public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto parseFrom(
-        byte[] data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return newBuilder().mergeFrom(data, extensionRegistry)
-               .buildParsed();
-    }
-    public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto parseFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return newBuilder().mergeFrom(input).buildParsed();
-    }
-    public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto parseFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return newBuilder().mergeFrom(input, extensionRegistry)
-               .buildParsed();
-    }
-    public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto parseDelimitedFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      Builder builder = newBuilder();
-      if (builder.mergeDelimitedFrom(input)) {
-        return builder.buildParsed();
-      } else {
-        return null;
-      }
-    }
-    public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto parseDelimitedFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      Builder builder = newBuilder();
-      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
-        return builder.buildParsed();
-      } else {
-        return null;
-      }
-    }
-    public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto parseFrom(
-        com.google.protobuf.CodedInputStream input)
-        throws java.io.IOException {
-      return newBuilder().mergeFrom(input).buildParsed();
-    }
-    public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto parseFrom(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return newBuilder().mergeFrom(input, extensionRegistry)
-               .buildParsed();
-    }
-    
-    public static Builder newBuilder() { return Builder.create(); }
-    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto prototype) {
-      return newBuilder().mergeFrom(prototype);
-    }
-    public Builder toBuilder() { return newBuilder(this); }
-    
-    @java.lang.Override
-    protected Builder newBuilderForType(
-        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-      Builder builder = new Builder(parent);
-      return builder;
-    }
-    public static final class Builder extends
-        com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProtoOrBuilder {
-      public static final com.google.protobuf.Descriptors.Descriptor
-          getDescriptor() {
-        return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_VersionResponseProto_descriptor;
-      }
-      
-      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-          internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_VersionResponseProto_fieldAccessorTable;
-      }
-      
-      // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto.newBuilder()
-      private Builder() {
-        maybeForceBuilderInitialization();
-      }
-      
-      private Builder(BuilderParent parent) {
-        super(parent);
-        maybeForceBuilderInitialization();
-      }
-      private void maybeForceBuilderInitialization() {
-        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
-          getInfoFieldBuilder();
-        }
-      }
-      private static Builder create() {
-        return new Builder();
-      }
-      
-      public Builder clear() {
-        super.clear();
-        if (infoBuilder_ == null) {
-          info_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
-        } else {
-          infoBuilder_.clear();
-        }
-        bitField0_ = (bitField0_ & ~0x00000001);
-        return this;
-      }
-      
-      public Builder clone() {
-        return create().mergeFrom(buildPartial());
-      }
-      
-      public com.google.protobuf.Descriptors.Descriptor
-          getDescriptorForType() {
-        return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto.getDescriptor();
-      }
-      
-      public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto getDefaultInstanceForType() {
-        return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto.getDefaultInstance();
-      }
-      
-      public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto build() {
-        org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto result = buildPartial();
-        if (!result.isInitialized()) {
-          throw newUninitializedMessageException(result);
-        }
-        return result;
-      }
-      
-      private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto buildParsed()
-          throws com.google.protobuf.InvalidProtocolBufferException {
-        org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto result = buildPartial();
-        if (!result.isInitialized()) {
-          throw newUninitializedMessageException(
-            result).asInvalidProtocolBufferException();
-        }
-        return result;
-      }
-      
-      public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto buildPartial() {
-        org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto(this);
-        int from_bitField0_ = bitField0_;
-        int to_bitField0_ = 0;
-        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
-          to_bitField0_ |= 0x00000001;
-        }
-        if (infoBuilder_ == null) {
-          result.info_ = info_;
-        } else {
-          result.info_ = infoBuilder_.build();
-        }
-        result.bitField0_ = to_bitField0_;
-        onBuilt();
-        return result;
-      }
-      
-      public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto) {
-          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto)other);
-        } else {
-          super.mergeFrom(other);
-          return this;
-        }
-      }
-      
-      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto other) {
-        if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto.getDefaultInstance()) return this;
-        if (other.hasInfo()) {
-          mergeInfo(other.getInfo());
-        }
-        this.mergeUnknownFields(other.getUnknownFields());
-        return this;
-      }
-      
-      public final boolean isInitialized() {
-        if (!hasInfo()) {
-          
-          return false;
-        }
-        if (!getInfo().isInitialized()) {
-          
-          return false;
-        }
-        return true;
-      }
-      
-      public Builder mergeFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws java.io.IOException {
-        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
-          com.google.protobuf.UnknownFieldSet.newBuilder(
-            this.getUnknownFields());
-        while (true) {
-          int tag = input.readTag();
-          switch (tag) {
-            case 0:
-              this.setUnknownFields(unknownFields.build());
-              onChanged();
-              return this;
-            default: {
-              if (!parseUnknownField(input, unknownFields,
-                                     extensionRegistry, tag)) {
-                this.setUnknownFields(unknownFields.build());
-                onChanged();
-                return this;
-              }
-              break;
-            }
-            case 10: {
-              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.newBuilder();
-              if (hasInfo()) {
-                subBuilder.mergeFrom(getInfo());
-              }
-              input.readMessage(subBuilder, extensionRegistry);
-              setInfo(subBuilder.buildPartial());
-              break;
-            }
-          }
-        }
-      }
-      
-      private int bitField0_;
-      
-      // required .NamespaceInfoProto info = 1;
-      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto info_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
-      private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> infoBuilder_;
-      public boolean hasInfo() {
-        return ((bitField0_ & 0x00000001) == 0x00000001);
-      }
-      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getInfo() {
-        if (infoBuilder_ == null) {
-          return info_;
-        } else {
-          return infoBuilder_.getMessage();
-        }
-      }
-      public Builder setInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
-        if (infoBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          info_ = value;
-          onChanged();
-        } else {
-          infoBuilder_.setMessage(value);
-        }
-        bitField0_ |= 0x00000001;
-        return this;
-      }
-      public Builder setInfo(
-          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder builderForValue) {
-        if (infoBuilder_ == null) {
-          info_ = builderForValue.build();
-          onChanged();
-        } else {
-          infoBuilder_.setMessage(builderForValue.build());
-        }
-        bitField0_ |= 0x00000001;
-        return this;
-      }
-      public Builder mergeInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
-        if (infoBuilder_ == null) {
-          if (((bitField0_ & 0x00000001) == 0x00000001) &&
-              info_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance()) {
-            info_ =
-              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.newBuilder(info_).mergeFrom(value).buildPartial();
-          } else {
-            info_ = value;
-          }
-          onChanged();
-        } else {
-          infoBuilder_.mergeFrom(value);
-        }
-        bitField0_ |= 0x00000001;
-        return this;
-      }
-      public Builder clearInfo() {
-        if (infoBuilder_ == null) {
-          info_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
-          onChanged();
-        } else {
-          infoBuilder_.clear();
-        }
-        bitField0_ = (bitField0_ & ~0x00000001);
-        return this;
-      }
-      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder getInfoBuilder() {
-        bitField0_ |= 0x00000001;
-        onChanged();
-        return getInfoFieldBuilder().getBuilder();
-      }
-      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getInfoOrBuilder() {
-        if (infoBuilder_ != null) {
-          return infoBuilder_.getMessageOrBuilder();
-        } else {
-          return info_;
-        }
-      }
-      private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> 
-          getInfoFieldBuilder() {
-        if (infoBuilder_ == null) {
-          infoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
-              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder>(
-                  info_,
-                  getParentForChildren(),
-                  isClean());
-          info_ = null;
-        }
-        return infoBuilder_;
-      }
-      
-      // @@protoc_insertion_point(builder_scope:VersionResponseProto)
-    }
-    
-    static {
-      defaultInstance = new VersionResponseProto(true);
-      defaultInstance.initFields();
-    }
-    
-    // @@protoc_insertion_point(class_scope:VersionResponseProto)
-  }
-  
   public interface ErrorReportRequestProtoOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
     
@@ -8696,8 +7923,8 @@ public final class NamenodeProtocolProto
       
       public abstract void versionRequest(
           com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto request,
-          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto> done);
+          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto> done);
       
       public abstract void errorReport(
           com.google.protobuf.RpcController controller,
@@ -8764,8 +7991,8 @@ public final class NamenodeProtocolProto
         @java.lang.Override
         public  void versionRequest(
             com.google.protobuf.RpcController controller,
-            org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto request,
-            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto> done) {
+            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto request,
+            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto> done) {
           impl.versionRequest(controller, request, done);
         }
         
@@ -8840,7 +8067,7 @@ public final class NamenodeProtocolProto
             case 3:
               return impl.rollEditLog(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto)request);
             case 4:
-              return impl.versionRequest(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto)request);
+              return impl.versionRequest(controller, (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto)request);
             case 5:
               return impl.errorReport(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto)request);
             case 6:
@@ -8874,7 +8101,7 @@ public final class NamenodeProtocolProto
             case 3:
               return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto.getDefaultInstance();
             case 4:
-              return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto.getDefaultInstance();
+              return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto.getDefaultInstance();
             case 5:
               return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance();
             case 6:
@@ -8908,7 +8135,7 @@ public final class NamenodeProtocolProto
             case 3:
               return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.getDefaultInstance();
             case 4:
-              return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto.getDefaultInstance();
+              return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.getDefaultInstance();
             case 5:
               return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance();
             case 6:
@@ -8949,8 +8176,8 @@ public final class NamenodeProtocolProto
     
     public abstract void versionRequest(
         com.google.protobuf.RpcController controller,
-        org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto request,
-        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto> done);
+        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto> done);
     
     public abstract void errorReport(
         com.google.protobuf.RpcController controller,
@@ -9020,8 +8247,8 @@ public final class NamenodeProtocolProto
               done));
           return;
         case 4:
-          this.versionRequest(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto)request,
-            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto>specializeCallback(
+          this.versionRequest(controller, (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto)request,
+            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto>specializeCallback(
               done));
           return;
         case 5:
@@ -9072,7 +8299,7 @@ public final class NamenodeProtocolProto
         case 3:
           return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto.getDefaultInstance();
         case 4:
-          return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto.getDefaultInstance();
+          return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto.getDefaultInstance();
         case 5:
           return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance();
         case 6:
@@ -9106,7 +8333,7 @@ public final class NamenodeProtocolProto
         case 3:
           return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.getDefaultInstance();
         case 4:
-          return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto.getDefaultInstance();
+          return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.getDefaultInstance();
         case 5:
           return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance();
         case 6:
@@ -9200,17 +8427,17 @@ public final class NamenodeProtocolProto
       
       public  void versionRequest(
           com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto request,
-          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto> done) {
+          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto> done) {
         channel.callMethod(
           getDescriptor().getMethods().get(4),
           controller,
           request,
-          org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto.getDefaultInstance(),
+          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.getDefaultInstance(),
           com.google.protobuf.RpcUtil.generalizeCallback(
             done,
-            org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto.class,
-            org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto.getDefaultInstance()));
+            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.class,
+            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.getDefaultInstance()));
       }
       
       public  void errorReport(
@@ -9315,9 +8542,9 @@ public final class NamenodeProtocolProto
           org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto request)
           throws com.google.protobuf.ServiceException;
       
-      public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto versionRequest(
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto versionRequest(
           com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto request)
+          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto request)
           throws com.google.protobuf.ServiceException;
       
       public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto errorReport(
@@ -9401,15 +8628,15 @@ public final class NamenodeProtocolProto
       }
       
       
-      public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto versionRequest(
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto versionRequest(
           com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto request)
+          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto request)
           throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto) channel.callBlockingMethod(
+        return (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto) channel.callBlockingMethod(
           getDescriptor().getMethods().get(4),
           controller,
           request,
-          org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto.getDefaultInstance());
+          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.getDefaultInstance());
       }
       
       
@@ -9516,16 +8743,6 @@ public final class NamenodeProtocolProto
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_RollEditLogResponseProto_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_VersionRequestProto_descriptor;
-  private static
-    com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_VersionRequestProto_fieldAccessorTable;
-  private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_VersionResponseProto_descriptor;
-  private static
-    com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_VersionResponseProto_fieldAccessorTable;
-  private static com.google.protobuf.Descriptors.Descriptor
     internal_static_ErrorReportRequestProto_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -9595,48 +8812,45 @@ public final class NamenodeProtocolProto
       "actionIdResponseProto\022\014\n\004txId\030\001 \002(\004\"\031\n\027R" +
       "ollEditLogRequestProto\"H\n\030RollEditLogRes",
       "ponseProto\022,\n\tsignature\030\001 \002(\0132\031.Checkpoi" +
-      "ntSignatureProto\"\025\n\023VersionRequestProto\"" +
-      "9\n\024VersionResponseProto\022!\n\004info\030\001 \002(\0132\023." +
-      "NamespaceInfoProto\"k\n\027ErrorReportRequest" +
-      "Proto\0220\n\014registration\030\001 \002(\0132\032.NamenodeRe" +
-      "gistrationProto\022\021\n\terrorCode\030\002 \002(\r\022\013\n\003ms" +
-      "g\030\003 \002(\t\"\032\n\030ErrorReportResponseProto\"H\n\024R" +
-      "egisterRequestProto\0220\n\014registration\030\001 \002(" +
-      "\0132\032.NamenodeRegistrationProto\"I\n\025Registe" +
-      "rResponseProto\0220\n\014registration\030\001 \002(\0132\032.N",
-      "amenodeRegistrationProto\"O\n\033StartCheckpo" +
-      "intRequestProto\0220\n\014registration\030\001 \002(\0132\032." +
-      "NamenodeRegistrationProto\"F\n\034StartCheckp" +
-      "ointResponseProto\022&\n\007command\030\001 \002(\0132\025.Nam" +
-      "enodeCommandProto\"{\n\031EndCheckpointReques" +
-      "tProto\0220\n\014registration\030\001 \002(\0132\032.NamenodeR" +
-      "egistrationProto\022,\n\tsignature\030\002 \002(\0132\031.Ch" +
-      "eckpointSignatureProto\"\034\n\032EndCheckpointR" +
-      "esponseProto\"3\n\036GetEditLogManifestReques" +
-      "tProto\022\021\n\tsinceTxId\030\001 \002(\004\"P\n\037GetEditLogM",
-      "anifestResponseProto\022-\n\010manifest\030\001 \002(\0132\033" +
-      ".RemoteEditLogManifestProto2\345\005\n\027Namenode" +
-      "ProtocolService\022<\n\tgetBlocks\022\026.GetBlocks" +
-      "RequestProto\032\027.GetBlocksResponseProto\022E\n" +
-      "\014getBlockKeys\022\031.GetBlockKeysRequestProto" +
-      "\032\032.GetBlockKeysResponseProto\022P\n\017getTrans" +
-      "ationId\022\035.GetTransactionIdRequestProto\032\036" +
-      ".GetTransactionIdResponseProto\022B\n\013rollEd" +
-      "itLog\022\030.RollEditLogRequestProto\032\031.RollEd" +
-      "itLogResponseProto\022=\n\016versionRequest\022\024.V",
-      "ersionRequestProto\032\025.VersionResponseProt" +
-      "o\022B\n\013errorReport\022\030.ErrorReportRequestPro" +
-      "to\032\031.ErrorReportResponseProto\0229\n\010registe" +
-      "r\022\025.RegisterRequestProto\032\026.RegisterRespo" +
-      "nseProto\022N\n\017startCheckpoint\022\034.StartCheck" +
-      "pointRequestProto\032\035.StartCheckpointRespo" +
-      "nseProto\022H\n\rendCheckpoint\022\032.EndCheckpoin" +
-      "tRequestProto\032\033.EndCheckpointResponsePro" +
-      "to\022W\n\022getEditLogManifest\022\037.GetEditLogMan" +
-      "ifestRequestProto\032 .GetEditLogManifestRe",
-      "sponseProtoBE\n%org.apache.hadoop.hdfs.pr" +
-      "otocol.protoB\026NamenodeProtocolProtos\210\001\001\240" +
-      "\001\001"
+      "ntSignatureProto\"k\n\027ErrorReportRequestPr" +
+      "oto\0220\n\014registration\030\001 \002(\0132\032.NamenodeRegi" +
+      "strationProto\022\021\n\terrorCode\030\002 \002(\r\022\013\n\003msg\030" +
+      "\003 \002(\t\"\032\n\030ErrorReportResponseProto\"H\n\024Reg" +
+      "isterRequestProto\0220\n\014registration\030\001 \002(\0132" +
+      "\032.NamenodeRegistrationProto\"I\n\025RegisterR" +
+      "esponseProto\0220\n\014registration\030\001 \002(\0132\032.Nam" +
+      "enodeRegistrationProto\"O\n\033StartCheckpoin" +
+      "tRequestProto\0220\n\014registration\030\001 \002(\0132\032.Na",
+      "menodeRegistrationProto\"F\n\034StartCheckpoi" +
+      "ntResponseProto\022&\n\007command\030\001 \002(\0132\025.Namen" +
+      "odeCommandProto\"{\n\031EndCheckpointRequestP" +
+      "roto\0220\n\014registration\030\001 \002(\0132\032.NamenodeReg" +
+      "istrationProto\022,\n\tsignature\030\002 \002(\0132\031.Chec" +
+      "kpointSignatureProto\"\034\n\032EndCheckpointRes" +
+      "ponseProto\"3\n\036GetEditLogManifestRequestP" +
+      "roto\022\021\n\tsinceTxId\030\001 \002(\004\"P\n\037GetEditLogMan" +
+      "ifestResponseProto\022-\n\010manifest\030\001 \002(\0132\033.R" +
+      "emoteEditLogManifestProto2\345\005\n\027NamenodePr",
+      "otocolService\022<\n\tgetBlocks\022\026.GetBlocksRe" +
+      "questProto\032\027.GetBlocksResponseProto\022E\n\014g" +
+      "etBlockKeys\022\031.GetBlockKeysRequestProto\032\032" +
+      ".GetBlockKeysResponseProto\022P\n\017getTransat" +
+      "ionId\022\035.GetTransactionIdRequestProto\032\036.G" +
+      "etTransactionIdResponseProto\022B\n\013rollEdit" +
+      "Log\022\030.RollEditLogRequestProto\032\031.RollEdit" +
+      "LogResponseProto\022=\n\016versionRequest\022\024.Ver" +
+      "sionRequestProto\032\025.VersionResponseProto\022" +
+      "B\n\013errorReport\022\030.ErrorReportRequestProto",
+      "\032\031.ErrorReportResponseProto\0229\n\010register\022" +
+      "\025.RegisterRequestProto\032\026.RegisterRespons" +
+      "eProto\022N\n\017startCheckpoint\022\034.StartCheckpo" +
+      "intRequestProto\032\035.StartCheckpointRespons" +
+      "eProto\022H\n\rendCheckpoint\022\032.EndCheckpointR" +
+      "equestProto\032\033.EndCheckpointResponseProto" +
+      "\022W\n\022getEditLogManifest\022\037.GetEditLogManif" +
+      "estRequestProto\032 .GetEditLogManifestResp" +
+      "onseProtoBE\n%org.apache.hadoop.hdfs.prot" +
+      "ocol.protoB\026NamenodeProtocolProtos\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -9707,24 +8921,8 @@ public final class NamenodeProtocolProto
               new java.lang.String[] { "Signature", },
               org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.class,
               org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.Builder.class);
-          internal_static_VersionRequestProto_descriptor =
-            getDescriptor().getMessageTypes().get(8);
-          internal_static_VersionRequestProto_fieldAccessorTable = new
-            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_VersionRequestProto_descriptor,
-              new java.lang.String[] { },
-              org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto.class,
-              org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto.Builder.class);
-          internal_static_VersionResponseProto_descriptor =
-            getDescriptor().getMessageTypes().get(9);
-          internal_static_VersionResponseProto_fieldAccessorTable = new
-            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_VersionResponseProto_descriptor,
-              new java.lang.String[] { "Info", },
-              org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto.class,
-              org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto.Builder.class);
           internal_static_ErrorReportRequestProto_descriptor =
-            getDescriptor().getMessageTypes().get(10);
+            getDescriptor().getMessageTypes().get(8);
           internal_static_ErrorReportRequestProto_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_ErrorReportRequestProto_descriptor,
@@ -9732,7 +8930,7 @@ public final class NamenodeProtocolProto
               org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.class,
               org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.Builder.class);
           internal_static_ErrorReportResponseProto_descriptor =
-            getDescriptor().getMessageTypes().get(11);
+            getDescriptor().getMessageTypes().get(9);
           internal_static_ErrorReportResponseProto_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_ErrorReportResponseProto_descriptor,
@@ -9740,7 +8938,7 @@ public final class NamenodeProtocolProto
               org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.class,
               org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.Builder.class);
           internal_static_RegisterRequestProto_descriptor =
-            getDescriptor().getMessageTypes().get(12);
+            getDescriptor().getMessageTypes().get(10);
           internal_static_RegisterRequestProto_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_RegisterRequestProto_descriptor,
@@ -9748,7 +8946,7 @@ public final class NamenodeProtocolProto
               org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.class,
               org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.Builder.class);
           internal_static_RegisterResponseProto_descriptor =
-            getDescriptor().getMessageTypes().get(13);
+            getDescriptor().getMessageTypes().get(11);
           internal_static_RegisterResponseProto_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_RegisterResponseProto_descriptor,
@@ -9756,7 +8954,7 @@ public final class NamenodeProtocolProto
               org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.class,
               org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.Builder.class);
           internal_static_StartCheckpointRequestProto_descriptor =
-            getDescriptor().getMessageTypes().get(14);
+            getDescriptor().getMessageTypes().get(12);
           internal_static_StartCheckpointRequestProto_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_StartCheckpointRequestProto_descriptor,
@@ -9764,7 +8962,7 @@ public final class NamenodeProtocolProto
               org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.class,
               org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.Builder.class);
           internal_static_StartCheckpointResponseProto_descriptor =
-            getDescriptor().getMessageTypes().get(15);
+            getDescriptor().getMessageTypes().get(13);
           internal_static_StartCheckpointResponseProto_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_StartCheckpointResponseProto_descriptor,
@@ -9772,7 +8970,7 @@ public final class NamenodeProtocolProto
               org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.class,
               org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.Builder.class);
           internal_static_EndCheckpointRequestProto_descriptor =
-            getDescriptor().getMessageTypes().get(16);
+            getDescriptor().getMessageTypes().get(14);
           internal_static_EndCheckpointRequestProto_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_EndCheckpointRequestProto_descriptor,
@@ -9780,7 +8978,7 @@ public final class NamenodeProtocolProto
               org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.class,
               org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.Builder.class);
           internal_static_EndCheckpointResponseProto_descriptor =
-            getDescriptor().getMessageTypes().get(17);
+            getDescriptor().getMessageTypes().get(15);
           internal_static_EndCheckpointResponseProto_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_EndCheckpointResponseProto_descriptor,
@@ -9788,7 +8986,7 @@ public final class NamenodeProtocolProto
               org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.class,
               org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.Builder.class);
           internal_static_GetEditLogManifestRequestProto_descriptor =
-            getDescriptor().getMessageTypes().get(18);
+            getDescriptor().getMessageTypes().get(16);
           internal_static_GetEditLogManifestRequestProto_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_GetEditLogManifestRequestProto_descriptor,
@@ -9796,7 +8994,7 @@ public final class NamenodeProtocolProto
               org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.class,
               org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.Builder.class);
           internal_static_GetEditLogManifestResponseProto_descriptor =
-            getDescriptor().getMessageTypes().get(19);
+            getDescriptor().getMessageTypes().get(17);
           internal_static_GetEditLogManifestResponseProto_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_GetEditLogManifestResponseProto_descriptor,
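
The renumbering above is mechanical: protobuf assigns each generated message a descriptor index equal to its declaration position in the .proto file, so with the version request/response messages gone from this file (they now come from HdfsProtos, per the imports in the files added below), every later index drops by two. A throwaway sketch that prints the resulting mapping; illustrative only, not part of this commit:

    com.google.protobuf.Descriptors.FileDescriptor fd =
        NamenodeProtocolProtos.getDescriptor();
    for (com.google.protobuf.Descriptors.Descriptor d : fd.getMessageTypes()) {
      // Prints declaration index alongside message name,
      // e.g. "11 -> RegisterResponseProto" after this change.
      System.out.println(d.getIndex() + " -> " + d.getName());
    }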

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java?rev=1212606&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java Fri Dec  9 20:02:33 2011
@@ -0,0 +1,302 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocolPB;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
+import org.apache.hadoop.hdfs.server.protocolR23Compatible.DatanodeWireProtocol;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryProxy;
+import org.apache.hadoop.ipc.ProtobufHelper;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtocolSignature;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+/**
+ * This class is the client-side translator: it translates requests made on the
+ * {@link DatanodeProtocol} interface to the RPC server implementing
+ * {@link DatanodeProtocolPB}.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class DatanodeProtocolClientSideTranslatorPB implements DatanodeProtocol,
+    Closeable {
+  
+  /** RpcController is not used and hence is set to null */
+  private final static RpcController NULL_CONTROLLER = null;
+  private final DatanodeProtocolPB rpcProxy;
+  private static final VersionRequestProto VERSION_REQUEST = 
+      VersionRequestProto.newBuilder().build();
+  
+  public DatanodeProtocolClientSideTranslatorPB(InetSocketAddress nameNodeAddr,
+      Configuration conf) throws IOException {
+    RPC.setProtocolEngine(conf, DatanodeProtocolPB.class,
+        ProtobufRpcEngine.class);
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    rpcProxy = createNamenodeWithRetry(createNamenode(nameNodeAddr, conf, ugi));
+  }
+
+  private static DatanodeProtocolPB createNamenode(
+      InetSocketAddress nameNodeAddr, Configuration conf,
+      UserGroupInformation ugi) throws IOException {
+    return RPC.getProxy(DatanodeProtocolPB.class,
+        RPC.getProtocolVersion(DatanodeProtocolPB.class), nameNodeAddr, ugi,
+        conf, NetUtils.getSocketFactory(conf, DatanodeWireProtocol.class));
+  }
+
+  /** Wrap a {@link NameNode} proxy with retry handling */
+  static DatanodeProtocolPB createNamenodeWithRetry(
+      DatanodeProtocolPB rpcNamenode) {
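+    // Retry up to five times, sleeping the lease soft-limit period between
+    // attempts; applied below only when the remote side reports
+    // AlreadyBeingCreatedException.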
+    RetryPolicy createPolicy = RetryPolicies
+        .retryUpToMaximumCountWithFixedSleep(5,
+            HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
+
+    Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap = 
+        new HashMap<Class<? extends Exception>, RetryPolicy>();
+    remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
+        createPolicy);
+
+    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
+        new HashMap<Class<? extends Exception>, RetryPolicy>();
+    exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
+        .retryByRemoteException(RetryPolicies.TRY_ONCE_THEN_FAIL,
+            remoteExceptionToPolicyMap));
+    RetryPolicy methodPolicy = RetryPolicies.retryByException(
+        RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
+    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();
+
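+    // Methods absent from this map fall back to TRY_ONCE_THEN_FAIL, so only
+    // "create" is retried.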
+    methodNameToPolicyMap.put("create", methodPolicy);
+
+    return (DatanodeProtocolPB) RetryProxy.create(DatanodeProtocolPB.class,
+        rpcNamenode, methodNameToPolicyMap);
+  }
+  
+  @Override
+  public long getProtocolVersion(String protocol, long clientVersion)
+      throws IOException {
+    return rpcProxy.getProtocolVersion(protocol, clientVersion);
+  }
+
+  @Override
+  public ProtocolSignature getProtocolSignature(String protocolName,
+      long clientVersion, int clientMethodsHash) throws IOException {
+    return ProtocolSignatureWritable.convert(rpcProxy.getProtocolSignature2(
+        protocolName, clientVersion, clientMethodsHash));
+  }
+
+  @Override
+  public void close() throws IOException {
+    RPC.stopProxy(rpcProxy);
+  }
+
+  @Override
+  public DatanodeRegistration registerDatanode(DatanodeRegistration registration)
+      throws IOException {
+    RegisterDatanodeRequestProto req = RegisterDatanodeRequestProto
+        .newBuilder().setRegistration(PBHelper.convert(registration)).build();
+    RegisterDatanodeResponseProto resp;
+    try {
+      resp = rpcProxy.registerDatanode(NULL_CONTROLLER, req);
+    } catch (ServiceException se) {
+      throw ProtobufHelper.getRemoteException(se);
+    }
+    return PBHelper.convert(resp.getRegistration());
+  }
+
+  @Override
+  public DatanodeCommand[] sendHeartbeat(DatanodeRegistration registration,
+      long capacity, long dfsUsed, long remaining, long blockPoolUsed,
+      int xmitsInProgress, int xceiverCount, int failedVolumes)
+      throws IOException {
+    HeartbeatRequestProto req = HeartbeatRequestProto.newBuilder()
+        .setRegistration(PBHelper.convert(registration)).setCapacity(capacity)
+        .setDfsUsed(dfsUsed).setRemaining(remaining)
+        .setBlockPoolUsed(blockPoolUsed).setXmitsInProgress(xmitsInProgress)
+        .setXceiverCount(xceiverCount).setFailedVolumes(failedVolumes).build();
+    HeartbeatResponseProto resp;
+    try {
+      resp = rpcProxy.sendHeartbeat(NULL_CONTROLLER, req);
+    } catch (ServiceException se) {
+      throw ProtobufHelper.getRemoteException(se);
+    }
+    DatanodeCommand[] cmds = new DatanodeCommand[resp.getCmdsList().size()];
+    int index = 0;
+    for (DatanodeCommandProto p : resp.getCmdsList()) {
+      cmds[index] = PBHelper.convert(p);
+      index++;
+    }
+    return cmds;
+  }
+
+  @Override
+  public DatanodeCommand blockReport(DatanodeRegistration registration,
+      String poolId, long[] blocks) throws IOException {
+    BlockReportRequestProto.Builder builder = BlockReportRequestProto
+        .newBuilder().setRegistration(PBHelper.convert(registration))
+        .setBlockPoolId(poolId);
+    if (blocks != null) {
+      for (int i = 0; i < blocks.length; i++) {
+        builder.addBlocks(blocks[i]);
+      }
+    }
+    BlockReportRequestProto req = builder.build();
+    BlockReportResponseProto resp;
+    try {
+      resp = rpcProxy.blockReport(NULL_CONTROLLER, req);
+    } catch (ServiceException se) {
+      throw ProtobufHelper.getRemoteException(se);
+    }
+    return PBHelper.convert(resp.getCmd());
+  }
+
+  @Override
+  public void blockReceivedAndDeleted(DatanodeRegistration registration,
+      String poolId, ReceivedDeletedBlockInfo[] receivedAndDeletedBlocks)
+      throws IOException {
+    BlockReceivedAndDeletedRequestProto.Builder builder = 
+        BlockReceivedAndDeletedRequestProto.newBuilder()
+        .setRegistration(PBHelper.convert(registration))
+        .setBlockPoolId(poolId);
+    if (receivedAndDeletedBlocks != null) {
+      for (int i = 0; i < receivedAndDeletedBlocks.length; i++) {
+        builder.addBlocks(PBHelper.convert(receivedAndDeletedBlocks[i]));
+      }
+    }
+    BlockReceivedAndDeletedRequestProto req = builder.build();
+    try {
+      rpcProxy.blockReceivedAndDeleted(NULL_CONTROLLER, req);
+    } catch (ServiceException se) {
+      throw ProtobufHelper.getRemoteException(se);
+    }
+  }
+
+  @Override
+  public void errorReport(DatanodeRegistration registration, int errorCode,
+      String msg) throws IOException {
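+    // "Registartion" mirrors the misspelled field name in the .proto file;
+    // the generated accessor keeps that spelling.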
+    ErrorReportRequestProto req = ErrorReportRequestProto.newBuilder()
+        .setRegistartion(PBHelper.convert(registration))
+        .setErrorCode(errorCode).setMsg(msg).build();
+    try {
+      rpcProxy.errorReport(NULL_CONTROLLER, req);
+    } catch (ServiceException se) {
+      throw ProtobufHelper.getRemoteException(se);
+    }
+  }
+
+  @Override
+  public NamespaceInfo versionRequest() throws IOException {
+    try {
+      return PBHelper.convert(rpcProxy.versionRequest(NULL_CONTROLLER,
+          VERSION_REQUEST).getInfo());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public UpgradeCommand processUpgradeCommand(UpgradeCommand comm)
+      throws IOException {
+    ProcessUpgradeRequestProto req = ProcessUpgradeRequestProto.newBuilder()
+        .setCmd(PBHelper.convert(comm)).build();
+    ProcessUpgradeResponseProto resp;
+    try {
+      resp = rpcProxy.processUpgrade(NULL_CONTROLLER, req);
+    } catch (ServiceException se) {
+      throw ProtobufHelper.getRemoteException(se);
+    }
+    return PBHelper.convert(resp.getCmd());
+  }
+
+  @Override
+  public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
+    ReportBadBlocksRequestProto.Builder builder = ReportBadBlocksRequestProto
+        .newBuilder();
+    for (int i = 0; i < blocks.length; i++) {
+      builder.addBlocks(PBHelper.convert(blocks[i]));
+    }
+    ReportBadBlocksRequestProto req = builder.build();
+    try {
+      rpcProxy.reportBadBlocks(NULL_CONTROLLER, req);
+    } catch (ServiceException se) {
+      throw ProtobufHelper.getRemoteException(se);
+    }
+  }
+
+  @Override
+  public void commitBlockSynchronization(ExtendedBlock block,
+      long newgenerationstamp, long newlength, boolean closeFile,
+      boolean deleteblock, DatanodeID[] newtargets) throws IOException {
+    CommitBlockSynchronizationRequestProto.Builder builder = 
+        CommitBlockSynchronizationRequestProto.newBuilder()
+        .setBlock(PBHelper.convert(block)).setNewGenStamp(newgenerationstamp)
+        .setNewLength(newlength).setCloseFile(closeFile)
+        .setDeleteBlock(deleteblock);
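+    // "Taragets" mirrors the misspelled repeated field in the .proto file.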
+    for (int i = 0; i < newtargets.length; i++) {
+      builder.addNewTaragets(PBHelper.convert(newtargets[i]));
+    }
+    CommitBlockSynchronizationRequestProto req = builder.build();
+    try {
+      rpcProxy.commitBlockSynchronization(NULL_CONTROLLER, req);
+    } catch (ServiceException se) {
+      throw ProtobufHelper.getRemoteException(se);
+    }
+  }
+}
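
A usage sketch for the translator above: it is a drop-in DatanodeProtocol implementation, so datanode code can keep calling the plain interface while the marshalling to protobuf happens underneath. A minimal sketch, assuming a hypothetical namenode address and a default Configuration (neither is part of this commit):

    Configuration conf = new Configuration();
    InetSocketAddress nnAddr = NetUtils.createSocketAddr("nn.example.com:8020");
    DatanodeProtocolClientSideTranslatorPB namenode =
        new DatanodeProtocolClientSideTranslatorPB(nnAddr, conf);
    try {
      // Plain DatanodeProtocol call; becomes a VersionRequestProto on the wire.
      NamespaceInfo nsInfo = namenode.versionRequest();
    } finally {
      namenode.close();  // stops the underlying RPC proxy
    }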

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolPB.java?rev=1212606&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolPB.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolPB.java Fri Dec  9 20:02:33 2011
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocolPB;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeProtocolService;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
+import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.security.KerberosInfo;
+
+@KerberosInfo(
+    serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, 
+    clientPrincipal = DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY)
+@ProtocolInfo(
+    protocolName = "org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol", 
+    protocolVersion = 1)
+@InterfaceAudience.Private
+public interface DatanodeProtocolPB extends
+    DatanodeProtocolService.BlockingInterface, VersionedProtocol {
+  
+  /**
+   * This method is defined to get the protocol signature using the R23
+   * protocol; hence the suffix 2 has been added to the method name to avoid a
+   * conflict.
+   */
+  public ProtocolSignatureWritable getProtocolSignature2(String protocol,
+      long clientVersion, int clientMethodsHash) throws IOException;
+}
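
Server-side wiring for the interface above, sketched under the assumption that the namenode hands the generated blocking service to the ProtobufRpcEngine-backed RPC server; newReflectiveBlockingService is the standard protobuf 2.4 service factory, and namenodeImpl stands in for the NameNode's DatanodeProtocol implementation:

    DatanodeProtocolServerSideTranslatorPB translator =
        new DatanodeProtocolServerSideTranslatorPB(namenodeImpl);
    com.google.protobuf.BlockingService service =
        DatanodeProtocolService.newReflectiveBlockingService(translator);
    // The BlockingService is then registered with the RPC server, so incoming
    // DatanodeProtocolPB calls dispatch into the translator and on to the
    // NameNode's DatanodeProtocol implementation.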

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java?rev=1212606&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java Fri Dec  9 20:02:33 2011
@@ -0,0 +1,263 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocolPB;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
+import org.apache.hadoop.ipc.ProtocolSignature;
+import org.apache.hadoop.ipc.RPC;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+public class DatanodeProtocolServerSideTranslatorPB implements
+    DatanodeProtocolPB {
+
+  private final DatanodeProtocol impl;
+  private static final ErrorReportResponseProto ERROR_REPORT_RESPONSE_PROTO = 
+      ErrorReportResponseProto.newBuilder().build();
+  private static final BlockReceivedAndDeletedResponseProto 
+      BLOCK_RECEIVED_AND_DELETE_RESPONSE = 
+          BlockReceivedAndDeletedResponseProto.newBuilder().build();
+  private static final ReportBadBlocksResponseProto REPORT_BAD_BLOCK_RESPONSE = 
+      ReportBadBlocksResponseProto.newBuilder().build();
+  private static final CommitBlockSynchronizationResponseProto 
+      COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO =
+          CommitBlockSynchronizationResponseProto.newBuilder().build();
+
+  public DatanodeProtocolServerSideTranslatorPB(DatanodeProtocol impl) {
+    this.impl = impl;
+  }
+
+  @Override
+  public RegisterDatanodeResponseProto registerDatanode(
+      RpcController controller, RegisterDatanodeRequestProto request)
+      throws ServiceException {
+    DatanodeRegistration registration = PBHelper.convert(request
+        .getRegistration());
+    DatanodeRegistration registrationResp;
+    try {
+      registrationResp = impl.registerDatanode(registration);
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return RegisterDatanodeResponseProto.newBuilder()
+        .setRegistration(PBHelper.convert(registrationResp)).build();
+  }
+
+  @Override
+  public HeartbeatResponseProto sendHeartbeat(RpcController controller,
+      HeartbeatRequestProto request) throws ServiceException {
+    DatanodeCommand[] cmds;
+    try {
+      cmds = impl.sendHeartbeat(PBHelper.convert(request.getRegistration()),
+          request.getCapacity(), request.getDfsUsed(), request.getRemaining(),
+          request.getBlockPoolUsed(), request.getXmitsInProgress(),
+          request.getXceiverCount(), request.getFailedVolumes());
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    HeartbeatResponseProto.Builder builder = HeartbeatResponseProto
+        .newBuilder();
+    if (cmds != null) {
+      for (int i = 0; i < cmds.length; i++) {
+        builder.addCmds(PBHelper.convert(cmds[i]));
+      }
+    }
+    return builder.build();
+  }
+
+  @Override
+  public BlockReportResponseProto blockReport(RpcController controller,
+      BlockReportRequestProto request) throws ServiceException {
+    DatanodeCommand cmd;
+    List<Long> blockIds = request.getBlocksList();
+    long[] blocks = new long[blockIds.size()];
+    for (int i = 0; i < blockIds.size(); i++) {
+      blocks[i] = blockIds.get(i);
+    }
+    try {
+      cmd = impl.blockReport(PBHelper.convert(request.getRegistration()),
+          request.getBlockPoolId(), blocks);
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return BlockReportResponseProto.newBuilder().setCmd(PBHelper.convert(cmd))
+        .build();
+  }
+
+  @Override
+  public BlockReceivedAndDeletedResponseProto blockReceivedAndDeleted(
+      RpcController controller, BlockReceivedAndDeletedRequestProto request)
+      throws ServiceException {
+    List<ReceivedDeletedBlockInfoProto> rdbip = request.getBlocksList();
+    ReceivedDeletedBlockInfo[] info = 
+        new ReceivedDeletedBlockInfo[rdbip.size()];
+    for (int i = 0; i < rdbip.size(); i++) {
+      info[i] = PBHelper.convert(rdbip.get(i));
+    }
+    try {
+      impl.blockReceivedAndDeleted(PBHelper.convert(request.getRegistration()),
+          request.getBlockPoolId(), info);
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return BLOCK_RECEIVED_AND_DELETE_RESPONSE;
+  }
+
+  @Override
+  public ErrorReportResponseProto errorReport(RpcController controller,
+      ErrorReportRequestProto request) throws ServiceException {
+    try {
+      impl.errorReport(PBHelper.convert(request.getRegistartion()),
+          request.getErrorCode(), request.getMsg());
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return ERROR_REPORT_RESPONSE_PROTO;
+  }
+
+  @Override
+  public VersionResponseProto versionRequest(RpcController controller,
+      VersionRequestProto request) throws ServiceException {
+    NamespaceInfo info;
+    try {
+      info = impl.versionRequest();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return VersionResponseProto.newBuilder()
+        .setInfo(PBHelper.convert(info)).build();
+  }
+
+  @Override
+  public ProcessUpgradeResponseProto processUpgrade(RpcController controller,
+      ProcessUpgradeRequestProto request) throws ServiceException {
+    UpgradeCommand cmd;
+    try {
+      cmd = impl.processUpgradeCommand(PBHelper.convert(request.getCmd()));
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return ProcessUpgradeResponseProto.newBuilder()
+        .setCmd(PBHelper.convert(cmd)).build();
+  }
+
+  @Override
+  public ReportBadBlocksResponseProto reportBadBlocks(RpcController controller,
+      ReportBadBlocksRequestProto request) throws ServiceException {
+    List<LocatedBlockProto> lbps = request.getBlocksList();
+    LocatedBlock[] blocks = new LocatedBlock[lbps.size()];
+    for (int i = 0; i < lbps.size(); i++) {
+      blocks[i] = PBHelper.convert(lbps.get(i));
+    }
+    try {
+      impl.reportBadBlocks(blocks);
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return REPORT_BAD_BLOCK_RESPONSE;
+  }
+
+  @Override
+  public CommitBlockSynchronizationResponseProto commitBlockSynchronization(
+      RpcController controller, CommitBlockSynchronizationRequestProto request)
+      throws ServiceException {
+    List<DatanodeIDProto> dnprotos = request.getNewTaragetsList();
+    DatanodeID[] dns = new DatanodeID[dnprotos.size()];
+    for (int i = 0; i < dnprotos.size(); i++) {
+      dns[i] = PBHelper.convert(dnprotos.get(i));
+    }
+    try {
+      impl.commitBlockSynchronization(PBHelper.convert(request.getBlock()),
+          request.getNewGenStamp(), request.getNewLength(),
+          request.getCloseFile(), request.getDeleteBlock(), dns);
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
+  }
+
+  @Override
+  public long getProtocolVersion(String protocol, long clientVersion)
+      throws IOException {
+    return RPC.getProtocolVersion(DatanodeProtocolPB.class);
+  }
+
+  @Override
+  public ProtocolSignature getProtocolSignature(String protocol,
+      long clientVersion, int clientMethodsHash) throws IOException {
+    /**
+     * Don't forward this to the server. The protocol version and signature
+     * returned are those of {@link DatanodeProtocolPB}.
+     */
+    if (!protocol.equals(RPC.getProtocolName(DatanodeProtocolPB.class))) {
+      throw new IOException("The NameNode server side implements " +
+          RPC.getProtocolName(DatanodeProtocolPB.class) +
+          ". The requested protocol is unknown: " + protocol);
+    }
+
+    return ProtocolSignature.getProtocolSignature(clientMethodsHash,
+        RPC.getProtocolVersion(DatanodeProtocolPB.class),
+        DatanodeProtocolPB.class);
+  }
+
+  @Override
+  public ProtocolSignatureWritable getProtocolSignature2(String protocol,
+      long clientVersion, int clientMethodsHash) throws IOException {
+    /**
+     * Don't forward this to the server. The protocol version and signature
+     * returned are those of {@link DatanodeProtocolPB}.
+     */
+    return ProtocolSignatureWritable.convert(
+        this.getProtocolSignature(protocol, clientVersion, clientMethodsHash));
+  }
+
+}
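
One semantic point these translators lean on: protobuf repeated-field builders grow only through addX, while setX(int, value) replaces an element that must already exist and throws IndexOutOfBoundsException otherwise, so a fresh builder must be populated with the add form. A minimal sketch against the generated types, illustrative only:

    BlockReportRequestProto.Builder b = BlockReportRequestProto.newBuilder();
    b.addBlocks(1024L);     // appends; the repeated field now has one element
    b.setBlocks(0, 2048L);  // in-place replacement of an existing element
    // b.setBlocks(1, 4096L) would throw: index 1 does not exist yet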

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolPB.java?rev=1212606&r1=1212605&r2=1212606&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolPB.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolPB.java Fri Dec  9 20:02:33 2011
@@ -15,6 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.hadoop.hdfs.protocolPB;
 
 import java.io.IOException;


