hbase-commits mailing list archives

From: apurt...@apache.org
Subject: svn commit: r928876 [9/13] - in /hadoop/hbase/trunk: ./ contrib/stargate/ contrib/stargate/conf/ contrib/stargate/core/ contrib/stargate/core/src/ contrib/stargate/core/src/main/ contrib/stargate/core/src/main/java/ contrib/stargate/core/src/main/java/...
Date: Mon, 29 Mar 2010 19:15:20 GMT
Added: hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/TableSchemaMessage.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/TableSchemaMessage.java?rev=928876&view=auto
==============================================================================
--- hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/TableSchemaMessage.java (added)
+++ hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/TableSchemaMessage.java Mon Mar 29 19:15:15 2010
@@ -0,0 +1,949 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: TableSchemaMessage.proto
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+public final class TableSchemaMessage {
+  private TableSchemaMessage() {}
+  public static void registerAllExtensions(
+      com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public static final class TableSchema extends
+      com.google.protobuf.GeneratedMessage {
+    // Use TableSchema.newBuilder() to construct.
+    private TableSchema() {
+      initFields();
+    }
+    private TableSchema(boolean noInit) {}
+    
+    private static final TableSchema defaultInstance;
+    public static TableSchema getDefaultInstance() {
+      return defaultInstance;
+    }
+    
+    public TableSchema getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+    
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_descriptor;
+    }
+    
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_fieldAccessorTable;
+    }
+    
+    public static final class Attribute extends
+        com.google.protobuf.GeneratedMessage {
+      // Use Attribute.newBuilder() to construct.
+      private Attribute() {
+        initFields();
+      }
+      private Attribute(boolean noInit) {}
+      
+      private static final Attribute defaultInstance;
+      public static Attribute getDefaultInstance() {
+        return defaultInstance;
+      }
+      
+      public Attribute getDefaultInstanceForType() {
+        return defaultInstance;
+      }
+      
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_descriptor;
+      }
+      
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_fieldAccessorTable;
+      }
+      
+      // required string name = 1;
+      public static final int NAME_FIELD_NUMBER = 1;
+      private boolean hasName;
+      private java.lang.String name_ = "";
+      public boolean hasName() { return hasName; }
+      public java.lang.String getName() { return name_; }
+      
+      // required string value = 2;
+      public static final int VALUE_FIELD_NUMBER = 2;
+      private boolean hasValue;
+      private java.lang.String value_ = "";
+      public boolean hasValue() { return hasValue; }
+      public java.lang.String getValue() { return value_; }
+      
+      private void initFields() {
+      }
+      public final boolean isInitialized() {
+        if (!hasName) return false;
+        if (!hasValue) return false;
+        return true;
+      }
+      
+      public void writeTo(com.google.protobuf.CodedOutputStream output)
+                          throws java.io.IOException {
+        getSerializedSize();
+        if (hasName()) {
+          output.writeString(1, getName());
+        }
+        if (hasValue()) {
+          output.writeString(2, getValue());
+        }
+        getUnknownFields().writeTo(output);
+      }
+      
+      private int memoizedSerializedSize = -1;
+      public int getSerializedSize() {
+        int size = memoizedSerializedSize;
+        if (size != -1) return size;
+      
+        size = 0;
+        if (hasName()) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeStringSize(1, getName());
+        }
+        if (hasValue()) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeStringSize(2, getValue());
+        }
+        size += getUnknownFields().getSerializedSize();
+        memoizedSerializedSize = size;
+        return size;
+      }
+      
+      public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
+          com.google.protobuf.ByteString data)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return newBuilder().mergeFrom(data).buildParsed();
+      }
+      public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
+          com.google.protobuf.ByteString data,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return newBuilder().mergeFrom(data, extensionRegistry)
+                 .buildParsed();
+      }
+      public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(byte[] data)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return newBuilder().mergeFrom(data).buildParsed();
+      }
+      public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
+          byte[] data,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return newBuilder().mergeFrom(data, extensionRegistry)
+                 .buildParsed();
+      }
+      public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(java.io.InputStream input)
+          throws java.io.IOException {
+        return newBuilder().mergeFrom(input).buildParsed();
+      }
+      public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
+          java.io.InputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        return newBuilder().mergeFrom(input, extensionRegistry)
+                 .buildParsed();
+      }
+      public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseDelimitedFrom(java.io.InputStream input)
+          throws java.io.IOException {
+        Builder builder = newBuilder();
+        if (builder.mergeDelimitedFrom(input)) {
+          return builder.buildParsed();
+        } else {
+          return null;
+        }
+      }
+      public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseDelimitedFrom(
+          java.io.InputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        Builder builder = newBuilder();
+        if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+          return builder.buildParsed();
+        } else {
+          return null;
+        }
+      }
+      public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
+          com.google.protobuf.CodedInputStream input)
+          throws java.io.IOException {
+        return newBuilder().mergeFrom(input).buildParsed();
+      }
+      public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        return newBuilder().mergeFrom(input, extensionRegistry)
+                 .buildParsed();
+      }
+      
+      public static Builder newBuilder() { return Builder.create(); }
+      public Builder newBuilderForType() { return newBuilder(); }
+      public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute prototype) {
+        return newBuilder().mergeFrom(prototype);
+      }
+      public Builder toBuilder() { return newBuilder(this); }
+      
+      public static final class Builder extends
+          com.google.protobuf.GeneratedMessage.Builder<Builder> {
+        private org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute result;
+        
+        // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.newBuilder()
+        private Builder() {}
+        
+        private static Builder create() {
+          Builder builder = new Builder();
+          builder.result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute();
+          return builder;
+        }
+        
+        protected org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute internalGetResult() {
+          return result;
+        }
+        
+        public Builder clear() {
+          if (result == null) {
+            throw new IllegalStateException(
+              "Cannot call clear() after build().");
+          }
+          result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute();
+          return this;
+        }
+        
+        public Builder clone() {
+          return create().mergeFrom(result);
+        }
+        
+        public com.google.protobuf.Descriptors.Descriptor
+            getDescriptorForType() {
+          return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.getDescriptor();
+        }
+        
+        public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute getDefaultInstanceForType() {
+          return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.getDefaultInstance();
+        }
+        
+        public boolean isInitialized() {
+          return result.isInitialized();
+        }
+        public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute build() {
+          if (result != null && !isInitialized()) {
+            throw newUninitializedMessageException(result);
+          }
+          return buildPartial();
+        }
+        
+        private org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute buildParsed()
+            throws com.google.protobuf.InvalidProtocolBufferException {
+          if (!isInitialized()) {
+            throw newUninitializedMessageException(
+              result).asInvalidProtocolBufferException();
+          }
+          return buildPartial();
+        }
+        
+        public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute buildPartial() {
+          if (result == null) {
+            throw new IllegalStateException(
+              "build() has already been called on this Builder.");
+          }
+          org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute returnMe = result;
+          result = null;
+          return returnMe;
+        }
+        
+        public Builder mergeFrom(com.google.protobuf.Message other) {
+          if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute) {
+            return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute)other);
+          } else {
+            super.mergeFrom(other);
+            return this;
+          }
+        }
+        
+        public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute other) {
+          if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.getDefaultInstance()) return this;
+          if (other.hasName()) {
+            setName(other.getName());
+          }
+          if (other.hasValue()) {
+            setValue(other.getValue());
+          }
+          this.mergeUnknownFields(other.getUnknownFields());
+          return this;
+        }
+        
+        public Builder mergeFrom(
+            com.google.protobuf.CodedInputStream input,
+            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+            throws java.io.IOException {
+          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+            com.google.protobuf.UnknownFieldSet.newBuilder(
+              this.getUnknownFields());
+          while (true) {
+            int tag = input.readTag();
+            switch (tag) {
+              case 0:
+                this.setUnknownFields(unknownFields.build());
+                return this;
+              default: {
+                if (!parseUnknownField(input, unknownFields,
+                                       extensionRegistry, tag)) {
+                  this.setUnknownFields(unknownFields.build());
+                  return this;
+                }
+                break;
+              }
+              case 10: {
+                setName(input.readString());
+                break;
+              }
+              case 18: {
+                setValue(input.readString());
+                break;
+              }
+            }
+          }
+        }
+        
+        
+        // required string name = 1;
+        public boolean hasName() {
+          return result.hasName();
+        }
+        public java.lang.String getName() {
+          return result.getName();
+        }
+        public Builder setName(java.lang.String value) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          result.hasName = true;
+          result.name_ = value;
+          return this;
+        }
+        public Builder clearName() {
+          result.hasName = false;
+          result.name_ = getDefaultInstance().getName();
+          return this;
+        }
+        
+        // required string value = 2;
+        public boolean hasValue() {
+          return result.hasValue();
+        }
+        public java.lang.String getValue() {
+          return result.getValue();
+        }
+        public Builder setValue(java.lang.String value) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          result.hasValue = true;
+          result.value_ = value;
+          return this;
+        }
+        public Builder clearValue() {
+          result.hasValue = false;
+          result.value_ = getDefaultInstance().getValue();
+          return this;
+        }
+        
+        // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchema.Attribute)
+      }
+      
+      static {
+        defaultInstance = new Attribute(true);
+        org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.internalForceInit();
+        defaultInstance.initFields();
+      }
+      
+      // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchema.Attribute)
+    }
+    
+    // optional string name = 1;
+    public static final int NAME_FIELD_NUMBER = 1;
+    private boolean hasName;
+    private java.lang.String name_ = "";
+    public boolean hasName() { return hasName; }
+    public java.lang.String getName() { return name_; }
+    
+    // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchema.Attribute attrs = 2;
+    public static final int ATTRS_FIELD_NUMBER = 2;
+    private java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute> attrs_ =
+      java.util.Collections.emptyList();
+    public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute> getAttrsList() {
+      return attrs_;
+    }
+    public int getAttrsCount() { return attrs_.size(); }
+    public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute getAttrs(int index) {
+      return attrs_.get(index);
+    }
+    
+    // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchema columns = 3;
+    public static final int COLUMNS_FIELD_NUMBER = 3;
+    private java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema> columns_ =
+      java.util.Collections.emptyList();
+    public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema> getColumnsList() {
+      return columns_;
+    }
+    public int getColumnsCount() { return columns_.size(); }
+    public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema getColumns(int index) {
+      return columns_.get(index);
+    }
+    
+    // optional bool inMemory = 4;
+    public static final int INMEMORY_FIELD_NUMBER = 4;
+    private boolean hasInMemory;
+    private boolean inMemory_ = false;
+    public boolean hasInMemory() { return hasInMemory; }
+    public boolean getInMemory() { return inMemory_; }
+    
+    // optional bool readOnly = 5;
+    public static final int READONLY_FIELD_NUMBER = 5;
+    private boolean hasReadOnly;
+    private boolean readOnly_ = false;
+    public boolean hasReadOnly() { return hasReadOnly; }
+    public boolean getReadOnly() { return readOnly_; }
+    
+    private void initFields() {
+    }
+    public final boolean isInitialized() {
+      for (org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute element : getAttrsList()) {
+        if (!element.isInitialized()) return false;
+      }
+      for (org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema element : getColumnsList()) {
+        if (!element.isInitialized()) return false;
+      }
+      return true;
+    }
+    
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (hasName()) {
+        output.writeString(1, getName());
+      }
+      for (org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute element : getAttrsList()) {
+        output.writeMessage(2, element);
+      }
+      for (org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema element : getColumnsList()) {
+        output.writeMessage(3, element);
+      }
+      if (hasInMemory()) {
+        output.writeBool(4, getInMemory());
+      }
+      if (hasReadOnly()) {
+        output.writeBool(5, getReadOnly());
+      }
+      getUnknownFields().writeTo(output);
+    }
+    
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+    
+      size = 0;
+      if (hasName()) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeStringSize(1, getName());
+      }
+      for (org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute element : getAttrsList()) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(2, element);
+      }
+      for (org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema element : getColumnsList()) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(3, element);
+      }
+      if (hasInMemory()) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBoolSize(4, getInMemory());
+      }
+      if (hasReadOnly()) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBoolSize(5, getReadOnly());
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+    
+    public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+    
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder> {
+      private org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema result;
+      
+      // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.newBuilder()
+      private Builder() {}
+      
+      private static Builder create() {
+        Builder builder = new Builder();
+        builder.result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema();
+        return builder;
+      }
+      
+      protected org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema internalGetResult() {
+        return result;
+      }
+      
+      public Builder clear() {
+        if (result == null) {
+          throw new IllegalStateException(
+            "Cannot call clear() after build().");
+        }
+        result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema();
+        return this;
+      }
+      
+      public Builder clone() {
+        return create().mergeFrom(result);
+      }
+      
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.getDescriptor();
+      }
+      
+      public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.getDefaultInstance();
+      }
+      
+      public boolean isInitialized() {
+        return result.isInitialized();
+      }
+      public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema build() {
+        if (result != null && !isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return buildPartial();
+      }
+      
+      private org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema buildParsed()
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        if (!isInitialized()) {
+          throw newUninitializedMessageException(
+            result).asInvalidProtocolBufferException();
+        }
+        return buildPartial();
+      }
+      
+      public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema buildPartial() {
+        if (result == null) {
+          throw new IllegalStateException(
+            "build() has already been called on this Builder.");
+        }
+        if (result.attrs_ != java.util.Collections.EMPTY_LIST) {
+          result.attrs_ =
+            java.util.Collections.unmodifiableList(result.attrs_);
+        }
+        if (result.columns_ != java.util.Collections.EMPTY_LIST) {
+          result.columns_ =
+            java.util.Collections.unmodifiableList(result.columns_);
+        }
+        org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema returnMe = result;
+        result = null;
+        return returnMe;
+      }
+      
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema) {
+          return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+      
+      public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema other) {
+        if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.getDefaultInstance()) return this;
+        if (other.hasName()) {
+          setName(other.getName());
+        }
+        if (!other.attrs_.isEmpty()) {
+          if (result.attrs_.isEmpty()) {
+            result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute>();
+          }
+          result.attrs_.addAll(other.attrs_);
+        }
+        if (!other.columns_.isEmpty()) {
+          if (result.columns_.isEmpty()) {
+            result.columns_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema>();
+          }
+          result.columns_.addAll(other.columns_);
+        }
+        if (other.hasInMemory()) {
+          setInMemory(other.getInMemory());
+        }
+        if (other.hasReadOnly()) {
+          setReadOnly(other.getReadOnly());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+      
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder(
+            this.getUnknownFields());
+        while (true) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              this.setUnknownFields(unknownFields.build());
+              return this;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                this.setUnknownFields(unknownFields.build());
+                return this;
+              }
+              break;
+            }
+            case 10: {
+              setName(input.readString());
+              break;
+            }
+            case 18: {
+              org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder subBuilder = org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.newBuilder();
+              input.readMessage(subBuilder, extensionRegistry);
+              addAttrs(subBuilder.buildPartial());
+              break;
+            }
+            case 26: {
+              org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder subBuilder = org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.newBuilder();
+              input.readMessage(subBuilder, extensionRegistry);
+              addColumns(subBuilder.buildPartial());
+              break;
+            }
+            case 32: {
+              setInMemory(input.readBool());
+              break;
+            }
+            case 40: {
+              setReadOnly(input.readBool());
+              break;
+            }
+          }
+        }
+      }
+      
+      
+      // optional string name = 1;
+      public boolean hasName() {
+        return result.hasName();
+      }
+      public java.lang.String getName() {
+        return result.getName();
+      }
+      public Builder setName(java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        result.hasName = true;
+        result.name_ = value;
+        return this;
+      }
+      public Builder clearName() {
+        result.hasName = false;
+        result.name_ = getDefaultInstance().getName();
+        return this;
+      }
+      
+      // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchema.Attribute attrs = 2;
+      public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute> getAttrsList() {
+        return java.util.Collections.unmodifiableList(result.attrs_);
+      }
+      public int getAttrsCount() {
+        return result.getAttrsCount();
+      }
+      public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute getAttrs(int index) {
+        return result.getAttrs(index);
+      }
+      public Builder setAttrs(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        result.attrs_.set(index, value);
+        return this;
+      }
+      public Builder setAttrs(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder builderForValue) {
+        result.attrs_.set(index, builderForValue.build());
+        return this;
+      }
+      public Builder addAttrs(org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        if (result.attrs_.isEmpty()) {
+          result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute>();
+        }
+        result.attrs_.add(value);
+        return this;
+      }
+      public Builder addAttrs(org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder builderForValue) {
+        if (result.attrs_.isEmpty()) {
+          result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute>();
+        }
+        result.attrs_.add(builderForValue.build());
+        return this;
+      }
+      public Builder addAllAttrs(
+          java.lang.Iterable<? extends org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute> values) {
+        if (result.attrs_.isEmpty()) {
+          result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute>();
+        }
+        super.addAll(values, result.attrs_);
+        return this;
+      }
+      public Builder clearAttrs() {
+        result.attrs_ = java.util.Collections.emptyList();
+        return this;
+      }
+      
+      // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchema columns = 3;
+      public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema> getColumnsList() {
+        return java.util.Collections.unmodifiableList(result.columns_);
+      }
+      public int getColumnsCount() {
+        return result.getColumnsCount();
+      }
+      public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema getColumns(int index) {
+        return result.getColumns(index);
+      }
+      public Builder setColumns(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        result.columns_.set(index, value);
+        return this;
+      }
+      public Builder setColumns(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder builderForValue) {
+        result.columns_.set(index, builderForValue.build());
+        return this;
+      }
+      public Builder addColumns(org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        if (result.columns_.isEmpty()) {
+          result.columns_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema>();
+        }
+        result.columns_.add(value);
+        return this;
+      }
+      public Builder addColumns(org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder builderForValue) {
+        if (result.columns_.isEmpty()) {
+          result.columns_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema>();
+        }
+        result.columns_.add(builderForValue.build());
+        return this;
+      }
+      public Builder addAllColumns(
+          java.lang.Iterable<? extends org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema> values) {
+        if (result.columns_.isEmpty()) {
+          result.columns_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema>();
+        }
+        super.addAll(values, result.columns_);
+        return this;
+      }
+      public Builder clearColumns() {
+        result.columns_ = java.util.Collections.emptyList();
+        return this;
+      }
+      
+      // optional bool inMemory = 4;
+      public boolean hasInMemory() {
+        return result.hasInMemory();
+      }
+      public boolean getInMemory() {
+        return result.getInMemory();
+      }
+      public Builder setInMemory(boolean value) {
+        result.hasInMemory = true;
+        result.inMemory_ = value;
+        return this;
+      }
+      public Builder clearInMemory() {
+        result.hasInMemory = false;
+        result.inMemory_ = false;
+        return this;
+      }
+      
+      // optional bool readOnly = 5;
+      public boolean hasReadOnly() {
+        return result.hasReadOnly();
+      }
+      public boolean getReadOnly() {
+        return result.getReadOnly();
+      }
+      public Builder setReadOnly(boolean value) {
+        result.hasReadOnly = true;
+        result.readOnly_ = value;
+        return this;
+      }
+      public Builder clearReadOnly() {
+        result.hasReadOnly = false;
+        result.readOnly_ = false;
+        return this;
+      }
+      
+      // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchema)
+    }
+    
+    static {
+      defaultInstance = new TableSchema(true);
+      org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.internalForceInit();
+      defaultInstance.initFields();
+    }
+    
+    // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchema)
+  }
+  
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_fieldAccessorTable;
+  
+  public static com.google.protobuf.Descriptors.FileDescriptor
+      getDescriptor() {
+    return descriptor;
+  }
+  private static com.google.protobuf.Descriptors.FileDescriptor
+      descriptor;
+  static {
+    java.lang.String[] descriptorData = {
+      "\n\030TableSchemaMessage.proto\0223org.apache.h" +
+      "adoop.hbase.stargate.protobuf.generated\032" +
+      "\031ColumnSchemaMessage.proto\"\230\002\n\013TableSche" +
+      "ma\022\014\n\004name\030\001 \001(\t\022Y\n\005attrs\030\002 \003(\0132J.org.ap" +
+      "ache.hadoop.hbase.stargate.protobuf.gene" +
+      "rated.TableSchema.Attribute\022R\n\007columns\030\003" +
+      " \003(\0132A.org.apache.hadoop.hbase.stargate." +
+      "protobuf.generated.ColumnSchema\022\020\n\010inMem" +
+      "ory\030\004 \001(\010\022\020\n\010readOnly\030\005 \001(\010\032(\n\tAttribute" +
+      "\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t"
+    };
+    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+      new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+        public com.google.protobuf.ExtensionRegistry assignDescriptors(
+            com.google.protobuf.Descriptors.FileDescriptor root) {
+          descriptor = root;
+          internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_descriptor =
+            getDescriptor().getMessageTypes().get(0);
+          internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_descriptor,
+              new java.lang.String[] { "Name", "Attrs", "Columns", "InMemory", "ReadOnly", },
+              org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.class,
+              org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Builder.class);
+          internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_descriptor =
+            internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_descriptor.getNestedTypes().get(0);
+          internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_descriptor,
+              new java.lang.String[] { "Name", "Value", },
+              org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.class,
+              org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder.class);
+          return null;
+        }
+      };
+    com.google.protobuf.Descriptors.FileDescriptor
+      .internalBuildGeneratedFileFrom(descriptorData,
+        new com.google.protobuf.Descriptors.FileDescriptor[] {
+          org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.getDescriptor(),
+        }, assigner);
+  }
+  
+  public static void internalForceInit() {}
+  
+  // @@protoc_insertion_point(outer_class_scope)
+}
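
Usage note: the class above is constructed through its nested Builder, per the
protobuf 2.x generated API. A minimal sketch, assuming only the methods shown
in this file plus the standard protobuf runtime (the table name and attribute
name/value below are illustrative, not taken from the commit):

  import org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema;

  public class TableSchemaExample {
    public static void main(String[] args) throws Exception {
      // Build a schema with one illustrative table-level attribute.
      TableSchema schema = TableSchema.newBuilder()
          .setName("example")                     // optional string name = 1
          .addAttrs(TableSchema.Attribute.newBuilder()
              .setName("SOME_ATTRIBUTE")          // required string name = 1
              .setValue("false")                  // required string value = 2
              .build())
          .setInMemory(false)                     // optional bool inMemory = 4
          .build();

      // Round-trip through the wire format: parseFrom(byte[]) is defined in the
      // generated class above; toByteArray() is inherited from the protobuf runtime.
      byte[] wire = schema.toByteArray();
      TableSchema parsed = TableSchema.parseFrom(wire);
      System.out.println(parsed.getName() + " attrs=" + parsed.getAttrsCount());
    }
  }

Because Attribute declares both of its fields required, build() throws an
uninitialized-message exception if either setName() or setValue() is skipped,
as implemented by the generated build()/isInitialized() pair above.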

Added: hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/VersionMessage.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/VersionMessage.java?rev=928876&view=auto
==============================================================================
--- hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/VersionMessage.java (added)
+++ hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/VersionMessage.java Mon Mar 29 19:15:15 2010
@@ -0,0 +1,511 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: VersionMessage.proto
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+public final class VersionMessage {
+  private VersionMessage() {}
+  public static void registerAllExtensions(
+      com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public static final class Version extends
+      com.google.protobuf.GeneratedMessage {
+    // Use Version.newBuilder() to construct.
+    private Version() {
+      initFields();
+    }
+    private Version(boolean noInit) {}
+    
+    private static final Version defaultInstance;
+    public static Version getDefaultInstance() {
+      return defaultInstance;
+    }
+    
+    public Version getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+    
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_descriptor;
+    }
+    
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_fieldAccessorTable;
+    }
+    
+    // optional string stargateVersion = 1;
+    public static final int STARGATEVERSION_FIELD_NUMBER = 1;
+    private boolean hasStargateVersion;
+    private java.lang.String stargateVersion_ = "";
+    public boolean hasStargateVersion() { return hasStargateVersion; }
+    public java.lang.String getStargateVersion() { return stargateVersion_; }
+    
+    // optional string jvmVersion = 2;
+    public static final int JVMVERSION_FIELD_NUMBER = 2;
+    private boolean hasJvmVersion;
+    private java.lang.String jvmVersion_ = "";
+    public boolean hasJvmVersion() { return hasJvmVersion; }
+    public java.lang.String getJvmVersion() { return jvmVersion_; }
+    
+    // optional string osVersion = 3;
+    public static final int OSVERSION_FIELD_NUMBER = 3;
+    private boolean hasOsVersion;
+    private java.lang.String osVersion_ = "";
+    public boolean hasOsVersion() { return hasOsVersion; }
+    public java.lang.String getOsVersion() { return osVersion_; }
+    
+    // optional string serverVersion = 4;
+    public static final int SERVERVERSION_FIELD_NUMBER = 4;
+    private boolean hasServerVersion;
+    private java.lang.String serverVersion_ = "";
+    public boolean hasServerVersion() { return hasServerVersion; }
+    public java.lang.String getServerVersion() { return serverVersion_; }
+    
+    // optional string jerseyVersion = 5;
+    public static final int JERSEYVERSION_FIELD_NUMBER = 5;
+    private boolean hasJerseyVersion;
+    private java.lang.String jerseyVersion_ = "";
+    public boolean hasJerseyVersion() { return hasJerseyVersion; }
+    public java.lang.String getJerseyVersion() { return jerseyVersion_; }
+    
+    private void initFields() {
+    }
+    public final boolean isInitialized() {
+      return true;
+    }
+    
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (hasStargateVersion()) {
+        output.writeString(1, getStargateVersion());
+      }
+      if (hasJvmVersion()) {
+        output.writeString(2, getJvmVersion());
+      }
+      if (hasOsVersion()) {
+        output.writeString(3, getOsVersion());
+      }
+      if (hasServerVersion()) {
+        output.writeString(4, getServerVersion());
+      }
+      if (hasJerseyVersion()) {
+        output.writeString(5, getJerseyVersion());
+      }
+      getUnknownFields().writeTo(output);
+    }
+    
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+    
+      size = 0;
+      if (hasStargateVersion()) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeStringSize(1, getStargateVersion());
+      }
+      if (hasJvmVersion()) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeStringSize(2, getJvmVersion());
+      }
+      if (hasOsVersion()) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeStringSize(3, getOsVersion());
+      }
+      if (hasServerVersion()) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeStringSize(4, getServerVersion());
+      }
+      if (hasJerseyVersion()) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeStringSize(5, getJerseyVersion());
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+    
+    public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+    
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder> {
+      private org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version result;
+      
+      // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version.newBuilder()
+      private Builder() {}
+      
+      private static Builder create() {
+        Builder builder = new Builder();
+        builder.result = new org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version();
+        return builder;
+      }
+      
+      protected org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version internalGetResult() {
+        return result;
+      }
+      
+      public Builder clear() {
+        if (result == null) {
+          throw new IllegalStateException(
+            "Cannot call clear() after build().");
+        }
+        result = new org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version();
+        return this;
+      }
+      
+      public Builder clone() {
+        return create().mergeFrom(result);
+      }
+      
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version.getDescriptor();
+      }
+      
+      public org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version.getDefaultInstance();
+      }
+      
+      public boolean isInitialized() {
+        return result.isInitialized();
+      }
+      public org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version build() {
+        if (result != null && !isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return buildPartial();
+      }
+      
+      private org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version buildParsed()
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        if (!isInitialized()) {
+          throw newUninitializedMessageException(
+            result).asInvalidProtocolBufferException();
+        }
+        return buildPartial();
+      }
+      
+      public org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version buildPartial() {
+        if (result == null) {
+          throw new IllegalStateException(
+            "build() has already been called on this Builder.");
+        }
+        org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version returnMe = result;
+        result = null;
+        return returnMe;
+      }
+      
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version) {
+          return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+      
+      public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version other) {
+        if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version.getDefaultInstance()) return this;
+        if (other.hasStargateVersion()) {
+          setStargateVersion(other.getStargateVersion());
+        }
+        if (other.hasJvmVersion()) {
+          setJvmVersion(other.getJvmVersion());
+        }
+        if (other.hasOsVersion()) {
+          setOsVersion(other.getOsVersion());
+        }
+        if (other.hasServerVersion()) {
+          setServerVersion(other.getServerVersion());
+        }
+        if (other.hasJerseyVersion()) {
+          setJerseyVersion(other.getJerseyVersion());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+      
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder(
+            this.getUnknownFields());
+        while (true) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              this.setUnknownFields(unknownFields.build());
+              return this;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                this.setUnknownFields(unknownFields.build());
+                return this;
+              }
+              break;
+            }
+            case 10: {
+              setStargateVersion(input.readString());
+              break;
+            }
+            case 18: {
+              setJvmVersion(input.readString());
+              break;
+            }
+            case 26: {
+              setOsVersion(input.readString());
+              break;
+            }
+            case 34: {
+              setServerVersion(input.readString());
+              break;
+            }
+            case 42: {
+              setJerseyVersion(input.readString());
+              break;
+            }
+          }
+        }
+      }
+      
+      
+      // optional string stargateVersion = 1;
+      public boolean hasStargateVersion() {
+        return result.hasStargateVersion();
+      }
+      public java.lang.String getStargateVersion() {
+        return result.getStargateVersion();
+      }
+      public Builder setStargateVersion(java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        result.hasStargateVersion = true;
+        result.stargateVersion_ = value;
+        return this;
+      }
+      public Builder clearStargateVersion() {
+        result.hasStargateVersion = false;
+        result.stargateVersion_ = getDefaultInstance().getStargateVersion();
+        return this;
+      }
+      
+      // optional string jvmVersion = 2;
+      public boolean hasJvmVersion() {
+        return result.hasJvmVersion();
+      }
+      public java.lang.String getJvmVersion() {
+        return result.getJvmVersion();
+      }
+      public Builder setJvmVersion(java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        result.hasJvmVersion = true;
+        result.jvmVersion_ = value;
+        return this;
+      }
+      public Builder clearJvmVersion() {
+        result.hasJvmVersion = false;
+        result.jvmVersion_ = getDefaultInstance().getJvmVersion();
+        return this;
+      }
+      
+      // optional string osVersion = 3;
+      public boolean hasOsVersion() {
+        return result.hasOsVersion();
+      }
+      public java.lang.String getOsVersion() {
+        return result.getOsVersion();
+      }
+      public Builder setOsVersion(java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        result.hasOsVersion = true;
+        result.osVersion_ = value;
+        return this;
+      }
+      public Builder clearOsVersion() {
+        result.hasOsVersion = false;
+        result.osVersion_ = getDefaultInstance().getOsVersion();
+        return this;
+      }
+      
+      // optional string serverVersion = 4;
+      public boolean hasServerVersion() {
+        return result.hasServerVersion();
+      }
+      public java.lang.String getServerVersion() {
+        return result.getServerVersion();
+      }
+      public Builder setServerVersion(java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        result.hasServerVersion = true;
+        result.serverVersion_ = value;
+        return this;
+      }
+      public Builder clearServerVersion() {
+        result.hasServerVersion = false;
+        result.serverVersion_ = getDefaultInstance().getServerVersion();
+        return this;
+      }
+      
+      // optional string jerseyVersion = 5;
+      public boolean hasJerseyVersion() {
+        return result.hasJerseyVersion();
+      }
+      public java.lang.String getJerseyVersion() {
+        return result.getJerseyVersion();
+      }
+      public Builder setJerseyVersion(java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        result.hasJerseyVersion = true;
+        result.jerseyVersion_ = value;
+        return this;
+      }
+      public Builder clearJerseyVersion() {
+        result.hasJerseyVersion = false;
+        result.jerseyVersion_ = getDefaultInstance().getJerseyVersion();
+        return this;
+      }
+      
+      // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.Version)
+    }
+    
+    static {
+      defaultInstance = new Version(true);
+      org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.internalForceInit();
+      defaultInstance.initFields();
+    }
+    
+    // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.Version)
+  }
+  
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_fieldAccessorTable;
+  
+  public static com.google.protobuf.Descriptors.FileDescriptor
+      getDescriptor() {
+    return descriptor;
+  }
+  private static com.google.protobuf.Descriptors.FileDescriptor
+      descriptor;
+  static {
+    java.lang.String[] descriptorData = {
+      "\n\024VersionMessage.proto\0223org.apache.hadoo" +
+      "p.hbase.stargate.protobuf.generated\"w\n\007V" +
+      "ersion\022\027\n\017stargateVersion\030\001 \001(\t\022\022\n\njvmVe" +
+      "rsion\030\002 \001(\t\022\021\n\tosVersion\030\003 \001(\t\022\025\n\rserver" +
+      "Version\030\004 \001(\t\022\025\n\rjerseyVersion\030\005 \001(\t"
+    };
+    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+      new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+        public com.google.protobuf.ExtensionRegistry assignDescriptors(
+            com.google.protobuf.Descriptors.FileDescriptor root) {
+          descriptor = root;
+          internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_descriptor =
+            getDescriptor().getMessageTypes().get(0);
+          internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_descriptor,
+              new java.lang.String[] { "StargateVersion", "JvmVersion", "OsVersion", "ServerVersion", "JerseyVersion", },
+              org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version.class,
+              org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version.Builder.class);
+          return null;
+        }
+      };
+    com.google.protobuf.Descriptors.FileDescriptor
+      .internalBuildGeneratedFileFrom(descriptorData,
+        new com.google.protobuf.Descriptors.FileDescriptor[] {
+        }, assigner);
+  }
+  
+  public static void internalForceInit() {}
+  
+  // @@protoc_insertion_point(outer_class_scope)
+}

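A minimal sketch of driving the generated message API above, assuming the
standard protoc-generated newBuilder() and parseFrom() entry points (not
shown in this hunk):

    VersionMessage.Version version = VersionMessage.Version.newBuilder()
      .setStargateVersion("0.0.1")                 // all fields are optional strings
      .setJvmVersion(System.getProperty("java.version"))
      .build();
    byte[] wire = version.toByteArray();           // serialize to wire format
    VersionMessage.Version parsed =
      VersionMessage.Version.parseFrom(wire);      // inverse of toByteArray()
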
Added: hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/JAXBContextResolver.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/JAXBContextResolver.java?rev=928876&view=auto
==============================================================================
--- hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/JAXBContextResolver.java (added)
+++ hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/JAXBContextResolver.java Mon Mar 29 19:15:15 2010
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.provider;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+
+import javax.ws.rs.ext.ContextResolver;
+import javax.ws.rs.ext.Provider;
+import javax.xml.bind.JAXBContext;
+
+import org.apache.hadoop.hbase.stargate.model.CellModel;
+import org.apache.hadoop.hbase.stargate.model.CellSetModel;
+import org.apache.hadoop.hbase.stargate.model.ColumnSchemaModel;
+import org.apache.hadoop.hbase.stargate.model.RowModel;
+import org.apache.hadoop.hbase.stargate.model.ScannerModel;
+import org.apache.hadoop.hbase.stargate.model.StorageClusterStatusModel;
+import org.apache.hadoop.hbase.stargate.model.StorageClusterVersionModel;
+import org.apache.hadoop.hbase.stargate.model.TableInfoModel;
+import org.apache.hadoop.hbase.stargate.model.TableListModel;
+import org.apache.hadoop.hbase.stargate.model.TableModel;
+import org.apache.hadoop.hbase.stargate.model.TableRegionModel;
+import org.apache.hadoop.hbase.stargate.model.TableSchemaModel;
+import org.apache.hadoop.hbase.stargate.model.VersionModel;
+
+import com.sun.jersey.api.json.JSONConfiguration;
+import com.sun.jersey.api.json.JSONJAXBContext;
+
+/**
+ * Plumbing for hooking Jersey's JSON entity body encoding and decoding
+ * support up to JAXB. To change how JSON is produced and consumed, modify
+ * how the context is created (for example, by using a different
+ * configuration builder).
+ */
+@Provider
+public class JAXBContextResolver implements ContextResolver<JAXBContext> {
+
+  private final JAXBContext context;
+
+  private final Set<Class<?>> types;
+
+  private final Class<?>[] cTypes = {
+    CellModel.class,
+    CellSetModel.class,
+    ColumnSchemaModel.class,
+    RowModel.class,
+    ScannerModel.class,
+    StorageClusterStatusModel.class,
+    StorageClusterVersionModel.class,
+    TableInfoModel.class,
+    TableListModel.class,
+    TableModel.class,
+    TableRegionModel.class,
+    TableSchemaModel.class,
+    VersionModel.class
+  };
+
+  public JAXBContextResolver() throws Exception {
+    this.types = new HashSet<Class<?>>(Arrays.asList(cTypes));
+    this.context = new JSONJAXBContext(JSONConfiguration.natural().build(),
+      cTypes);
+  }
+
+  @Override
+  public JAXBContext getContext(Class<?> objectType) {
+    return (types.contains(objectType)) ? context : null;
+  }
+}

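A hedged sketch of what the resolver provides; in a deployed server Jersey
discovers the @Provider and calls getContext() itself, and VersionModel's
no-arg constructor is an assumption here:

    JAXBContextResolver resolver = new JAXBContextResolver();
    JAXBContext context = resolver.getContext(VersionModel.class); // registered type
    java.io.StringWriter out = new java.io.StringWriter();
    // a JSONJAXBContext built with natural notation emits JSON instead of XML
    ((JSONJAXBContext)context).createJSONMarshaller()
      .marshallToJSON(new VersionModel(), out);
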
Added: hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/consumer/ProtobufMessageBodyConsumer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/consumer/ProtobufMessageBodyConsumer.java?rev=928876&view=auto
==============================================================================
--- hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/consumer/ProtobufMessageBodyConsumer.java (added)
+++ hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/consumer/ProtobufMessageBodyConsumer.java Mon Mar 29 19:15:15 2010
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.provider.consumer;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Type;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.ext.MessageBodyReader;
+import javax.ws.rs.ext.Provider;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.stargate.Constants;
+import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
+
+/**
+ * Adapter that hooks Jersey's content handling up to handlers implementing
+ * the ProtobufMessageHandler interface, for decoding protobuf request
+ * bodies.
+ */
+@Provider
+@Consumes(Constants.MIMETYPE_PROTOBUF)
+public class ProtobufMessageBodyConsumer 
+    implements MessageBodyReader<ProtobufMessageHandler> {
+  private static final Log LOG =
+    LogFactory.getLog(ProtobufMessageBodyConsumer.class);
+
+  @Override
+  public boolean isReadable(Class<?> type, Type genericType,
+      Annotation[] annotations, MediaType mediaType) {
+    return ProtobufMessageHandler.class.isAssignableFrom(type);
+  }
+
+  @Override
+  public ProtobufMessageHandler readFrom(Class<ProtobufMessageHandler> type, Type genericType,
+      Annotation[] annotations, MediaType mediaType,
+      MultivaluedMap<String, String> httpHeaders, InputStream inputStream)
+      throws IOException, WebApplicationException {
+    ProtobufMessageHandler obj = null;
+    try {
+      obj = type.newInstance();
+      ByteArrayOutputStream baos = new ByteArrayOutputStream();
+      byte[] buffer = new byte[4096];
+      int read;
+      do {
+        read = inputStream.read(buffer, 0, buffer.length);
+        if (read > 0) {
+          baos.write(buffer, 0, read);
+        }
+      } while (read > 0);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(getClass() + ": read " + baos.size() + " bytes from " +
+          inputStream);
+      }
+      obj = obj.getObjectFromMessage(baos.toByteArray());
+    } catch (InstantiationException e) {
+      throw new WebApplicationException(e);
+    } catch (IllegalAccessException e) {
+      throw new WebApplicationException(e);
+    }
+    return obj;
+  }
+}

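The decode path above reduces to the ProtobufMessageHandler contract; a
short sketch, assuming CellSetModel (from this patch) has a no-arg
constructor:

    CellSetModel model = new CellSetModel();
    byte[] bytes = model.createProtobufOutput();   // same method the producers use
    // what readFrom() does once the input stream has been fully buffered:
    ProtobufMessageHandler decoded =
      new CellSetModel().getObjectFromMessage(bytes);
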
Added: hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/producer/PlainTextMessageBodyProducer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/producer/PlainTextMessageBodyProducer.java?rev=928876&view=auto
==============================================================================
--- hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/producer/PlainTextMessageBodyProducer.java (added)
+++ hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/producer/PlainTextMessageBodyProducer.java Mon Mar 29 19:15:15 2010
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.provider.producer;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Type;
+import java.util.Map;
+import java.util.WeakHashMap;
+
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.ext.MessageBodyWriter;
+import javax.ws.rs.ext.Provider;
+
+import org.apache.hadoop.hbase.stargate.Constants;
+
+/**
+ * An adapter between Jersey and Object.toString(). Hooks up plain text output
+ * to the Jersey content handling framework. 
+ * Jersey will first call getSize() to learn the number of bytes that will be
+ * sent, then writeTo to perform the actual I/O.
+ */
+@Provider
+@Produces(Constants.MIMETYPE_TEXT)
+public class PlainTextMessageBodyProducer 
+  implements MessageBodyWriter<Object> {
+
+  private Map<Object, byte[]> buffer = new WeakHashMap<Object, byte[]>();
+
+  @Override
+  public boolean isWriteable(Class<?> arg0, Type arg1, Annotation[] arg2,
+      MediaType arg3) {
+    return true;
+  }
+
+  @Override
+  public long getSize(Object object, Class<?> type, Type genericType,
+      Annotation[] annotations, MediaType mediaType) {
+    byte[] bytes = object.toString().getBytes();
+    buffer.put(object, bytes);
+    return bytes.length;
+  }
+
+  @Override
+  public void writeTo(Object object, Class<?> type, Type genericType,
+      Annotation[] annotations, MediaType mediaType,
+      MultivaluedMap<String, Object> httpHeaders, OutputStream outStream)
+      throws IOException, WebApplicationException {
+    outStream.write(buffer.remove(object));
+  }
+}

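Jersey normally drives the two-phase contract itself; a standalone sketch,
passing null for the reflection and header parameters this writer ignores:

    PlainTextMessageBodyProducer producer = new PlainTextMessageBodyProducer();
    Object entity = Integer.valueOf(42);           // any Object: output is toString()
    long length = producer.getSize(entity, entity.getClass(), null, null, null);
    java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
    producer.writeTo(entity, entity.getClass(), null, null, null, null, out);
    // out now holds "42"; writeTo() consumed the WeakHashMap entry
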
Added: hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/producer/ProtobufMessageBodyProducer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/producer/ProtobufMessageBodyProducer.java?rev=928876&view=auto
==============================================================================
--- hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/producer/ProtobufMessageBodyProducer.java (added)
+++ hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/producer/ProtobufMessageBodyProducer.java Mon Mar 29 19:15:15 2010
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.provider.producer;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Type;
+import java.util.Map;
+import java.util.WeakHashMap;
+
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.ext.MessageBodyWriter;
+import javax.ws.rs.ext.Provider;
+
+import org.apache.hadoop.hbase.stargate.Constants;
+import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
+
+/**
+ * An adapter between Jersey and ProtobufMessageHandler implementors. Hooks up
+ * protobuf output producing methods to the Jersey content handling framework.
+ * Jersey will first call getSize() to learn the number of bytes that will be
+ * sent, then writeTo to perform the actual I/O.
+ */
+@Provider
+@Produces(Constants.MIMETYPE_PROTOBUF)
+public class ProtobufMessageBodyProducer
+  implements MessageBodyWriter<ProtobufMessageHandler> {
+
+  private Map<Object, byte[]> buffer = new WeakHashMap<Object, byte[]>();
+
+  @Override
+  public boolean isWriteable(Class<?> type, Type genericType,
+      Annotation[] annotations, MediaType mediaType) {
+    return ProtobufMessageHandler.class.isAssignableFrom(type);
+  }
+
+  @Override
+  public long getSize(ProtobufMessageHandler m, Class<?> type, Type genericType,
+      Annotation[] annotations, MediaType mediaType) {
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    try {
+      baos.write(m.createProtobufOutput());
+    } catch (IOException e) {
+      return -1;
+    }
+    byte[] bytes = baos.toByteArray();
+    buffer.put(m, bytes);
+    return bytes.length;
+  }
+
+  @Override
+  public void writeTo(ProtobufMessageHandler m, Class<?> type, Type genericType,
+      Annotation[] annotations, MediaType mediaType,
+      MultivaluedMap<String, Object> httpHeaders, OutputStream entityStream)
+      throws IOException, WebApplicationException {
+    entityStream.write(buffer.remove(m));
+  }
+}

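The same getSize()-then-writeTo() pairing applies here; because the bytes
are cached weakly per entity, both calls must receive the same instance. A
sketch, assuming VersionModel has a no-arg constructor:

    VersionModel model = new VersionModel();
    ProtobufMessageBodyProducer producer = new ProtobufMessageBodyProducer();
    long length = producer.getSize(model, VersionModel.class, null, null, null);
    java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
    producer.writeTo(model, VersionModel.class, null, null, null, null, out);
    // out.toByteArray() equals model.createProtobufOutput(); length == out.size()
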
Added: hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/util/HTableTokenBucket.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/util/HTableTokenBucket.java?rev=928876&view=auto
==============================================================================
--- hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/util/HTableTokenBucket.java (added)
+++ hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/util/HTableTokenBucket.java Mon Mar 29 19:15:15 2010
@@ -0,0 +1,193 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.util;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.RowLock;
+import org.apache.hadoop.hbase.stargate.Constants;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.util.StringUtils;
+
+/**
+ * An HTable-backed token bucket.
+ * <p>
+ * Can be configured with <code>rate</code>, the number of tokens to add to
+ * the bucket each second, and <code>size</code>, the maximum number of
+ * tokens allowed to accumulate (the burst limit). Configuration is stored
+ * in the HTable adjacent to the token count and is periodically refreshed.
+ * <p>
+ * Expected columns:
+ * <p>
+ * <ul>
+ *   <li>user:
+ *   <ul>
+ *     <li>user:tokens</li>
+ *     <li>user:tokens.rate</li>
+ *     <li>user:tokens.size</li>
+ *   </ul></li>
+ * </ul>
+ */
+public class HTableTokenBucket implements Constants {
+
+  static final Log LOG = LogFactory.getLog(HTableTokenBucket.class);
+
+  static final byte[] USER = Bytes.toBytes("user");
+  static final byte[] TOKENS = Bytes.toBytes("tokens");
+  static final byte[] TOKENS_RATE = Bytes.toBytes("tokens.rate");
+  static final byte[] TOKENS_SIZE = Bytes.toBytes("tokens.size");
+
+  Configuration conf;
+  String tableName;
+  HTable table;
+  byte[] row;
+  int tokens;
+  double rate = 10.0; // default, 10 ops added per second
+  int size = 100;     // burst
+  long lastUpdated = System.currentTimeMillis();
+  long configUpdateInterval;
+  long lastConfigUpdated = System.currentTimeMillis();
+
+  void updateConfig() throws IOException {
+    Get get = new Get(row);
+    get.addColumn(USER, TOKENS_RATE);
+    get.addColumn(USER, TOKENS_SIZE);
+    Result result = table.get(get);
+    byte[] value = result.getValue(USER, TOKENS_RATE);
+    if (value != null) {
+      this.rate = Bytes.toDouble(value); // rate may be fractional
+    }
+    value = result.getValue(USER, TOKENS_SIZE);
+    if (value != null) {
+      this.size = (int)Bytes.toLong(value);
+    }
+  }
+
+  /**
+   * Constructor
+   * @param conf configuration
+   * @param row row key for user
+   * @throws IOException
+   */
+  public HTableTokenBucket(Configuration conf, byte[] row) 
+      throws IOException {
+    this(conf, conf.get("stargate.tb.htable.name", USERS_TABLE), row);
+  }
+
+  /**
+   * Constructor
+   * @param conf configuration
+   * @param tableName the table to use
+   * @param row row key for user
+   * @throws IOException
+   */
+  public HTableTokenBucket(Configuration conf, String tableName,
+      byte[] row) throws IOException {
+    this.conf = conf;
+    this.tableName = tableName;
+    this.row = row;
+    this.table = new HTable(conf, tableName);
+    this.configUpdateInterval = 
+      conf.getLong("stargate.tb.update.interval", 1000 * 60);
+    updateConfig();
+  }
+
+  /**
+   * @return the number of remaining tokens in the bucket (roughly)
+   * @throws IOException
+   */
+  public int available() throws IOException {
+    long now = System.currentTimeMillis();
+    if (now - lastConfigUpdated > configUpdateInterval) {
+      try {
+        updateConfig();
+      } catch (IOException e) { 
+        LOG.warn(StringUtils.stringifyException(e));
+      }
+      lastConfigUpdated = now;
+    }
+
+    // We can't simply use incrementColumnValue here because the timestamp of
+    // the keyvalue will not change as long as it remains in memstore, so we
+    // read and write under a row lock instead. This costs three more round
+    // trips than incrementColumnValue and means some unavoidable contention
+    // on the row when multiple Stargate instances serve the same user.
+    RowLock rl = table.lockRow(row);
+    try {
+      Get get = new Get(row, rl);
+      get.addColumn(USER, TOKENS);
+      List<KeyValue> kvs = table.get(get).list();
+      if (kvs != null && !kvs.isEmpty()) {
+        KeyValue kv = kvs.get(0);
+        tokens = (int)Bytes.toLong(kv.getValue());
+        lastUpdated = kv.getTimestamp();
+      } else {
+        tokens = (int)rate;
+      }
+      long elapsed = now - lastUpdated;
+      int i = (int)((elapsed / 1000) * rate); // elapsed is in ms; convert to seconds
+      if (tokens + i > size) {
+        i = size - tokens;
+      }
+      if (i > 0) {
+        tokens += i;
+        Put put = new Put(row, rl);
+        put.add(USER, TOKENS, Bytes.toBytes((long)tokens));
+        put.setWriteToWAL(false);
+        table.put(put);
+        table.flushCommits();
+      }
+    } finally {
+      table.unlockRow(rl);
+    }
+    return tokens;
+  }
+
+  /**
+   * @param t the number of tokens to consume from the bucket
+   * @throws IOException
+   */
+  public void remove(int t) throws IOException {
+    // Here we don't care about timestamp changes; actually it's advantageous
+    // if they are not updated, otherwise available() and remove() must be
+    // used as near to each other in time as possible.
+    table.incrementColumnValue(row, USER, TOKENS, (long) -t, false);
+  }
+
+  public double getRate() {
+    return rate;
+  }
+
+  public int getSize() {
+    return size;
+  }
+
+}

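A sketch of gating a request on the bucket; the configuration bootstrap and
the user row key are assumptions:

    Configuration conf = HBaseConfiguration.create(); // factory varies by HBase version
    HTableTokenBucket bucket =
      new HTableTokenBucket(conf, Bytes.toBytes("exampleUser"));
    if (bucket.available() > 0) {
      bucket.remove(1);   // consume one token for this operation
      // ... service the request ...
    } else {
      // over budget: reject or delay; tokens refill at getRate() per second
      // and can burst up to getSize()
    }
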
Added: hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/util/SoftUserData.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/util/SoftUserData.java?rev=928876&view=auto
==============================================================================
--- hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/util/SoftUserData.java (added)
+++ hadoop/hbase/trunk/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/util/SoftUserData.java Mon Mar 29 19:15:15 2010
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.util;
+
+import java.util.Map;
+
+import org.apache.hadoop.hbase.stargate.User;
+import org.apache.hadoop.hbase.util.SoftValueMap;
+
+/**
+ * Provides a softmap-backed collection of user data. The collection can be
+ * reclaimed by the garbage collector at any time when the heap is under
+ * pressure.
+ */
+public class SoftUserData extends UserData {
+
+  static final Map<User,UserData> map = new SoftValueMap<User,UserData>();
+
+  public static synchronized UserData get(final User user) {
+    UserData data = map.get(user);
+    if (data == null) {
+      data = new UserData();
+      map.put(user, data);
+    }
+    return data;
+  }
+
+  public static synchronized UserData put(final User user,
+      final UserData data) {
+    return map.put(user, data);
+  }
+
+}

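A short sketch of the intended access pattern; because the map holds soft
values, callers should come back through get() rather than caching UserData
references themselves:

    static UserData sessionData(User user) {
      // get() recreates and re-caches the entry if the GC reclaimed it
      return SoftUserData.get(user);
    }
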