Return-Path: X-Original-To: apmail-hbase-commits-archive@www.apache.org Delivered-To: apmail-hbase-commits-archive@www.apache.org Received: from mail.apache.org (hermes.apache.org [140.211.11.3]) by minotaur.apache.org (Postfix) with SMTP id 5F4BC10180 for ; Wed, 24 Apr 2013 04:46:40 +0000 (UTC) Received: (qmail 58996 invoked by uid 500); 24 Apr 2013 04:46:40 -0000 Delivered-To: apmail-hbase-commits-archive@hbase.apache.org Received: (qmail 58943 invoked by uid 500); 24 Apr 2013 04:46:40 -0000 Mailing-List: contact commits-help@hbase.apache.org; run by ezmlm Precedence: bulk List-Help: List-Unsubscribe: List-Post: List-Id: Reply-To: dev@hbase.apache.org Delivered-To: mailing list commits@hbase.apache.org Received: (qmail 58935 invoked by uid 99); 24 Apr 2013 04:46:40 -0000 Received: from athena.apache.org (HELO athena.apache.org) (140.211.11.136) by apache.org (qpsmtpd/0.29) with ESMTP; Wed, 24 Apr 2013 04:46:40 +0000 X-ASF-Spam-Status: No, hits=-2000.0 required=5.0 tests=ALL_TRUSTED X-Spam-Check-By: apache.org Received: from [140.211.11.4] (HELO eris.apache.org) (140.211.11.4) by apache.org (qpsmtpd/0.29) with ESMTP; Wed, 24 Apr 2013 04:46:35 +0000 Received: from eris.apache.org (localhost [127.0.0.1]) by eris.apache.org (Postfix) with ESMTP id 2E1042388AAA; Wed, 24 Apr 2013 04:45:47 +0000 (UTC) Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: svn commit: r1471247 [6/7] - in /hbase/trunk: bin/ dev-support/ hbase-examples/thrift2/ hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ hbase-server/src/main/java/org/apache/hadoop/hbase/thrift2/ hbase-server/src/main/java/org/apache/hado... Date: Wed, 24 Apr 2013 04:45:45 -0000 To: commits@hbase.apache.org From: stack@apache.org X-Mailer: svnmailer-1.0.8-patched Message-Id: <20130424044547.2E1042388AAA@eris.apache.org> X-Virus-Checked: Checked by ClamAV on apache.org Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TResult.java URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TResult.java?rev=1471247&r1=1471246&r2=1471247&view=diff ============================================================================== --- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TResult.java (original) +++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TResult.java Wed Apr 24 04:45:44 2013 @@ -0,0 +1,553 @@ +/** + * Autogenerated by Thrift Compiler (0.9.0) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hbase.thrift2.generated; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * if no Result is found, row and columnValues will not be set. 
+ */ +public class TResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TResult"); + + private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField COLUMN_VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("columnValues", org.apache.thrift.protocol.TType.LIST, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new TResultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new TResultTupleSchemeFactory()); + } + + public ByteBuffer row; // optional + public List columnValues; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + ROW((short)1, "row"), + COLUMN_VALUES((short)2, "columnValues"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // ROW + return ROW; + case 2: // COLUMN_VALUES + return COLUMN_VALUES; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private _Fields optionals[] = {_Fields.ROW}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); + tmpMap.put(_Fields.COLUMN_VALUES, new org.apache.thrift.meta_data.FieldMetaData("columnValues", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TColumnValue.class)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TResult.class, metaDataMap); + } + + public TResult() { + } + + public TResult( + List columnValues) + { + this(); + this.columnValues = columnValues; + } + + /** + * Performs a deep copy on other. + */ + public TResult(TResult other) { + if (other.isSetRow()) { + this.row = org.apache.thrift.TBaseHelper.copyBinary(other.row); +; + } + if (other.isSetColumnValues()) { + List __this__columnValues = new ArrayList(); + for (TColumnValue other_element : other.columnValues) { + __this__columnValues.add(new TColumnValue(other_element)); + } + this.columnValues = __this__columnValues; + } + } + + public TResult deepCopy() { + return new TResult(this); + } + + @Override + public void clear() { + this.row = null; + this.columnValues = null; + } + + public byte[] getRow() { + setRow(org.apache.thrift.TBaseHelper.rightSize(row)); + return row == null ? null : row.array(); + } + + public ByteBuffer bufferForRow() { + return row; + } + + public TResult setRow(byte[] row) { + setRow(row == null ? (ByteBuffer)null : ByteBuffer.wrap(row)); + return this; + } + + public TResult setRow(ByteBuffer row) { + this.row = row; + return this; + } + + public void unsetRow() { + this.row = null; + } + + /** Returns true if field row is set (has been assigned a value) and false otherwise */ + public boolean isSetRow() { + return this.row != null; + } + + public void setRowIsSet(boolean value) { + if (!value) { + this.row = null; + } + } + + public int getColumnValuesSize() { + return (this.columnValues == null) ? 0 : this.columnValues.size(); + } + + public java.util.Iterator getColumnValuesIterator() { + return (this.columnValues == null) ? 
null : this.columnValues.iterator(); + } + + public void addToColumnValues(TColumnValue elem) { + if (this.columnValues == null) { + this.columnValues = new ArrayList(); + } + this.columnValues.add(elem); + } + + public List getColumnValues() { + return this.columnValues; + } + + public TResult setColumnValues(List columnValues) { + this.columnValues = columnValues; + return this; + } + + public void unsetColumnValues() { + this.columnValues = null; + } + + /** Returns true if field columnValues is set (has been assigned a value) and false otherwise */ + public boolean isSetColumnValues() { + return this.columnValues != null; + } + + public void setColumnValuesIsSet(boolean value) { + if (!value) { + this.columnValues = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case ROW: + if (value == null) { + unsetRow(); + } else { + setRow((ByteBuffer)value); + } + break; + + case COLUMN_VALUES: + if (value == null) { + unsetColumnValues(); + } else { + setColumnValues((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case ROW: + return getRow(); + + case COLUMN_VALUES: + return getColumnValues(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case ROW: + return isSetRow(); + case COLUMN_VALUES: + return isSetColumnValues(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TResult) + return this.equals((TResult)that); + return false; + } + + public boolean equals(TResult that) { + if (that == null) + return false; + + boolean this_present_row = true && this.isSetRow(); + boolean that_present_row = true && that.isSetRow(); + if (this_present_row || that_present_row) { + if (!(this_present_row && that_present_row)) + return false; + if (!this.row.equals(that.row)) + return false; + } + + boolean this_present_columnValues = true && this.isSetColumnValues(); + boolean that_present_columnValues = true && that.isSetColumnValues(); + if (this_present_columnValues || that_present_columnValues) { + if (!(this_present_columnValues && that_present_columnValues)) + return false; + if (!this.columnValues.equals(that.columnValues)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + public int compareTo(TResult other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + TResult typedOther = (TResult)other; + + lastComparison = Boolean.valueOf(isSetRow()).compareTo(typedOther.isSetRow()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRow()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.row, typedOther.row); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetColumnValues()).compareTo(typedOther.isSetColumnValues()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetColumnValues()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.columnValues, typedOther.columnValues); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + 
return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("TResult("); + boolean first = true; + + if (isSetRow()) { + sb.append("row:"); + if (this.row == null) { + sb.append("null"); + } else { + org.apache.thrift.TBaseHelper.toString(this.row, sb); + } + first = false; + } + if (!first) sb.append(", "); + sb.append("columnValues:"); + if (this.columnValues == null) { + sb.append("null"); + } else { + sb.append(this.columnValues); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (columnValues == null) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'columnValues' was not present! Struct: " + toString()); + } + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TResultStandardSchemeFactory implements SchemeFactory { + public TResultStandardScheme getScheme() { + return new TResultStandardScheme(); + } + } + + private static class TResultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, TResult struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // ROW + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.row = iprot.readBinary(); + struct.setRowIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // COLUMN_VALUES + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list0 = iprot.readListBegin(); + struct.columnValues = new ArrayList(_list0.size); + for (int _i1 = 0; _i1 < _list0.size; ++_i1) + { + TColumnValue _elem2; // required + _elem2 = new TColumnValue(); + _elem2.read(iprot); + struct.columnValues.add(_elem2); + } + iprot.readListEnd(); + } + struct.setColumnValuesIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + public void 
write(org.apache.thrift.protocol.TProtocol oprot, TResult struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.row != null) { + if (struct.isSetRow()) { + oprot.writeFieldBegin(ROW_FIELD_DESC); + oprot.writeBinary(struct.row); + oprot.writeFieldEnd(); + } + } + if (struct.columnValues != null) { + oprot.writeFieldBegin(COLUMN_VALUES_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.columnValues.size())); + for (TColumnValue _iter3 : struct.columnValues) + { + _iter3.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TResultTupleSchemeFactory implements SchemeFactory { + public TResultTupleScheme getScheme() { + return new TResultTupleScheme(); + } + } + + private static class TResultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TResult struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + { + oprot.writeI32(struct.columnValues.size()); + for (TColumnValue _iter4 : struct.columnValues) + { + _iter4.write(oprot); + } + } + BitSet optionals = new BitSet(); + if (struct.isSetRow()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetRow()) { + oprot.writeBinary(struct.row); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, TResult struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + { + org.apache.thrift.protocol.TList _list5 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.columnValues = new ArrayList(_list5.size); + for (int _i6 = 0; _i6 < _list5.size; ++_i6) + { + TColumnValue _elem7; // required + _elem7 = new TColumnValue(); + _elem7.read(iprot); + struct.columnValues.add(_elem7); + } + } + struct.setColumnValuesIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.row = iprot.readBinary(); + struct.setRowIsSet(true); + } + } + } + +} + Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TScan.java URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TScan.java?rev=1471247&r1=1471246&r2=1471247&view=diff ============================================================================== --- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TScan.java (original) +++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TScan.java Wed Apr 24 04:45:44 2013 @@ -0,0 +1,971 @@ +/** + * Autogenerated by Thrift Compiler (0.9.0) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hbase.thrift2.generated; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import 
java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Any timestamps in the columns are ignored, use timeRange to select by timestamp. + * Max versions defaults to 1. + */ +public class TScan implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TScan"); + + private static final org.apache.thrift.protocol.TField START_ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("startRow", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField STOP_ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("stopRow", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)3); + private static final org.apache.thrift.protocol.TField CACHING_FIELD_DESC = new org.apache.thrift.protocol.TField("caching", org.apache.thrift.protocol.TType.I32, (short)4); + private static final org.apache.thrift.protocol.TField MAX_VERSIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("maxVersions", org.apache.thrift.protocol.TType.I32, (short)5); + private static final org.apache.thrift.protocol.TField TIME_RANGE_FIELD_DESC = new org.apache.thrift.protocol.TField("timeRange", org.apache.thrift.protocol.TType.STRUCT, (short)6); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new TScanStandardSchemeFactory()); + schemes.put(TupleScheme.class, new TScanTupleSchemeFactory()); + } + + public ByteBuffer startRow; // optional + public ByteBuffer stopRow; // optional + public List columns; // optional + public int caching; // optional + public int maxVersions; // optional + public TTimeRange timeRange; // optional + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + START_ROW((short)1, "startRow"), + STOP_ROW((short)2, "stopRow"), + COLUMNS((short)3, "columns"), + CACHING((short)4, "caching"), + MAX_VERSIONS((short)5, "maxVersions"), + TIME_RANGE((short)6, "timeRange"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // START_ROW + return START_ROW; + case 2: // STOP_ROW + return STOP_ROW; + case 3: // COLUMNS + return COLUMNS; + case 4: // CACHING + return CACHING; + case 5: // MAX_VERSIONS + return MAX_VERSIONS; + case 6: // TIME_RANGE + return TIME_RANGE; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __CACHING_ISSET_ID = 0; + private static final int __MAXVERSIONS_ISSET_ID = 1; + private byte __isset_bitfield = 0; + private _Fields optionals[] = {_Fields.START_ROW,_Fields.STOP_ROW,_Fields.COLUMNS,_Fields.CACHING,_Fields.MAX_VERSIONS,_Fields.TIME_RANGE}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.START_ROW, new org.apache.thrift.meta_data.FieldMetaData("startRow", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); + tmpMap.put(_Fields.STOP_ROW, new org.apache.thrift.meta_data.FieldMetaData("stopRow", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); + tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TColumn.class)))); + tmpMap.put(_Fields.CACHING, new org.apache.thrift.meta_data.FieldMetaData("caching", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + tmpMap.put(_Fields.MAX_VERSIONS, new org.apache.thrift.meta_data.FieldMetaData("maxVersions", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + tmpMap.put(_Fields.TIME_RANGE, new org.apache.thrift.meta_data.FieldMetaData("timeRange", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTimeRange.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TScan.class, metaDataMap); + } + + public TScan() { + this.maxVersions = 1; + + } + + /** + * Performs a deep copy on other. 
+ */ + public TScan(TScan other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetStartRow()) { + this.startRow = org.apache.thrift.TBaseHelper.copyBinary(other.startRow); +; + } + if (other.isSetStopRow()) { + this.stopRow = org.apache.thrift.TBaseHelper.copyBinary(other.stopRow); +; + } + if (other.isSetColumns()) { + List __this__columns = new ArrayList(); + for (TColumn other_element : other.columns) { + __this__columns.add(new TColumn(other_element)); + } + this.columns = __this__columns; + } + this.caching = other.caching; + this.maxVersions = other.maxVersions; + if (other.isSetTimeRange()) { + this.timeRange = new TTimeRange(other.timeRange); + } + } + + public TScan deepCopy() { + return new TScan(this); + } + + @Override + public void clear() { + this.startRow = null; + this.stopRow = null; + this.columns = null; + setCachingIsSet(false); + this.caching = 0; + this.maxVersions = 1; + + this.timeRange = null; + } + + public byte[] getStartRow() { + setStartRow(org.apache.thrift.TBaseHelper.rightSize(startRow)); + return startRow == null ? null : startRow.array(); + } + + public ByteBuffer bufferForStartRow() { + return startRow; + } + + public TScan setStartRow(byte[] startRow) { + setStartRow(startRow == null ? (ByteBuffer)null : ByteBuffer.wrap(startRow)); + return this; + } + + public TScan setStartRow(ByteBuffer startRow) { + this.startRow = startRow; + return this; + } + + public void unsetStartRow() { + this.startRow = null; + } + + /** Returns true if field startRow is set (has been assigned a value) and false otherwise */ + public boolean isSetStartRow() { + return this.startRow != null; + } + + public void setStartRowIsSet(boolean value) { + if (!value) { + this.startRow = null; + } + } + + public byte[] getStopRow() { + setStopRow(org.apache.thrift.TBaseHelper.rightSize(stopRow)); + return stopRow == null ? null : stopRow.array(); + } + + public ByteBuffer bufferForStopRow() { + return stopRow; + } + + public TScan setStopRow(byte[] stopRow) { + setStopRow(stopRow == null ? (ByteBuffer)null : ByteBuffer.wrap(stopRow)); + return this; + } + + public TScan setStopRow(ByteBuffer stopRow) { + this.stopRow = stopRow; + return this; + } + + public void unsetStopRow() { + this.stopRow = null; + } + + /** Returns true if field stopRow is set (has been assigned a value) and false otherwise */ + public boolean isSetStopRow() { + return this.stopRow != null; + } + + public void setStopRowIsSet(boolean value) { + if (!value) { + this.stopRow = null; + } + } + + public int getColumnsSize() { + return (this.columns == null) ? 0 : this.columns.size(); + } + + public java.util.Iterator getColumnsIterator() { + return (this.columns == null) ? 
null : this.columns.iterator(); + } + + public void addToColumns(TColumn elem) { + if (this.columns == null) { + this.columns = new ArrayList(); + } + this.columns.add(elem); + } + + public List getColumns() { + return this.columns; + } + + public TScan setColumns(List columns) { + this.columns = columns; + return this; + } + + public void unsetColumns() { + this.columns = null; + } + + /** Returns true if field columns is set (has been assigned a value) and false otherwise */ + public boolean isSetColumns() { + return this.columns != null; + } + + public void setColumnsIsSet(boolean value) { + if (!value) { + this.columns = null; + } + } + + public int getCaching() { + return this.caching; + } + + public TScan setCaching(int caching) { + this.caching = caching; + setCachingIsSet(true); + return this; + } + + public void unsetCaching() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __CACHING_ISSET_ID); + } + + /** Returns true if field caching is set (has been assigned a value) and false otherwise */ + public boolean isSetCaching() { + return EncodingUtils.testBit(__isset_bitfield, __CACHING_ISSET_ID); + } + + public void setCachingIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __CACHING_ISSET_ID, value); + } + + public int getMaxVersions() { + return this.maxVersions; + } + + public TScan setMaxVersions(int maxVersions) { + this.maxVersions = maxVersions; + setMaxVersionsIsSet(true); + return this; + } + + public void unsetMaxVersions() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAXVERSIONS_ISSET_ID); + } + + /** Returns true if field maxVersions is set (has been assigned a value) and false otherwise */ + public boolean isSetMaxVersions() { + return EncodingUtils.testBit(__isset_bitfield, __MAXVERSIONS_ISSET_ID); + } + + public void setMaxVersionsIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAXVERSIONS_ISSET_ID, value); + } + + public TTimeRange getTimeRange() { + return this.timeRange; + } + + public TScan setTimeRange(TTimeRange timeRange) { + this.timeRange = timeRange; + return this; + } + + public void unsetTimeRange() { + this.timeRange = null; + } + + /** Returns true if field timeRange is set (has been assigned a value) and false otherwise */ + public boolean isSetTimeRange() { + return this.timeRange != null; + } + + public void setTimeRangeIsSet(boolean value) { + if (!value) { + this.timeRange = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case START_ROW: + if (value == null) { + unsetStartRow(); + } else { + setStartRow((ByteBuffer)value); + } + break; + + case STOP_ROW: + if (value == null) { + unsetStopRow(); + } else { + setStopRow((ByteBuffer)value); + } + break; + + case COLUMNS: + if (value == null) { + unsetColumns(); + } else { + setColumns((List)value); + } + break; + + case CACHING: + if (value == null) { + unsetCaching(); + } else { + setCaching((Integer)value); + } + break; + + case MAX_VERSIONS: + if (value == null) { + unsetMaxVersions(); + } else { + setMaxVersions((Integer)value); + } + break; + + case TIME_RANGE: + if (value == null) { + unsetTimeRange(); + } else { + setTimeRange((TTimeRange)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case START_ROW: + return getStartRow(); + + case STOP_ROW: + return getStopRow(); + + case COLUMNS: + return getColumns(); + + case CACHING: + return Integer.valueOf(getCaching()); + + case MAX_VERSIONS: + 
return Integer.valueOf(getMaxVersions()); + + case TIME_RANGE: + return getTimeRange(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case START_ROW: + return isSetStartRow(); + case STOP_ROW: + return isSetStopRow(); + case COLUMNS: + return isSetColumns(); + case CACHING: + return isSetCaching(); + case MAX_VERSIONS: + return isSetMaxVersions(); + case TIME_RANGE: + return isSetTimeRange(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TScan) + return this.equals((TScan)that); + return false; + } + + public boolean equals(TScan that) { + if (that == null) + return false; + + boolean this_present_startRow = true && this.isSetStartRow(); + boolean that_present_startRow = true && that.isSetStartRow(); + if (this_present_startRow || that_present_startRow) { + if (!(this_present_startRow && that_present_startRow)) + return false; + if (!this.startRow.equals(that.startRow)) + return false; + } + + boolean this_present_stopRow = true && this.isSetStopRow(); + boolean that_present_stopRow = true && that.isSetStopRow(); + if (this_present_stopRow || that_present_stopRow) { + if (!(this_present_stopRow && that_present_stopRow)) + return false; + if (!this.stopRow.equals(that.stopRow)) + return false; + } + + boolean this_present_columns = true && this.isSetColumns(); + boolean that_present_columns = true && that.isSetColumns(); + if (this_present_columns || that_present_columns) { + if (!(this_present_columns && that_present_columns)) + return false; + if (!this.columns.equals(that.columns)) + return false; + } + + boolean this_present_caching = true && this.isSetCaching(); + boolean that_present_caching = true && that.isSetCaching(); + if (this_present_caching || that_present_caching) { + if (!(this_present_caching && that_present_caching)) + return false; + if (this.caching != that.caching) + return false; + } + + boolean this_present_maxVersions = true && this.isSetMaxVersions(); + boolean that_present_maxVersions = true && that.isSetMaxVersions(); + if (this_present_maxVersions || that_present_maxVersions) { + if (!(this_present_maxVersions && that_present_maxVersions)) + return false; + if (this.maxVersions != that.maxVersions) + return false; + } + + boolean this_present_timeRange = true && this.isSetTimeRange(); + boolean that_present_timeRange = true && that.isSetTimeRange(); + if (this_present_timeRange || that_present_timeRange) { + if (!(this_present_timeRange && that_present_timeRange)) + return false; + if (!this.timeRange.equals(that.timeRange)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + public int compareTo(TScan other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + TScan typedOther = (TScan)other; + + lastComparison = Boolean.valueOf(isSetStartRow()).compareTo(typedOther.isSetStartRow()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetStartRow()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.startRow, typedOther.startRow); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = 
Boolean.valueOf(isSetStopRow()).compareTo(typedOther.isSetStopRow()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetStopRow()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.stopRow, typedOther.stopRow); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetColumns()).compareTo(typedOther.isSetColumns()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetColumns()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.columns, typedOther.columns); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetCaching()).compareTo(typedOther.isSetCaching()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCaching()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.caching, typedOther.caching); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetMaxVersions()).compareTo(typedOther.isSetMaxVersions()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMaxVersions()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.maxVersions, typedOther.maxVersions); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTimeRange()).compareTo(typedOther.isSetTimeRange()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTimeRange()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.timeRange, typedOther.timeRange); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("TScan("); + boolean first = true; + + if (isSetStartRow()) { + sb.append("startRow:"); + if (this.startRow == null) { + sb.append("null"); + } else { + org.apache.thrift.TBaseHelper.toString(this.startRow, sb); + } + first = false; + } + if (isSetStopRow()) { + if (!first) sb.append(", "); + sb.append("stopRow:"); + if (this.stopRow == null) { + sb.append("null"); + } else { + org.apache.thrift.TBaseHelper.toString(this.stopRow, sb); + } + first = false; + } + if (isSetColumns()) { + if (!first) sb.append(", "); + sb.append("columns:"); + if (this.columns == null) { + sb.append("null"); + } else { + sb.append(this.columns); + } + first = false; + } + if (isSetCaching()) { + if (!first) sb.append(", "); + sb.append("caching:"); + sb.append(this.caching); + first = false; + } + if (isSetMaxVersions()) { + if (!first) sb.append(", "); + sb.append("maxVersions:"); + sb.append(this.maxVersions); + first = false; + } + if (isSetTimeRange()) { + if (!first) sb.append(", "); + sb.append("timeRange:"); + if (this.timeRange == null) { + sb.append("null"); + } else { + sb.append(this.timeRange); + } + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (timeRange != null) { + timeRange.validate(); + } + } + + private 
void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TScanStandardSchemeFactory implements SchemeFactory { + public TScanStandardScheme getScheme() { + return new TScanStandardScheme(); + } + } + + private static class TScanStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // START_ROW + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.startRow = iprot.readBinary(); + struct.setStartRowIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // STOP_ROW + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.stopRow = iprot.readBinary(); + struct.setStopRowIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // COLUMNS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list40 = iprot.readListBegin(); + struct.columns = new ArrayList(_list40.size); + for (int _i41 = 0; _i41 < _list40.size; ++_i41) + { + TColumn _elem42; // required + _elem42 = new TColumn(); + _elem42.read(iprot); + struct.columns.add(_elem42); + } + iprot.readListEnd(); + } + struct.setColumnsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // CACHING + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.caching = iprot.readI32(); + struct.setCachingIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // MAX_VERSIONS + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.maxVersions = iprot.readI32(); + struct.setMaxVersionsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 6: // TIME_RANGE + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.timeRange = new TTimeRange(); + struct.timeRange.read(iprot); + struct.setTimeRangeIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + public void 
write(org.apache.thrift.protocol.TProtocol oprot, TScan struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.startRow != null) { + if (struct.isSetStartRow()) { + oprot.writeFieldBegin(START_ROW_FIELD_DESC); + oprot.writeBinary(struct.startRow); + oprot.writeFieldEnd(); + } + } + if (struct.stopRow != null) { + if (struct.isSetStopRow()) { + oprot.writeFieldBegin(STOP_ROW_FIELD_DESC); + oprot.writeBinary(struct.stopRow); + oprot.writeFieldEnd(); + } + } + if (struct.columns != null) { + if (struct.isSetColumns()) { + oprot.writeFieldBegin(COLUMNS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.columns.size())); + for (TColumn _iter43 : struct.columns) + { + _iter43.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + } + if (struct.isSetCaching()) { + oprot.writeFieldBegin(CACHING_FIELD_DESC); + oprot.writeI32(struct.caching); + oprot.writeFieldEnd(); + } + if (struct.isSetMaxVersions()) { + oprot.writeFieldBegin(MAX_VERSIONS_FIELD_DESC); + oprot.writeI32(struct.maxVersions); + oprot.writeFieldEnd(); + } + if (struct.timeRange != null) { + if (struct.isSetTimeRange()) { + oprot.writeFieldBegin(TIME_RANGE_FIELD_DESC); + struct.timeRange.write(oprot); + oprot.writeFieldEnd(); + } + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TScanTupleSchemeFactory implements SchemeFactory { + public TScanTupleScheme getScheme() { + return new TScanTupleScheme(); + } + } + + private static class TScanTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TScan struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetStartRow()) { + optionals.set(0); + } + if (struct.isSetStopRow()) { + optionals.set(1); + } + if (struct.isSetColumns()) { + optionals.set(2); + } + if (struct.isSetCaching()) { + optionals.set(3); + } + if (struct.isSetMaxVersions()) { + optionals.set(4); + } + if (struct.isSetTimeRange()) { + optionals.set(5); + } + oprot.writeBitSet(optionals, 6); + if (struct.isSetStartRow()) { + oprot.writeBinary(struct.startRow); + } + if (struct.isSetStopRow()) { + oprot.writeBinary(struct.stopRow); + } + if (struct.isSetColumns()) { + { + oprot.writeI32(struct.columns.size()); + for (TColumn _iter44 : struct.columns) + { + _iter44.write(oprot); + } + } + } + if (struct.isSetCaching()) { + oprot.writeI32(struct.caching); + } + if (struct.isSetMaxVersions()) { + oprot.writeI32(struct.maxVersions); + } + if (struct.isSetTimeRange()) { + struct.timeRange.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, TScan struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(6); + if (incoming.get(0)) { + struct.startRow = iprot.readBinary(); + struct.setStartRowIsSet(true); + } + if (incoming.get(1)) { + struct.stopRow = iprot.readBinary(); + struct.setStopRowIsSet(true); + } + if (incoming.get(2)) { + { + org.apache.thrift.protocol.TList _list45 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.columns = new ArrayList(_list45.size); + for (int _i46 = 0; _i46 < _list45.size; ++_i46) + { + TColumn _elem47; // required + _elem47 = new TColumn(); + _elem47.read(iprot); + 
struct.columns.add(_elem47); + } + } + struct.setColumnsIsSet(true); + } + if (incoming.get(3)) { + struct.caching = iprot.readI32(); + struct.setCachingIsSet(true); + } + if (incoming.get(4)) { + struct.maxVersions = iprot.readI32(); + struct.setMaxVersionsIsSet(true); + } + if (incoming.get(5)) { + struct.timeRange = new TTimeRange(); + struct.timeRange.read(iprot); + struct.setTimeRangeIsSet(true); + } + } + } + +} + Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java?rev=1471247&r1=1471246&r2=1471247&view=diff ============================================================================== --- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java (original) +++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java Wed Apr 24 04:45:44 2013 @@ -0,0 +1,469 @@ +/** + * Autogenerated by Thrift Compiler (0.9.0) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hbase.thrift2.generated; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class TTimeRange implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TTimeRange"); + + private static final org.apache.thrift.protocol.TField MIN_STAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("minStamp", org.apache.thrift.protocol.TType.I64, (short)1); + private static final org.apache.thrift.protocol.TField MAX_STAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("maxStamp", org.apache.thrift.protocol.TType.I64, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new TTimeRangeStandardSchemeFactory()); + schemes.put(TupleScheme.class, new TTimeRangeTupleSchemeFactory()); + } + + public long minStamp; // required + public long maxStamp; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + MIN_STAMP((short)1, "minStamp"), + MAX_STAMP((short)2, "maxStamp"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // MIN_STAMP + return MIN_STAMP; + case 2: // MAX_STAMP + return MAX_STAMP; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __MINSTAMP_ISSET_ID = 0; + private static final int __MAXSTAMP_ISSET_ID = 1; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.MIN_STAMP, new org.apache.thrift.meta_data.FieldMetaData("minStamp", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.MAX_STAMP, new org.apache.thrift.meta_data.FieldMetaData("maxStamp", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TTimeRange.class, metaDataMap); + } + + public TTimeRange() { + } + + public TTimeRange( + long minStamp, + long maxStamp) + { + this(); + this.minStamp = minStamp; + setMinStampIsSet(true); + this.maxStamp = maxStamp; + setMaxStampIsSet(true); + } + + /** + * Performs a deep copy on other. 
+ */ + public TTimeRange(TTimeRange other) { + __isset_bitfield = other.__isset_bitfield; + this.minStamp = other.minStamp; + this.maxStamp = other.maxStamp; + } + + public TTimeRange deepCopy() { + return new TTimeRange(this); + } + + @Override + public void clear() { + setMinStampIsSet(false); + this.minStamp = 0; + setMaxStampIsSet(false); + this.maxStamp = 0; + } + + public long getMinStamp() { + return this.minStamp; + } + + public TTimeRange setMinStamp(long minStamp) { + this.minStamp = minStamp; + setMinStampIsSet(true); + return this; + } + + public void unsetMinStamp() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MINSTAMP_ISSET_ID); + } + + /** Returns true if field minStamp is set (has been assigned a value) and false otherwise */ + public boolean isSetMinStamp() { + return EncodingUtils.testBit(__isset_bitfield, __MINSTAMP_ISSET_ID); + } + + public void setMinStampIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MINSTAMP_ISSET_ID, value); + } + + public long getMaxStamp() { + return this.maxStamp; + } + + public TTimeRange setMaxStamp(long maxStamp) { + this.maxStamp = maxStamp; + setMaxStampIsSet(true); + return this; + } + + public void unsetMaxStamp() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAXSTAMP_ISSET_ID); + } + + /** Returns true if field maxStamp is set (has been assigned a value) and false otherwise */ + public boolean isSetMaxStamp() { + return EncodingUtils.testBit(__isset_bitfield, __MAXSTAMP_ISSET_ID); + } + + public void setMaxStampIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAXSTAMP_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case MIN_STAMP: + if (value == null) { + unsetMinStamp(); + } else { + setMinStamp((Long)value); + } + break; + + case MAX_STAMP: + if (value == null) { + unsetMaxStamp(); + } else { + setMaxStamp((Long)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case MIN_STAMP: + return Long.valueOf(getMinStamp()); + + case MAX_STAMP: + return Long.valueOf(getMaxStamp()); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case MIN_STAMP: + return isSetMinStamp(); + case MAX_STAMP: + return isSetMaxStamp(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TTimeRange) + return this.equals((TTimeRange)that); + return false; + } + + public boolean equals(TTimeRange that) { + if (that == null) + return false; + + boolean this_present_minStamp = true; + boolean that_present_minStamp = true; + if (this_present_minStamp || that_present_minStamp) { + if (!(this_present_minStamp && that_present_minStamp)) + return false; + if (this.minStamp != that.minStamp) + return false; + } + + boolean this_present_maxStamp = true; + boolean that_present_maxStamp = true; + if (this_present_maxStamp || that_present_maxStamp) { + if (!(this_present_maxStamp && that_present_maxStamp)) + return false; + if (this.maxStamp != that.maxStamp) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + public int compareTo(TTimeRange other) { + if 
(!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + TTimeRange typedOther = (TTimeRange)other; + + lastComparison = Boolean.valueOf(isSetMinStamp()).compareTo(typedOther.isSetMinStamp()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMinStamp()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.minStamp, typedOther.minStamp); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetMaxStamp()).compareTo(typedOther.isSetMaxStamp()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMaxStamp()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.maxStamp, typedOther.maxStamp); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("TTimeRange("); + boolean first = true; + + sb.append("minStamp:"); + sb.append(this.minStamp); + first = false; + if (!first) sb.append(", "); + sb.append("maxStamp:"); + sb.append(this.maxStamp); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // alas, we cannot check 'minStamp' because it's a primitive and you chose the non-beans generator. + // alas, we cannot check 'maxStamp' because it's a primitive and you chose the non-beans generator. + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TTimeRangeStandardSchemeFactory implements SchemeFactory { + public TTimeRangeStandardScheme getScheme() { + return new TTimeRangeStandardScheme(); + } + } + + private static class TTimeRangeStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, TTimeRange struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // MIN_STAMP + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.minStamp = iprot.readI64(); + struct.setMinStampIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // MAX_STAMP + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.maxStamp = iprot.readI64(); + struct.setMaxStampIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + if (!struct.isSetMinStamp()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'minStamp' was not found in serialized data! Struct: " + toString()); + } + if (!struct.isSetMaxStamp()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'maxStamp' was not found in serialized data! 
Struct: " + toString()); + } + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, TTimeRange struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldBegin(MIN_STAMP_FIELD_DESC); + oprot.writeI64(struct.minStamp); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(MAX_STAMP_FIELD_DESC); + oprot.writeI64(struct.maxStamp); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TTimeRangeTupleSchemeFactory implements SchemeFactory { + public TTimeRangeTupleScheme getScheme() { + return new TTimeRangeTupleScheme(); + } + } + + private static class TTimeRangeTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TTimeRange struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeI64(struct.minStamp); + oprot.writeI64(struct.maxStamp); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, TTimeRange struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.minStamp = iprot.readI64(); + struct.setMinStampIsSet(true); + struct.maxStamp = iprot.readI64(); + struct.setMaxStampIsSet(true); + } + } + +} + Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift2/package.html URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift2/package.html?rev=1471247&r1=1471246&r2=1471247&view=diff ============================================================================== --- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift2/package.html (original) +++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift2/package.html Wed Apr 24 04:45:44 2013 @@ -0,0 +1,107 @@ + + + + + + + +Provides an HBase Thrift +service. + +This package contains a Thrift interface definition file for an HBase RPC +service and a Java server implementation. + +There are currently 2 thrift server implementations in HBase, the packages: + +
    +
  • org.apache.hadoop.hbase.thrift: This may one day be marked as deprecated.
  • org.apache.hadoop.hbase.thrift2: i.e. this package. This is intended to closely match the HTable interface and to one day supersede the older thrift package (the old thrift mimics an API HBase no longer has).
+ +

What is Thrift?

+ + +

"Thrift is a software framework for scalable cross-language services +development. It combines a software stack with a code generation engine to +build services that work efficiently and seamlessly between C++, Java, Python, +PHP, Ruby, Erlang, Perl, Haskell, C#, Cocoa, JavaScript, Node.js, Smalltalk, +and OCaml. Originally developed at Facebook, Thrift was open sourced in April +2007 and entered the Apache Incubator in May, 2008". +From http://thrift.apache.org/

+ +

Description

+ +

The HBase API is defined in the file hbase.thrift. A server-side implementation of the API is in org.apache.hadoop.hbase.thrift2.ThriftHBaseServiceHandler, with the server boilerplate in org.apache.hadoop.hbase.thrift2.ThriftServer. The generated interfaces, types, and RPC utility files are checked into SVN under the org.apache.hadoop.hbase.thrift2.generated directory.
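For orientation, here is a minimal, hypothetical client sketch built against the generated classes this package ships (THBaseService, TGet, TTimeRange, TResult). It is an illustration, not part of the commit: it assumes a thrift2 ThriftServer listening on localhost:9090 with the default unframed transport and binary protocol, and the table name "t1" and row key "row1" are made up.

  import java.nio.ByteBuffer;
  import org.apache.hadoop.hbase.thrift2.generated.TGet;
  import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
  import org.apache.hadoop.hbase.thrift2.generated.TResult;
  import org.apache.hadoop.hbase.thrift2.generated.TTimeRange;
  import org.apache.thrift.protocol.TBinaryProtocol;
  import org.apache.thrift.transport.TSocket;
  import org.apache.thrift.transport.TTransport;

  public class Thrift2GetExample {
    public static void main(String[] args) throws Exception {
      // Assumes the default TThreadPoolServer, i.e. a plain (unframed) socket transport.
      TTransport transport = new TSocket("localhost", 9090);
      transport.open();
      try {
        THBaseService.Client client =
            new THBaseService.Client(new TBinaryProtocol(transport));

        // Fetch one row, restricted to a time range (see the TTimeRange struct in hbase.thrift).
        TGet get = new TGet(ByteBuffer.wrap("row1".getBytes("UTF-8")));
        get.setTimeRange(new TTimeRange(0L, System.currentTimeMillis()));

        TResult result = client.get(ByteBuffer.wrap("t1".getBytes("UTF-8")), get);
        System.out.println("columns returned: " + result.getColumnValuesSize());
      } finally {
        transport.close();
      }
    }
  }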

+

To stop, use: +

+  ./bin/hbase-daemon.sh stop thrift
+
+ +These are the command line arguments the Thrift server understands in addition to start and stop: +
+
-b, --bind
+
Address to bind the Thrift server to. Not supported by the Nonblocking and HsHa servers [default: 0.0.0.0]
+ +
-p, --port
+
Port to bind to [default: 9090]
+ +
-f, --framed
+
Use framed transport (implied when using one of the non-blocking servers)
+ +
-c, --compact
+
Use the compact protocol [default: binary protocol]
+ +
-h, --help
+
Displays usage information for the Thrift server
+ +
-threadpool
+
Use the TThreadPoolServer. This is the default.
+ +
-hsha
+
Use the THsHaServer. This implies the framed transport.
+ +
-nonblocking
+
Use the TNonblockingServer. This implies the framed transport.
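For example, a start command combining several of these options might look like the following (a hedged sketch only; it assumes that arguments given to hbase-daemon.sh after the service name are passed through to the Thrift server, and the bind address and port shown are arbitrary):

  ./bin/hbase-daemon.sh start thrift -b 127.0.0.1 -p 9091 -c -threadpool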
+
+ +

Details

+ +

HBase currently uses version 0.9.0 of Apache Thrift.

+

The files were generated by running the commands under the hbase checkout dir: +

+  thrift -strict --gen java:hashcode ./hbase-server/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift
+  # Move the generated files into their expected location under hbase
+  mv gen-java/org/apache/hadoop/hbase/thrift2/generated/* hbase-server/src/main/java/org/apache/hadoop/hbase/thrift2/generated/
+  # Remove the gen-java directory made by thrift
+  rm -rf gen-java
+
+ +

The 'thrift' binary is the Thrift compiler; it is distributed separately from HBase in a Thrift release. The language-specific runtime libraries are also part of a Thrift release. A version of the Java runtime library is included in HBase via Maven.

+ + + Modified: hbase/trunk/hbase-server/src/main/javadoc/org/apache/hadoop/hbase/thrift/package.html URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/javadoc/org/apache/hadoop/hbase/thrift/package.html?rev=1471247&r1=1471246&r2=1471247&view=diff ============================================================================== --- hbase/trunk/hbase-server/src/main/javadoc/org/apache/hadoop/hbase/thrift/package.html (original) +++ hbase/trunk/hbase-server/src/main/javadoc/org/apache/hadoop/hbase/thrift/package.html Wed Apr 24 04:45:44 2013 @@ -35,6 +35,13 @@ Erlang, Perl, Haskell, C#, Cocoa, Smallt

Description

+

Important note: We tried to deprecate this Thrift interface and replace it with the interface defined in the thrift2 package, but this package will not die. Folks keep adding to it and fixing it up, so it will be around for a while yet, until someone takes command and drives it out of existence, replacing it with an interface that better matches the current HBase API (this package was modelled on an old HBase API that has long since been dropped).

+

The org.apache.hadoop.hbase.thrift.generated.Hbase.Iface HBase API is defined in the file Hbase.thrift (Click the former to see the thrift generated documentation of thrift interface). A server-side implementation of the API is in Modified: hbase/trunk/hbase-server/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift?rev=1471247&r1=1471246&r2=1471247&view=diff ============================================================================== --- hbase/trunk/hbase-server/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift (original) +++ hbase/trunk/hbase-server/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift Wed Apr 24 04:45:44 2013 @@ -0,0 +1,412 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// NOTE: The "required" and "optional" keywords for the service methods are purely for documentation + +namespace java org.apache.hadoop.hbase.thrift2.generated +namespace cpp apache.hadoop.hbase.thrift2 +namespace rb Apache.Hadoop.Hbase.Thrift2 +namespace py hbase +namespace perl Hbase + +struct TTimeRange { + 1: required i64 minStamp, + 2: required i64 maxStamp +} + +/** + * Addresses a single cell or multiple cells + * in a HBase table by column family and optionally + * a column qualifier and timestamp + */ +struct TColumn { + 1: required binary family, + 2: optional binary qualifier, + 3: optional i64 timestamp +} + +/** + * Represents a single cell and its value. + */ +struct TColumnValue { + 1: required binary family, + 2: required binary qualifier, + 3: required binary value, + 4: optional i64 timestamp +} + +/** + * Represents a single cell and the amount to increment it by + */ +struct TColumnIncrement { + 1: required binary family, + 2: required binary qualifier, + 3: optional i64 amount = 1 +} + +/** + * if no Result is found, row and columnValues will not be set. + */ +struct TResult { + 1: optional binary row, + 2: required list columnValues +} + +/** + * Specify type of delete: + * - DELETE_COLUMN means exactly one version will be removed, + * - DELETE_COLUMNS means previous versions will also be removed. + */ +enum TDeleteType { + DELETE_COLUMN = 0, + DELETE_COLUMNS = 1 +} + +/** + * Used to perform Get operations on a single row. + * + * The scope can be further narrowed down by specifying a list of + * columns or column families. + * + * To get everything for a row, instantiate a Get object with just the row to get. + * To further define the scope of what to get you can add a timestamp or time range + * with an optional maximum number of versions to return. + * + * If you specify a time range and a timestamp the range is ignored. + * Timestamps on TColumns are ignored. 
+ * + * TODO: Filter, Locks + */ +struct TGet { + 1: required binary row, + 2: optional list columns, + + 3: optional i64 timestamp, + 4: optional TTimeRange timeRange, + + 5: optional i32 maxVersions, +} + +/** + * Used to perform Put operations for a single row. + * + * Add column values to this object and they'll be added. + * You can provide a default timestamp if the column values + * don't have one. If you don't provide a default timestamp + * the current time is inserted. + * + * You can also specify if this Put should be written + * to the write-ahead Log (WAL) or not. It defaults to true. + */ +struct TPut { + 1: required binary row, + 2: required list columnValues + 3: optional i64 timestamp, + 4: optional bool writeToWal = 1 +} + +/** + * Used to perform Delete operations on a single row. + * + * The scope can be further narrowed down by specifying a list of + * columns or column families as TColumns. + * + * Specifying only a family in a TColumn will delete the whole family. + * If a timestamp is specified all versions with a timestamp less than + * or equal to this will be deleted. If no timestamp is specified the + * current time will be used. + * + * Specifying a family and a column qualifier in a TColumn will delete only + * this qualifier. If a timestamp is specified only versions equal + * to this timestamp will be deleted. If no timestamp is specified the + * most recent version will be deleted. To delete all previous versions, + * specify the DELETE_COLUMNS TDeleteType. + * + * The top level timestamp is only used if a complete row should be deleted + * (i.e. no columns are passed) and if it is specified it works the same way + * as if you had added a TColumn for every column family and this timestamp + * (i.e. all versions older than or equal in all column families will be deleted) + * + */ +struct TDelete { + 1: required binary row, + 2: optional list columns, + 3: optional i64 timestamp, + 4: optional TDeleteType deleteType = 1, + 5: optional bool writeToWal = 1 +} + +/** + * Used to perform Increment operations for a single row. + * + * You can specify if this Increment should be written + * to the write-ahead Log (WAL) or not. It defaults to true. + */ +struct TIncrement { + 1: required binary row, + 2: required list columns, + 3: optional bool writeToWal = 1 +} + +/** + * Any timestamps in the columns are ignored, use timeRange to select by timestamp. + * Max versions defaults to 1. + */ +struct TScan { + 1: optional binary startRow, + 2: optional binary stopRow, + 3: optional list columns + 4: optional i32 caching, + 5: optional i32 maxVersions=1, + 6: optional TTimeRange timeRange, +} + +// +// Exceptions +// + +/** + * A TIOError exception signals that an error occurred communicating + * to the HBase master or a HBase region server. Also used to return + * more general HBase error conditions. + */ +exception TIOError { + 1: optional string message +} + +/** + * A TIllegalArgument exception indicates an illegal or invalid + * argument was passed into a procedure. + */ +exception TIllegalArgument { + 1: optional string message +} + +service THBaseService { + + /** + * Test for the existence of columns in the table, as specified in the TGet. + * + * @return true if the specified TGet matches one or more keys, false if not + */ + bool exists( + /** the table to check on */ + 1: required binary table, + + /** the TGet to check for */ + 2: required TGet get + ) throws (1:TIOError io) + + /** + * Method for getting data from a row. 
+ * + * If the row cannot be found an empty Result is returned. + * This can be checked by the empty field of the TResult + * + * @return the result + */ + TResult get( + /** the table to get from */ + 1: required binary table, + + /** the TGet to fetch */ + 2: required TGet get + ) throws (1: TIOError io) + + /** + * Method for getting multiple rows. + * + * If a row cannot be found there will be a null + * value in the result list for that TGet at the + * same position. + * + * So the Results are in the same order as the TGets. + */ + list getMultiple( + /** the table to get from */ + 1: required binary table, + + /** a list of TGets to fetch, the Result list + will have the Results at corresponding positions + or null if there was an error */ + 2: required list gets + ) throws (1: TIOError io) + + /** + * Commit a TPut to a table. + */ + void put( + /** the table to put data in */ + 1: required binary table, + + /** the TPut to put */ + 2: required TPut put + ) throws (1: TIOError io) + + /** + * Atomically checks if a row/family/qualifier value matches the expected + * value. If it does, it adds the TPut. + * + * @return true if the new put was executed, false otherwise + */ + bool checkAndPut( + /** to check in and put to */ + 1: required binary table, + + /** row to check */ + 2: required binary row, + + /** column family to check */ + 3: required binary family, + + /** column qualifier to check */ + 4: required binary qualifier, + + /** the expected value, if not provided the + check is for the non-existence of the + column in question */ + 5: binary value, + + /** the TPut to put if the check succeeds */ + 6: required TPut put + ) throws (1: TIOError io) + + /** + * Commit a List of Puts to the table. + */ + void putMultiple( + /** the table to put data in */ + 1: required binary table, + + /** a list of TPuts to commit */ + 2: required list puts + ) throws (1: TIOError io) + + /** + * Deletes as specified by the TDelete. + * + * Note: "delete" is a reserved keyword and cannot be used in Thrift + * thus the inconsistent naming scheme from the other functions. + */ + void deleteSingle( + /** the table to delete from */ + 1: required binary table, + + /** the TDelete to delete */ + 2: required TDelete deleteSingle + ) throws (1: TIOError io) + + /** + * Bulk commit a List of TDeletes to the table. + * + * Throws a TIOError if any of the deletes fail. + * + * Always returns an empty list for backwards compatibility. + */ + list deleteMultiple( + /** the table to delete from */ + 1: required binary table, + + /** list of TDeletes to delete */ + 2: required list deletes + ) throws (1: TIOError io) + + /** + * Atomically checks if a row/family/qualifier value matches the expected + * value. If it does, it adds the delete. 
+ * + * @return true if the new delete was executed, false otherwise + */ + bool checkAndDelete( + /** to check in and delete from */ + 1: required binary table, + + /** row to check */ + 2: required binary row, + + /** column family to check */ + 3: required binary family, + + /** column qualifier to check */ + 4: required binary qualifier, + + /** the expected value, if not provided the + check is for the non-existence of the + column in question */ + 5: binary value, + + /** the TDelete to execute if the check succeeds */ + 6: required TDelete deleteSingle + ) throws (1: TIOError io) + + TResult increment( + /** the table to increment the value on */ + 1: required binary table, + + /** the TIncrement to increment */ + 2: required TIncrement increment + ) throws (1: TIOError io) + + /** + * Get a Scanner for the provided TScan object. + * + * @return Scanner Id to be used with other scanner procedures + */ + i32 openScanner( + /** the table to get the Scanner for */ + 1: required binary table, + + /** the scan object to get a Scanner for */ + 2: required TScan scan, + ) throws (1: TIOError io) + + /** + * Grabs multiple rows from a Scanner. + * + * @return Between zero and numRows TResults + */ + list getScannerRows( + /** the Id of the Scanner to return rows from. This is an Id returned from the openScanner function. */ + 1: required i32 scannerId, + + /** number of rows to return */ + 2: i32 numRows = 1 + ) throws ( + 1: TIOError io, + + /** if the scannerId is invalid */ + 2: TIllegalArgument ia + ) + + /** + * Closes the scanner. Should be called if you need to close + * the Scanner before all results are read. + * + * Exhausted scanners are closed automatically. + */ + void closeScanner( + /** the Id of the Scanner to close **/ + 1: required i32 scannerId + ) throws ( + 1: TIOError io, + + /** if the scannerId is invalid */ + 2: TIllegalArgument ia + ) + +} Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java?rev=1471247&r1=1471246&r2=1471247&view=diff ============================================================================== --- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java (original) +++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java Wed Apr 24 04:45:44 2013 @@ -53,7 +53,6 @@ import org.junit.experimental.categories public class TestLoadIncrementalHFiles { private static final byte[] QUALIFIER = Bytes.toBytes("myqual"); private static final byte[] FAMILY = Bytes.toBytes("myfam"); - private static final String EXPECTED_MSG_FOR_NON_EXISTING_FAMILY = "Unmatched family names found"; private static final byte[][] SPLIT_KEYS = new byte[][] { Bytes.toBytes("ddd"), @@ -189,11 +188,6 @@ public class TestLoadIncrementalHFiles { HBaseAdmin admin = new HBaseAdmin(util.getConfiguration()); HTableDescriptor htd = new HTableDescriptor(TABLE); - // set real family name to upper case in purpose to simulate the case that - // family name in HFiles is invalid - HColumnDescriptor family = - new HColumnDescriptor(Bytes.toBytes(new String(FAMILY).toUpperCase())); - htd.addFamily(family); admin.createTable(htd, SPLIT_KEYS); HTable table = new HTable(util.getConfiguration(), TABLE); @@ -204,11 +198,6 @@ public class TestLoadIncrementalHFiles { assertTrue("Loading into table with non-existent 
family should have failed", false); } catch (Exception e) { assertTrue("IOException expected", e instanceof IOException); - // further check whether the exception message is correct - String errMsg = e.getMessage(); - assertTrue("Incorrect exception message, expected message: [" - + EXPECTED_MSG_FOR_NON_EXISTING_FAMILY + "], current message: [" + errMsg + "]", - errMsg.contains(EXPECTED_MSG_FOR_NON_EXISTING_FAMILY)); } table.close(); admin.close();