hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From mbau...@apache.org
Subject svn commit: r1345453 - in /hbase/branches/0.89-fb/src: main/java/org/apache/hadoop/hbase/ main/java/org/apache/hadoop/hbase/regionserver/ main/java/org/apache/hadoop/hbase/regionserver/wal/ main/java/org/apache/hadoop/hbase/thrift/ main/java/org/apache...
Date Sat, 02 Jun 2012 06:28:10 GMT
Author: mbautin
Date: Sat Jun  2 06:28:09 2012
New Revision: 1345453

URL: http://svn.apache.org/viewvc?rev=1345453&view=rev
Log:
[jira] [HBASE-6064] [89-fb] Add timestamp to the Thrift mutation API

Author: mbautin

Summary: Adding timestamp to the Mutation Thrift datatype. If timestamp is set to LATEST_TIMESTAMP in a particular mutation, the timestamp provided as a Thrift call argument is used for that mutation. Also adding a new unit test for the Thrift mutation API, fixing a couple of bugs, and applying some optimizations to the checkAndMutate implementation.

Test Plan: Unit tests

Reviewers: schen, liyintang, kannan, kranganathan, aaiyer, avf

Reviewed By: avf

CC: nzhang, chip

Differential Revision: https://reviews.facebook.net/D3381

Added:
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/Constants.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftMutationAPI.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/ThriftServerTestBase.java
Modified:
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HConstants.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
    hbase/branches/0.89-fb/src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HTestConst.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestMutationWriteToWAL.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerLegacy.java

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HConstants.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HConstants.java?rev=1345453&r1=1345452&r2=1345453&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HConstants.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HConstants.java Sat Jun  2 06:28:09 2012
@@ -549,6 +549,9 @@ public final class HConstants {
   public static final byte [] NO_NEXT_INDEXED_KEY = Bytes.toBytes("NO_NEXT_INDEXED_KEY");
 
   public static final int MULTIPUT_SUCCESS = -1;
+
+  public static final boolean[] BOOLEAN_VALUES = { false, true };
+
   private HConstants() {
     // Can't be instantiated with this ctor.
   }

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=1345453&r1=1345452&r2=1345453&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java Sat Jun  2 06:28:09 2012
@@ -2140,7 +2140,7 @@ public class HRegion implements HeapSize
 
     splitsAndClosesLock.readLock().lock();
     try {
-      RowLock lock = isPut ? ((Put)w).getRowLock() : ((Delete)w).getRowLock();
+      RowLock lock = ((Mutation) w).getRowLock();
       Get get = new Get(row, lock);
       checkFamily(family);
       get.addColumn(family, qualifier);
@@ -2156,9 +2156,13 @@ public class HRegion implements HeapSize
             && (expectedValue == null || expectedValue.length == 0)) {
           matches = true;
         } else if (result.size() == 1) {
-          //Compare the expected value with the actual value
-          byte [] actualValue = result.get(0).getValue();
-          matches = Bytes.equals(expectedValue, actualValue);
+          // Compare the expected value with the actual value without copying anything
+          KeyValue kv = result.get(0);
+          matches = Bytes.equals(
+              expectedValue, 0, expectedValue.length,
+              kv.getBuffer(), kv.getValueOffset(), kv.getValueLength());
+        } else {
+          throw new IOException("Internal error: more than one result returned for row/column");
         }
         //If matches put the new put or delete the new delete
         if (matches) {

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java?rev=1345453&r1=1345452&r2=1345453&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java Sat Jun  2 06:28:09 2012
@@ -68,6 +68,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.Syncable;
+import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java?rev=1345453&r1=1345452&r2=1345453&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java Sat Jun  2 06:28:09 2012
@@ -393,6 +393,24 @@ public class ThriftServerRunner implemen
   }
 
   /**
+   * Retrieve timestamp from the given mutation Thrift object. If the mutation timestamp is not set
+   * or is set to {@link HConstants#LATEST_TIMESTAMP}, the default timestamp is used.
+   * @param m a mutation object optionally specifying a timestamp
+   * @param defaultTimestamp default timestamp to use if the mutation does not specify timestamp
+   * @return the effective mutation timestamp
+   */
+  public static long getMutationTimestamp(Mutation m, long defaultTimestamp) {
+    if (!m.isSetTimestamp()) {
+      return defaultTimestamp;
+    }
+    long ts = m.getTimestamp();
+    if (ts == HConstants.LATEST_TIMESTAMP) {
+      return defaultTimestamp;
+    }
+    return ts;
+  }
+
+  /**
    * The HBaseHandler is a glue object that connects Thrift RPC calls to the
    * HBase client API primarily defined in the HBaseAdmin and HTable objects.
    */
@@ -919,23 +937,25 @@ public class ThriftServerRunner implemen
         boolean writeToWAL = false;
         for (Mutation m : mutations) {
           byte[][] famAndQf = KeyValue.parseColumn(getBytes(m.column));
+          long effectiveTimestamp = getMutationTimestamp(m, timestamp);
           if (m.isDelete) {
             if (delete == null) {
               delete = new Delete(rowBytes);
             }
             if (famAndQf.length == 1) {
-              delete.deleteFamily(famAndQf[0], timestamp);
+              delete.deleteFamily(famAndQf[0], effectiveTimestamp);
             } else {
-              delete.deleteColumns(famAndQf[0], famAndQf[1], timestamp);
+              delete.deleteColumns(famAndQf[0], famAndQf[1], effectiveTimestamp);
             }
           } else {
             if (put == null) {
               put = new Put(rowBytes, timestamp, null);
             }
             if (famAndQf.length == 1) {
-              put.add(famAndQf[0], HConstants.EMPTY_BYTE_ARRAY, getBytes(m.value));
+              put.add(famAndQf[0], HConstants.EMPTY_BYTE_ARRAY, effectiveTimestamp,
+                  getBytes(m.value));
             } else {
-              put.add(famAndQf[0], famAndQf[1], getBytes(m.value));
+              put.add(famAndQf[0], famAndQf[1], effectiveTimestamp, getBytes(m.value));
             }
           }
 
@@ -990,24 +1010,30 @@ public class ThriftServerRunner implemen
         boolean writeToWAL = false;
         for (Mutation m : mutations) {
           byte[][] famAndQf = KeyValue.parseColumn(getBytes(m.column));
+
+          // If this mutation has timestamp set, it takes precedence, otherwise we use the
+          // timestamp provided in the argument.
+          long effectiveTimestamp = getMutationTimestamp(m, timestamp);
+
           if (m.isDelete) {
             if (delete == null) {
               delete = new Delete(row);
             }
             // no qualifier, family only.
             if (famAndQf.length == 1) {
-              delete.deleteFamily(famAndQf[0], timestamp);
+              delete.deleteFamily(famAndQf[0], effectiveTimestamp);
             } else {
-              delete.deleteColumns(famAndQf[0], famAndQf[1], timestamp);
+              delete.deleteColumns(famAndQf[0], famAndQf[1], effectiveTimestamp);
             }
           } else {
             if (put == null) {
               put = new Put(row, timestamp, null);
             }
             if(famAndQf.length == 1) {
-              put.add(famAndQf[0], HConstants.EMPTY_BYTE_ARRAY, getBytes(m.value));
+              put.add(famAndQf[0], HConstants.EMPTY_BYTE_ARRAY, effectiveTimestamp,
+                  getBytes(m.value));
             } else {
-              put.add(famAndQf[0], famAndQf[1], getBytes(m.value));
+              put.add(famAndQf[0], famAndQf[1], effectiveTimestamp, getBytes(m.value));
             }
           }
           if (firstMutation) {
@@ -1086,19 +1112,24 @@ public class ThriftServerRunner implemen
         Delete delete = new Delete(rowBytes);
 
         for (Mutation m : mutations) {
-          byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(m.column));
+
+          // If this mutation has timestamp set, it takes precedence, otherwise we use the
+          // timestamp provided in the argument.
+          long effectiveTimestamp = getMutationTimestamp(m, timestamp);
+
+          byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytesRemaining(m.column));
           if (m.isDelete) {
             if (famAndQf.length == 1) {
-              delete.deleteFamily(famAndQf[0], timestamp);
+              delete.deleteFamily(famAndQf[0], effectiveTimestamp);
             } else {
-              delete.deleteColumns(famAndQf[0], famAndQf[1], timestamp);
+              delete.deleteColumns(famAndQf[0], famAndQf[1], effectiveTimestamp);
             }
           } else {
             byte[] valueBytes = getBytes(m.value);
             if (famAndQf.length == 1) {
-              put.add(famAndQf[0], HConstants.EMPTY_BYTE_ARRAY, valueBytes);
+              put.add(famAndQf[0], HConstants.EMPTY_BYTE_ARRAY, effectiveTimestamp, valueBytes);
             } else {
-              put.add(famAndQf[0], famAndQf[1], valueBytes);
+              put.add(famAndQf[0], famAndQf[1], effectiveTimestamp, valueBytes);
             }
           }
         }
@@ -1107,7 +1138,7 @@ public class ThriftServerRunner implemen
 
         if (!delete.isEmpty() && !put.isEmpty()) {
           // can't do both, not atomic, not good idea!
-          throw new IllegalArgumentException(
+          throw new IllegalArgument(
               "Single Thrift CheckAndMutate call cannot do both puts and deletes.");
         }
         if (!delete.isEmpty()) {
@@ -1120,7 +1151,7 @@ public class ThriftServerRunner implemen
                   famAndQfCheck.length != 1 ? famAndQfCheck[1]
                       : HConstants.EMPTY_BYTE_ARRAY, valueCheckBytes, put);
         }
-        throw new IllegalArgumentException(
+        throw new IllegalArgument(
             "Thrift CheckAndMutate call must do either put or delete.");
       } catch (IOException e) {
         throw new IOError(e.getMessage());

Added: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/Constants.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/Constants.java?rev=1345453&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/Constants.java (added)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/Constants.java Sat Jun  2 06:28:09 2012
@@ -0,0 +1,34 @@
+/**
+ * Autogenerated by Thrift Compiler (0.8.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hbase.thrift.generated;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class Constants {
+
+  public static final long LATEST_TIMESTAMP = 9223372036854775807L;
+
+}

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java?rev=1345453&r1=1345452&r2=1345453&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java Sat Jun  2 06:28:09 2012
@@ -37,6 +37,7 @@ public class Mutation implements org.apa
   private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRING, (short)2);
   private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING, (short)3);
   private static final org.apache.thrift.protocol.TField WRITE_TO_WAL_FIELD_DESC = new org.apache.thrift.protocol.TField("writeToWAL", org.apache.thrift.protocol.TType.BOOL, (short)4);
+  private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)5);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -48,13 +49,15 @@ public class Mutation implements org.apa
   public ByteBuffer column; // required
   public ByteBuffer value; // required
   public boolean writeToWAL; // required
+  public long timestamp; // required
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
     IS_DELETE((short)1, "isDelete"),
     COLUMN((short)2, "column"),
     VALUE((short)3, "value"),
-    WRITE_TO_WAL((short)4, "writeToWAL");
+    WRITE_TO_WAL((short)4, "writeToWAL"),
+    TIMESTAMP((short)5, "timestamp");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -77,6 +80,8 @@ public class Mutation implements org.apa
           return VALUE;
         case 4: // WRITE_TO_WAL
           return WRITE_TO_WAL;
+        case 5: // TIMESTAMP
+          return TIMESTAMP;
         default:
           return null;
       }
@@ -119,7 +124,8 @@ public class Mutation implements org.apa
   // isset id assignments
   private static final int __ISDELETE_ISSET_ID = 0;
   private static final int __WRITETOWAL_ISSET_ID = 1;
-  private BitSet __isset_bit_vector = new BitSet(2);
+  private static final int __TIMESTAMP_ISSET_ID = 2;
+  private BitSet __isset_bit_vector = new BitSet(3);
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -131,6 +137,8 @@ public class Mutation implements org.apa
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
     tmpMap.put(_Fields.WRITE_TO_WAL, new org.apache.thrift.meta_data.FieldMetaData("writeToWAL", org.apache.thrift.TFieldRequirementType.DEFAULT, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Mutation.class, metaDataMap);
   }
@@ -140,13 +148,16 @@ public class Mutation implements org.apa
 
     this.writeToWAL = true;
 
+    this.timestamp = 9223372036854775807L;
+
   }
 
   public Mutation(
     boolean isDelete,
     ByteBuffer column,
     ByteBuffer value,
-    boolean writeToWAL)
+    boolean writeToWAL,
+    long timestamp)
   {
     this();
     this.isDelete = isDelete;
@@ -155,6 +166,8 @@ public class Mutation implements org.apa
     this.value = value;
     this.writeToWAL = writeToWAL;
     setWriteToWALIsSet(true);
+    this.timestamp = timestamp;
+    setTimestampIsSet(true);
   }
 
   /**
@@ -171,6 +184,7 @@ public class Mutation implements org.apa
       this.value = other.value;
     }
     this.writeToWAL = other.writeToWAL;
+    this.timestamp = other.timestamp;
   }
 
   public Mutation deepCopy() {
@@ -185,6 +199,8 @@ public class Mutation implements org.apa
     this.value = null;
     this.writeToWAL = true;
 
+    this.timestamp = 9223372036854775807L;
+
   }
 
   public boolean isIsDelete() {
@@ -301,6 +317,29 @@ public class Mutation implements org.apa
     __isset_bit_vector.set(__WRITETOWAL_ISSET_ID, value);
   }
 
+  public long getTimestamp() {
+    return this.timestamp;
+  }
+
+  public Mutation setTimestamp(long timestamp) {
+    this.timestamp = timestamp;
+    setTimestampIsSet(true);
+    return this;
+  }
+
+  public void unsetTimestamp() {
+    __isset_bit_vector.clear(__TIMESTAMP_ISSET_ID);
+  }
+
+  /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */
+  public boolean isSetTimestamp() {
+    return __isset_bit_vector.get(__TIMESTAMP_ISSET_ID);
+  }
+
+  public void setTimestampIsSet(boolean value) {
+    __isset_bit_vector.set(__TIMESTAMP_ISSET_ID, value);
+  }
+
   public void setFieldValue(_Fields field, Object value) {
     switch (field) {
     case IS_DELETE:
@@ -335,6 +374,14 @@ public class Mutation implements org.apa
       }
       break;
 
+    case TIMESTAMP:
+      if (value == null) {
+        unsetTimestamp();
+      } else {
+        setTimestamp((Long)value);
+      }
+      break;
+
     }
   }
 
@@ -352,6 +399,9 @@ public class Mutation implements org.apa
     case WRITE_TO_WAL:
       return Boolean.valueOf(isWriteToWAL());
 
+    case TIMESTAMP:
+      return Long.valueOf(getTimestamp());
+
     }
     throw new IllegalStateException();
   }
@@ -371,6 +421,8 @@ public class Mutation implements org.apa
       return isSetValue();
     case WRITE_TO_WAL:
       return isSetWriteToWAL();
+    case TIMESTAMP:
+      return isSetTimestamp();
     }
     throw new IllegalStateException();
   }
@@ -424,6 +476,15 @@ public class Mutation implements org.apa
         return false;
     }
 
+    boolean this_present_timestamp = true;
+    boolean that_present_timestamp = true;
+    if (this_present_timestamp || that_present_timestamp) {
+      if (!(this_present_timestamp && that_present_timestamp))
+        return false;
+      if (this.timestamp != that.timestamp)
+        return false;
+    }
+
     return true;
   }
 
@@ -480,6 +541,16 @@ public class Mutation implements org.apa
         return lastComparison;
       }
     }
+    lastComparison = Boolean.valueOf(isSetTimestamp()).compareTo(typedOther.isSetTimestamp());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTimestamp()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.timestamp, typedOther.timestamp);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
     return 0;
   }
 
@@ -523,6 +594,10 @@ public class Mutation implements org.apa
     sb.append("writeToWAL:");
     sb.append(this.writeToWAL);
     first = false;
+    if (!first) sb.append(", ");
+    sb.append("timestamp:");
+    sb.append(this.timestamp);
+    first = false;
     sb.append(")");
     return sb.toString();
   }
@@ -599,6 +674,14 @@ public class Mutation implements org.apa
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
+          case 5: // TIMESTAMP
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.timestamp = iprot.readI64();
+              struct.setTimestampIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
           default:
             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
@@ -630,6 +713,9 @@ public class Mutation implements org.apa
       oprot.writeFieldBegin(WRITE_TO_WAL_FIELD_DESC);
       oprot.writeBool(struct.writeToWAL);
       oprot.writeFieldEnd();
+      oprot.writeFieldBegin(TIMESTAMP_FIELD_DESC);
+      oprot.writeI64(struct.timestamp);
+      oprot.writeFieldEnd();
       oprot.writeFieldStop();
       oprot.writeStructEnd();
     }
@@ -660,7 +746,10 @@ public class Mutation implements org.apa
       if (struct.isSetWriteToWAL()) {
         optionals.set(3);
       }
-      oprot.writeBitSet(optionals, 4);
+      if (struct.isSetTimestamp()) {
+        optionals.set(4);
+      }
+      oprot.writeBitSet(optionals, 5);
       if (struct.isSetIsDelete()) {
         oprot.writeBool(struct.isDelete);
       }
@@ -673,12 +762,15 @@ public class Mutation implements org.apa
       if (struct.isSetWriteToWAL()) {
         oprot.writeBool(struct.writeToWAL);
       }
+      if (struct.isSetTimestamp()) {
+        oprot.writeI64(struct.timestamp);
+      }
     }
 
     @Override
     public void read(org.apache.thrift.protocol.TProtocol prot, Mutation struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
-      BitSet incoming = iprot.readBitSet(4);
+      BitSet incoming = iprot.readBitSet(5);
       if (incoming.get(0)) {
         struct.isDelete = iprot.readBool();
         struct.setIsDeleteIsSet(true);
@@ -695,6 +787,10 @@ public class Mutation implements org.apa
         struct.writeToWAL = iprot.readBool();
         struct.setWriteToWALIsSet(true);
       }
+      if (incoming.get(4)) {
+        struct.timestamp = iprot.readI64();
+        struct.setTimestampIsSet(true);
+      }
     }
   }
 

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Bytes.java?rev=1345453&r1=1345452&r2=1345453&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Bytes.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Bytes.java Sat Jun  2 06:28:09 2012
@@ -242,7 +242,8 @@ public class Bytes {
   }
 
   /**
-   * Returns a new byte array, copied from the passed ByteBuffer.
+   * Returns a new byte array, copied from the passed ByteBuffer. Starts from the array offset
+   * of the buffer and copies bytes to the limit of the buffer.
    * @param bb A ByteBuffer
    * @return the byte array
    */
@@ -254,6 +255,19 @@ public class Bytes {
   }
 
   /**
+   * Returns a new byte array, copied from the passed ByteBuffer. Starts from the current position
+   * in the buffer and copies all the remaining bytes to the limit of the buffer.
+   * @param bb A ByteBuffer
+   * @return the byte array
+   */
+  public static byte[] toBytesRemaining(ByteBuffer bb) {
+    int length = bb.remaining();
+    byte [] result = new byte[length];
+    System.arraycopy(bb.array(), bb.arrayOffset() + bb.position(), result, 0, length);
+    return result;
+  }
+
+  /**
    * @param b Presumed UTF-8 encoded byte array.
    * @return String made from <code>b</code>
    */
@@ -943,6 +957,15 @@ public class Bytes {
             ? false : compareTo(left, right) == 0);
   }
 
+  public static boolean equals(final byte[] left, int leftOffset, int leftLength,
+      final byte[] right, int rightOffset, int rightLength) {
+    if (left == null && right == null) {
+      return true;
+    }
+    return (left == null || right == null || (leftLength != rightLength) ? false : compareTo(left,
+        leftOffset, leftLength, right, rightOffset, rightLength) == 0);
+  }
+
   /**
    * Return true if the byte array on the right is a prefix of the byte
    * array on the left.

Modified: hbase/branches/0.89-fb/src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift?rev=1345453&r1=1345452&r2=1345453&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift (original)
+++ hbase/branches/0.89-fb/src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift Sat Jun  2 06:28:09 2012
@@ -50,6 +50,8 @@ typedef binary Text
 typedef binary Bytes
 typedef i32    ScannerID
 
+const i64 LATEST_TIMESTAMP = 0x7fffffffffffffff;
+
 /**
  * TCell - Used to transport a cell value (byte[]) and the timestamp it was
  * stored with together as a result for get and getRow methods. This promotes
@@ -99,6 +101,7 @@ struct Mutation {
   2:Text column,
   3:Text value,
   4:bool writeToWAL = 1
+  5:i64 timestamp = LATEST_TIMESTAMP
 }
 
 /**

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java?rev=1345453&r1=1345452&r2=1345453&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java Sat Jun  2 06:28:09 2012
@@ -1542,4 +1542,12 @@ REGION_LOOP:
   public void setFileSystemURI(String fsURI) {
     this.fsURI = fsURI;
   }
+
+  /**
+   * Sets the current thread name to the caller's method name. 
+   */
+  public static void setThreadNameFromMethod() {
+    String methodName = new Throwable().getStackTrace()[1].getMethodName();
+    Thread.currentThread().setName(methodName);
+  }
 }

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HTestConst.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HTestConst.java?rev=1345453&r1=1345452&r2=1345453&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HTestConst.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HTestConst.java Sat Jun  2 06:28:09 2012
@@ -46,6 +46,18 @@ public class HTestConst {
 
   public static final List<ColumnDescriptor> DEFAULT_COLUMN_DESC_LIST;
 
+  public static byte[] getRowBytes(int i) {
+    return Bytes.toBytes("row" + i);
+  }
+
+  public static String getQualStr(int i) {
+    return "column" + i;
+  }
+
+  public static byte[] getCFQualBytes(int i) {
+    return Bytes.toBytes(DEFAULT_CF_STR + KeyValue.COLUMN_FAMILY_DELIMITER + getQualStr(i));
+  }
+
   static {
     List<ColumnDescriptor> cdList = new ArrayList<ColumnDescriptor>();
     ColumnDescriptor cd = new ColumnDescriptor();

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestMutationWriteToWAL.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestMutationWriteToWAL.java?rev=1345453&r1=1345452&r2=1345453&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestMutationWriteToWAL.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestMutationWriteToWAL.java Sat Jun  2 06:28:09 2012
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.thrift;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
-import java.net.InetAddress;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -44,51 +43,16 @@ import org.apache.hadoop.hbase.thrift.ge
 import org.apache.hadoop.hbase.thrift.generated.TCell;
 import org.apache.hadoop.hbase.thrift.generated.TRowResult;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.thrift.protocol.TBinaryProtocol;
-import org.apache.thrift.protocol.TProtocol;
-import org.apache.thrift.transport.TFramedTransport;
-import org.apache.thrift.transport.TSocket;
-import org.apache.thrift.transport.TTransport;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
 import org.junit.Test;
 
 /** Tests switching writing to WAL on and off in the Thrift Mutation API */
-public class TestMutationWriteToWAL {
+public class TestMutationWriteToWAL extends ThriftServerTestBase {
 
   private static Log LOG = LogFactory.getLog(TestMutationWriteToWAL.class);
 
-  private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  private static ThriftServerRunner serverRunner;
-  private static Thread thriftServerThread;
-  private static int thriftServerPort;
-
   private static final int NUM_ROWS = 10;
   private static final int NUM_COLS_PER_ROW = 25;
 
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    Thread.currentThread().setName(TestMutationWriteToWAL.class.getSimpleName() + ".setUp");
-    Configuration conf = TEST_UTIL.getConfiguration();
-    thriftServerPort = HBaseTestingUtility.randomFreePort();
-    conf.setInt(HConstants.THRIFT_PROXY_PREFIX + HConstants.THRIFT_PORT_SUFFIX,
-        thriftServerPort);
-    TEST_UTIL.startMiniCluster();
-    serverRunner = new ThriftServerRunner(conf, HConstants.THRIFT_PROXY_PREFIX);
-
-    thriftServerThread = new Thread(new Runnable() {
-      @Override
-      public void run() {
-        Thread.currentThread().setName("thriftServerThread");
-        serverRunner.run();
-      }
-    });
-    thriftServerThread.start();
-    LOG.info("Waiting for Thrift server to come online");
-    HBaseTestingUtility.waitForHostPort(HConstants.LOCALHOST, thriftServerPort);
-    LOG.info("Thrift server is online");
-  }
-
   private static final String getRow(int i) {
     return String.format("row%05d", i);
   }
@@ -107,20 +71,13 @@ public class TestMutationWriteToWAL {
 
   @Test
   public void testMutationWriteToWAL() throws Exception{
-    Thread.currentThread().setName(TestMutationWriteToWAL.class.getSimpleName() + " test");
+    HBaseTestingUtility.setThreadNameFromMethod();
     final Configuration conf = TEST_UTIL.getConfiguration();
     FileSystem fs = FileSystem.get(conf);
     List<String> expectedLogEntries = new ArrayList<String>();
 
-    LOG.info("Connecting Thrift server on port " + thriftServerPort);
-    TSocket sock = new TSocket(InetAddress.getLocalHost().getHostName(),
-        thriftServerPort);
-    TTransport transport = new TFramedTransport(sock);
-    sock.open();
-
     try {
-      TProtocol prot = new TBinaryProtocol(transport);
-      Hbase.Client client = new Hbase.Client(prot);
+      Hbase.Client client = createClient();
       client.createTable(HTestConst.DEFAULT_TABLE_BYTE_BUF, HTestConst.DEFAULT_COLUMN_DESC_LIST);
       int expectedEntriesForRow[] = new int[NUM_ROWS];
       for (int i = NUM_ROWS - 1; i >= 0; --i) {
@@ -142,7 +99,7 @@ public class TestMutationWriteToWAL {
 
           Mutation m = new Mutation(false, ByteBuffer.wrap(Bytes.toBytes(
               HTestConst.DEFAULT_CF_STR + ":" + qual)),
-              ByteBuffer.wrap(Bytes.toBytes(value)), writeToWAL);
+              ByteBuffer.wrap(Bytes.toBytes(value)), writeToWAL, HConstants.LATEST_TIMESTAMP);
           m.isDelete = isDelete;
 
           mutations.add(m);
@@ -185,7 +142,7 @@ public class TestMutationWriteToWAL {
         }
       }
     } finally {
-      sock.close();
+      closeClientSockets();
     }
 
     TEST_UTIL.shutdownMiniHBaseCluster();
@@ -225,13 +182,4 @@ public class TestMutationWriteToWAL {
     assertEquals(expectedLogEntries.toString(), actualLogEntries.toString());
   }
 
-  @AfterClass
-  public static void tearDownAfterClass() throws Exception {
-    Thread.currentThread().setName(TestMutationWriteToWAL.class.getSimpleName() + ".tearDown");
-    LOG.info("Shutting down Thrift server");
-    serverRunner.shutdown();
-    thriftServerThread.join();
-    TEST_UTIL.shutdownMiniCluster();
-  }
-
 }

Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftMutationAPI.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftMutationAPI.java?rev=1345453&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftMutationAPI.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftMutationAPI.java Sat Jun  2 06:28:09 2012
@@ -0,0 +1,264 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.thrift;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTestConst;
+import org.apache.hadoop.hbase.thrift.generated.IOError;
+import org.apache.hadoop.hbase.thrift.generated.IllegalArgument;
+import org.apache.hadoop.hbase.thrift.generated.Mutation;
+import org.apache.hadoop.hbase.thrift.generated.BatchMutation;
+import org.apache.hadoop.hbase.thrift.generated.Hbase;
+import org.apache.hadoop.hbase.thrift.generated.TCell;
+import org.apache.hadoop.hbase.thrift.generated.TRowResult;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.thrift.TException;
+import org.junit.Test;
+import static org.apache.hadoop.hbase.HTestConst.getRowBytes;
+import static org.apache.hadoop.hbase.HTestConst.getCFQualBytes;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+public class TestThriftMutationAPI extends ThriftServerTestBase {
+
+  private static final int NUM_ROWS = 3;
+  private static final int NUM_COLS = 3;
+  private static final int NUM_ITER = 10000;
+
+  private static final int TIMESTAMP_STEP = 10;
+
+  private static final double DELETE_PROB = 0.1;
+
+  /** Enable this when debugging */
+  private static final boolean VERBOSE = false;
+
+  private Map<String, Mutation> latestPut = new HashMap<String, Mutation>();
+  private Map<String, Long> latestDeleteTS = new HashMap<String, Long>();
+
+  private int numPut = 0;
+  private int numDelete = 0;
+  private int numMutateRow = 0;
+  private int numCheckAndMutate = 0;
+  private int numMutateRows = 0;
+
+  private static byte[] getValue(int iRow, int iCol, long ts) {
+    return Bytes.toBytes("value_row" + iRow + "_col" + iCol + "_ts" + ts);
+  }
+
+  private static String getMutationKey(ByteBuffer row, Mutation m) {
+    return Bytes.toStringBinary(row) + "_" + Bytes.toStringBinary(m.column);
+  }
+
+  private Mutation getExpectedResult(String mutationKey) {
+    Mutation m = latestPut.get(mutationKey);
+    if (m == null) {
+      return m;
+    }
+    Long delTS = latestDeleteTS.get(mutationKey);
+    if (delTS == null || delTS < m.getTimestamp()) {
+      return m;
+    }
+    return null;
+  }
+
+  private void applyMutation(ByteBuffer row, Mutation m, long defaultTS) {
+    long ts = m.getTimestamp();
+    if (ts == HConstants.LATEST_TIMESTAMP) {
+      ts = defaultTS;
+    }
+    String mutationKey = getMutationKey(row, m);
+    if (m.isDelete) {
+      if (VERBOSE) {
+        System.err.println("Delete: " + mutationKey + ", " + ts);
+      }
+      Long curDeleteTS = latestDeleteTS.get(mutationKey);
+      if (curDeleteTS == null || curDeleteTS < ts) {
+        latestDeleteTS.put(mutationKey, ts);
+      }
+    } else {
+      if (VERBOSE) {
+        System.err.println("Put: " + mutationKey + ", " + ts);
+      }
+      Mutation curPut = latestPut.get(mutationKey);
+      if (curPut == null || curPut.getTimestamp() < ts) {
+        m = new Mutation(m);
+        m.setTimestamp(ts);
+        latestPut.put(mutationKey, m);
+      }
+    }
+  }
+
+  @Test
+  public void testMutations() throws Exception {
+    HBaseTestingUtility.setThreadNameFromMethod();
+    Hbase.Client client = createClient();
+    client.createTable(HTestConst.DEFAULT_TABLE_BYTE_BUF, HTestConst.DEFAULT_COLUMN_DESC_LIST);
+    Random rand = new Random(91729817987L);
+    List<BatchMutation> bms = new ArrayList<BatchMutation>();
+    Set<String> updatedKeys = new HashSet<String>();
+    Map<String, TRowResult> rowResults = new HashMap<String, TRowResult>();
+    for (int iter = 0; iter < NUM_ITER; ++iter) {
+      bms.clear();
+      updatedKeys.clear();
+      rowResults.clear();
+      long baseTS = iter * TIMESTAMP_STEP;
+      long defaultTS = baseTS + TIMESTAMP_STEP / 2;
+
+      generateRandomMutations(rand, bms, updatedKeys, baseTS, defaultTS);
+
+      if (bms.size() == 1) {
+        doSingleRowMutation(client, rand, bms, defaultTS);
+      } else {
+        client.mutateRowsTs(HTestConst.DEFAULT_TABLE_BYTE_BUF, bms, defaultTS);
+        numMutateRows++;
+      }
+      
+      for (BatchMutation bm : bms) {
+        for (Mutation m : bm.getMutations()) {
+          applyMutation(bm.bufferForRow(), m, defaultTS);
+        }
+      }
+
+      for (String k : updatedKeys) {
+        checkRowCol(client, rowResults, k);
+      }
+    }
+
+    assertTrue(numPut > 0);
+    assertTrue(numDelete > 0);
+    assertTrue(numMutateRow > 0);
+    assertTrue(numCheckAndMutate > 0);
+    assertTrue(numMutateRows > 0);
+  }
+
+  private void checkRowCol(Hbase.Client client, Map<String, TRowResult> rowResults, String k)
+      throws IOError, TException {
+    String[] rowCol = k.split("_");
+    String row = rowCol[0];
+    String col = rowCol[1];
+    ByteBuffer rowBuf = ByteBuffer.wrap(Bytes.toBytes(row));
+    ByteBuffer colBuf = ByteBuffer.wrap(Bytes.toBytes(col));
+    TRowResult rowResult = rowResults.get(row);
+    if (rowResult == null) {
+      List<TRowResult> result = client.getRow(HTestConst.DEFAULT_TABLE_BYTE_BUF, rowBuf);
+      if (!result.isEmpty()) {
+        assertEquals(1, result.size());
+        rowResult = result.get(0);
+        rowResults.put(row, rowResult);
+      }
+    }
+    TCell c = null;
+    if (rowResult != null) {
+      Map<ByteBuffer, TCell> columns = rowResult.getColumns();
+      c = columns.get(colBuf);
+    }
+    Mutation expectedResult = getExpectedResult(k);
+    if (expectedResult == null) {
+      if (c != null) {
+        assertNull("Expecting " + k + " to be absent, but found " +
+            Bytes.toStringBinaryRemaining(c.bufferForValue()), c);
+      }
+    } else {
+      assertEquals(Bytes.toString(expectedResult.getValue()),
+          Bytes.toString(c.getValue()));
+    }
+  }
+
+  private void doSingleRowMutation(Hbase.Client client, Random rand, List<BatchMutation> bms,
+      long defaultTS) throws IOError, IllegalArgument, TException {
+    BatchMutation bm = bms.get(0);
+    if (rand.nextBoolean()) {
+      // Exercise check-and-mutate operations too. Must have an existing value to check,
+      // and all mutations should be of the same type (put/delete).
+      int numDeletes = 0;
+      for (Mutation m : bm.getMutations()) {
+        if (m.isDelete) {
+          ++numDeletes;
+        }
+      }
+      if (numDeletes == 0 || numDeletes == bm.getMutations().size()) {
+        for (Mutation m : bm.getMutations()) {
+          String mutationKey = getMutationKey(bm.bufferForRow(), m);
+          Mutation currentValue = getExpectedResult(mutationKey);
+          if (currentValue != null) {
+            if (VERBOSE) {
+              System.err.println("Will do checkAndMutate on " + mutationKey);
+            }
+            client.checkAndMutateRowTs(HTestConst.DEFAULT_TABLE_BYTE_BUF, bm.bufferForRow(),
+                currentValue.bufferForColumn(), currentValue.bufferForValue(),
+                bm.getMutations(), defaultTS);
+            numCheckAndMutate++;
+            return;
+          }
+        }
+      }
+    }
+    
+    // We did not check-and-mutate, do a normal operation.
+    client.mutateRowTs(HTestConst.DEFAULT_TABLE_BYTE_BUF,
+        bm.bufferForRow(), bm.getMutations(), defaultTS);
+    numMutateRow++;
+  }
+
+  private void generateRandomMutations(Random rand, List<BatchMutation> bms,
+      Set<String> updatedKeys, long baseTS, long defaultTS) {
+    while (bms.isEmpty()) {
+      for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
+        if (rand.nextBoolean()) {
+          BatchMutation bm = new BatchMutation();
+          bm.setRow(getRowBytes(iRow));
+          bms.add(bm);
+          while (bm.mutations == null || bm.mutations.isEmpty()) {
+            for (int iCol = 0; iCol < NUM_COLS; ++iCol) {
+              if (rand.nextBoolean()) {
+                Mutation m = new Mutation();
+                m.setIsDelete(rand.nextFloat() < DELETE_PROB);
+                if (m.isDelete) {
+                  numDelete++;
+                } else {
+                  numPut++;
+                }
+                m.setColumn(getCFQualBytes(iCol));
+                // Make timestamps correlated with the iteration number but with random
+                // shuffling.
+                long ts = rand.nextBoolean() ? (baseTS + rand.nextInt(100))
+                    : HConstants.LATEST_TIMESTAMP;
+                long effectiveTS = ts == HConstants.LATEST_TIMESTAMP ? defaultTS : ts;  
+                m.setTimestamp(ts);
+                m.setValue(getValue(iRow, iCol, effectiveTS));
+                bm.addToMutations(m);
+                updatedKeys.add(getMutationKey(bm.row, m));
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
+}

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java?rev=1345453&r1=1345452&r2=1345453&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java Sat Jun  2 06:28:09 2012
@@ -36,8 +36,11 @@ import org.apache.hadoop.hbase.HBaseTest
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.filter.ParseFilter;
+import org.apache.hadoop.hbase.io.hfile.Compression;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.thrift.generated.BatchMutation;
 import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor;
+import org.apache.hadoop.hbase.thrift.generated.Constants;
 import org.apache.hadoop.hbase.thrift.generated.Hbase;
 import org.apache.hadoop.hbase.thrift.generated.IOError;
 import org.apache.hadoop.hbase.thrift.generated.Mutation;
@@ -55,6 +58,8 @@ import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import com.google.common.collect.ImmutableList;
+
 /**
  * Unit testing for ThriftServerRunner.HBaseHandler, a part of the
  * org.apache.hadoop.hbase.thrift package.
@@ -76,10 +81,13 @@ public class TestThriftServer {
   static final ByteBuffer rowAname = asByteBuffer("rowA");
   static final ByteBuffer rowBname = asByteBuffer("rowB");
   static final ByteBuffer valueAname = asByteBuffer("valueA");
+  static final ByteBuffer valueAModified = asByteBuffer("valueAModified");
   static final ByteBuffer valueBname = asByteBuffer("valueB");
   static final ByteBuffer valueCname = asByteBuffer("valueC");
   static final ByteBuffer valueDname = asByteBuffer("valueD");
 
+  private static final int MAX_VERSIONS = 2;
+
   @BeforeClass
   public static void beforeClass() throws Exception {
     UTIL.startMiniCluster();
@@ -98,7 +106,6 @@ public class TestThriftServer {
    *
    * @throws Exception
    */
-  @Test
   public void testAll() throws Exception {
     // Run all tests
     doTestTableCreateDrop();
@@ -118,6 +125,7 @@ public class TestThriftServer {
    *
    * @throws Exception
    */
+  @Test
   public void doTestTableCreateDrop() throws Exception {
     ThriftServerRunner.HBaseHandler handler =
       new ThriftServerRunner.HBaseHandler(UTIL.getConfiguration());
@@ -147,6 +155,7 @@ public class TestThriftServer {
   /**
    * Tests if the metrics for thrift handler work correctly
    */
+  @Test
   public void doTestThriftMetrics() throws Exception {
     Configuration conf = UTIL.getConfiguration();
     ThriftMetrics metrics = getMetrics(conf);
@@ -242,6 +251,7 @@ public class TestThriftServer {
    *
    * @throws Exception
    */
+  @Test
   public void doTestTableMutations() throws Exception {
     ThriftServerRunner.HBaseHandler handler =
       new ThriftServerRunner.HBaseHandler(UTIL.getConfiguration());
@@ -251,65 +261,102 @@ public class TestThriftServer {
   public static void doTestTableMutations(Hbase.Iface handler) throws Exception {
     // Setup
     handler.createTable(tableAname, getColumnDescriptors());
+    try {
+      // Apply a few Mutations to rowA
+      handler.mutateRow(tableAname, rowAname, getMutations());
+  
+      // Assert that the changes were made
+      assertBufferEquals(valueAname,
+        handler.get(tableAname, rowAname, columnAname).get(0).value);
+      TRowResult rowResult1 = handler.getRow(tableAname, rowAname).get(0);
+      assertBufferEquals(rowAname, rowResult1.row);
+      assertBufferEquals(valueBname,
+        rowResult1.columns.get(columnBname).value);
+  
+      // Apply a few BatchMutations for rowA and rowB
+      handler.mutateRows(tableAname, getBatchMutations());
+  
+      // Assert that changes were made to rowA
+      List<TCell> cells = handler.get(tableAname, rowAname, columnAname);
+      assertFalse(cells.size() > 0);
+      assertBufferEquals(valueCname, handler.get(tableAname, rowAname, columnBname).get(0).value);
+      List<TCell> versions = handler.getVer(tableAname, rowAname, columnBname, MAXVERSIONS);
+      assertBufferEquals(valueCname, versions.get(0).value);
+      assertBufferEquals(valueBname, versions.get(1).value);
+  
+      // Assert that changes were made to rowB
+      TRowResult rowResult2 = handler.getRow(tableAname, rowBname).get(0);
+      assertBufferEquals(rowBname, rowResult2.row);
+      assertBufferEquals(valueCname, rowResult2.columns.get(columnAname).value);
+      assertBufferEquals(valueDname, rowResult2.columns.get(columnBname).value);
+  
+      // Apply some deletes
+      handler.deleteAll(tableAname, rowAname, columnBname);
+      handler.deleteAllRow(tableAname, rowBname);
+  
+      // Assert that the deletes were applied
+      int size = handler.get(tableAname, rowAname, columnBname).size();
+      assertEquals(0, size);
+      size = handler.getRow(tableAname, rowBname).size();
+      assertEquals(0, size);
+  
+      // Try null mutation
+      List<Mutation> mutations = new ArrayList<Mutation>();
+      mutations.add(new Mutation(false, columnAname, null, true, HConstants.LATEST_TIMESTAMP));
+      handler.mutateRow(tableAname, rowAname, mutations);
+      TRowResult rowResult3 = handler.getRow(tableAname, rowAname).get(0);
+      assertEquals(rowAname, rowResult3.row);
+      assertEquals(0, rowResult3.columns.get(columnAname).value.remaining());
+  
+      // Try specifying timestamps with mutations
+      mutations.clear();
+      handler.deleteAllRow(tableAname, rowAname);
+      assertEquals(0, handler.getRow(tableAname, rowAname).size());
+
+      // Use a high timestamp to make sure our insertions come later than the above delete-all with
+      // an auto-generated timestamp.
+      // Testing mutateRow.
+      long highTS = HConstants.LATEST_TIMESTAMP / 2;
+      assertEquals(0, handler.getRow(tableAname, rowAname).size());
+      mutations.add(new Mutation(false, columnAname, valueAModified, true, highTS + 257));
+      mutations.add(new Mutation(false, columnAname, valueAname, true, highTS + 135));
+      handler.mutateRow(tableAname, rowAname, mutations);
+      List<TRowResult> results = handler.getRow(tableAname, rowAname);
+      assertEquals(1, results.size());
+      assertBufferEquals(valueAModified, results.get(0).getColumns().get(columnAname).value);
+
+      handler.deleteAllTs(tableAname, rowAname, columnAname, highTS + 999);
+      assertEquals(0, handler.getRow(tableAname, rowAname).size());
+
+      // Testing mutateRowTs.
+      for (boolean firstIsDeletion : HConstants.BOOLEAN_VALUES) {
+        for (boolean secondIsDeletion : HConstants.BOOLEAN_VALUES) {
+          mutations.clear();
+          // This mutation has a default timestamp specified, so it will be executed with the
+          // timestamp given to the mutateRowTs method.
+          mutations.add(new Mutation(firstIsDeletion, columnAname, valueAModified, true,
+              HConstants.LATEST_TIMESTAMP)); 
+          mutations.add(new Mutation(secondIsDeletion, columnAname, valueAname, true,
+              highTS + 1200));
+          handler.mutateRowTs(tableAname, rowAname, mutations, highTS + 1100);
+          results = handler.getRow(tableAname, rowAname);
+          if (secondIsDeletion) {
+            assertEquals(0, results.size());
+          } else {
+            assertEquals(1, results.size());
+            assertBufferEquals(valueAname, results.get(0).getColumns().get(columnAname).value);
+          }
+          handler.deleteAllRowTs(tableAname, rowAname, highTS + 10000);
+          assertEquals(0, handler.getRow(tableAname, rowAname).size());
+          highTS += 20000;
+        }
+      }
 
-    // Apply a few Mutations to rowA
-    //     mutations.add(new Mutation(false, columnAname, valueAname));
-    //     mutations.add(new Mutation(false, columnBname, valueBname));
-    handler.mutateRow(tableAname, rowAname, getMutations());
-
-    // Assert that the changes were made
-    assertBufferEquals(valueAname,
-      handler.get(tableAname, rowAname, columnAname).get(0).value);
-    TRowResult rowResult1 = handler.getRow(tableAname, rowAname).get(0);
-    assertBufferEquals(rowAname, rowResult1.row);
-    assertBufferEquals(valueBname,
-      rowResult1.columns.get(columnBname).value);
-
-    // Apply a few BatchMutations for rowA and rowB
-    // rowAmutations.add(new Mutation(true, columnAname));
-    // rowAmutations.add(new Mutation(false, columnBname, valueCname));
-    // batchMutations.add(new BatchMutation(rowAname, rowAmutations));
-    // Mutations to rowB
-    // rowBmutations.add(new Mutation(false, columnAname, valueCname));
-    // rowBmutations.add(new Mutation(false, columnBname, valueDname));
-    // batchMutations.add(new BatchMutation(rowBname, rowBmutations));
-    handler.mutateRows(tableAname, getBatchMutations());
-
-    // Assert that changes were made to rowA
-    List<TCell> cells = handler.get(tableAname, rowAname, columnAname);
-    assertFalse(cells.size() > 0);
-    assertBufferEquals(valueCname, handler.get(tableAname, rowAname, columnBname).get(0).value);
-    List<TCell> versions = handler.getVer(tableAname, rowAname, columnBname, MAXVERSIONS);
-    assertBufferEquals(valueCname, versions.get(0).value);
-    assertBufferEquals(valueBname, versions.get(1).value);
-
-    // Assert that changes were made to rowB
-    TRowResult rowResult2 = handler.getRow(tableAname, rowBname).get(0);
-    assertBufferEquals(rowBname, rowResult2.row);
-    assertBufferEquals(valueCname, rowResult2.columns.get(columnAname).value);
-    assertBufferEquals(valueDname, rowResult2.columns.get(columnBname).value);
-
-    // Apply some deletes
-    handler.deleteAll(tableAname, rowAname, columnBname);
-    handler.deleteAllRow(tableAname, rowBname);
-
-    // Assert that the deletes were applied
-    int size = handler.get(tableAname, rowAname, columnBname).size();
-    assertEquals(0, size);
-    size = handler.getRow(tableAname, rowBname).size();
-    assertEquals(0, size);
-
-    // Try null mutation
-    List<Mutation> mutations = new ArrayList<Mutation>();
-    mutations.add(new Mutation(false, columnAname, null, true));
-    handler.mutateRow(tableAname, rowAname, mutations);
-    TRowResult rowResult3 = handler.getRow(tableAname, rowAname).get(0);
-    assertEquals(rowAname, rowResult3.row);
-    assertEquals(0, rowResult3.columns.get(columnAname).value.remaining());
-
-    // Teardown
-    handler.disableTable(tableAname);
-    handler.deleteTable(tableAname);
+    } finally {
+      // Teardown
+      handler.disableTable(tableAname);
+      handler.deleteTable(tableAname);
+    }
   }
 
   /**
@@ -319,77 +366,80 @@ public class TestThriftServer {
    *
    * @throws Exception
    */
+  @Test
   public void doTestTableTimestampsAndColumns() throws Exception {
     // Setup
     ThriftServerRunner.HBaseHandler handler =
       new ThriftServerRunner.HBaseHandler(UTIL.getConfiguration());
     handler.createTable(tableAname, getColumnDescriptors());
 
-    // Apply timestamped Mutations to rowA
-    long time1 = System.currentTimeMillis();
-    handler.mutateRowTs(tableAname, rowAname, getMutations(), time1);
-
-    Thread.sleep(1000);
-
-    // Apply timestamped BatchMutations for rowA and rowB
-    long time2 = System.currentTimeMillis();
-    handler.mutateRowsTs(tableAname, getBatchMutations(), time2);
-
-    // Apply an overlapping timestamped mutation to rowB
-    handler.mutateRowTs(tableAname, rowBname, getMutations(), time2);
-
-    // the getVerTs is [inf, ts) so you need to increment one.
-    time1 += 1;
-    time2 += 2;
-
-    // Assert that the timestamp-related methods retrieve the correct data
-    assertEquals(2, handler.getVerTs(tableAname, rowAname, columnBname, time2,
-      MAXVERSIONS).size());
-    assertEquals(1, handler.getVerTs(tableAname, rowAname, columnBname, time1,
-      MAXVERSIONS).size());
-
-    TRowResult rowResult1 = handler.getRowTs(tableAname, rowAname, time1).get(0);
-    TRowResult rowResult2 = handler.getRowTs(tableAname, rowAname, time2).get(0);
-    // columnA was completely deleted
-    //assertTrue(Bytes.equals(rowResult1.columns.get(columnAname).value, valueAname));
-    assertBufferEquals(rowResult1.columns.get(columnBname).value, valueBname);
-    assertBufferEquals(rowResult2.columns.get(columnBname).value, valueCname);
-
-    // ColumnAname has been deleted, and will never be visible even with a getRowTs()
-    assertFalse(rowResult2.columns.containsKey(columnAname));
-
-    List<ByteBuffer> columns = new ArrayList<ByteBuffer>();
-    columns.add(columnBname);
-
-    rowResult1 = handler.getRowWithColumns(tableAname, rowAname, columns).get(0);
-    assertBufferEquals(rowResult1.columns.get(columnBname).value, valueCname);
-    assertFalse(rowResult1.columns.containsKey(columnAname));
-
-    rowResult1 = handler.getRowWithColumnsTs(tableAname, rowAname, columns, time1).get(0);
-    assertBufferEquals(rowResult1.columns.get(columnBname).value, valueBname);
-    assertFalse(rowResult1.columns.containsKey(columnAname));
-
-    // Apply some timestamped deletes
-    // this actually deletes _everything_.
-    // nukes everything in columnB: forever.
-    handler.deleteAllTs(tableAname, rowAname, columnBname, time1);
-    handler.deleteAllRowTs(tableAname, rowBname, time2);
-
-    // Assert that the timestamp-related methods retrieve the correct data
-    int size = handler.getVerTs(tableAname, rowAname, columnBname, time1, MAXVERSIONS).size();
-    assertEquals(0, size);
-
-    size = handler.getVerTs(tableAname, rowAname, columnBname, time2, MAXVERSIONS).size();
-    assertEquals(1, size);
-
-    // should be available....
-    assertBufferEquals(handler.get(tableAname, rowAname, columnBname).get(0).value, valueCname);
-
-    assertEquals(0, handler.getRow(tableAname, rowBname).size());
-
-    // Teardown
-    handler.disableTable(tableAname);
-    handler.deleteTable(tableAname);
+    try {
+      // Apply timestamped Mutations to rowA
+      long time1 = System.currentTimeMillis();
+      handler.mutateRowTs(tableAname, rowAname, getMutations(), time1);
+  
+      Thread.sleep(1000);
+  
+      // Apply timestamped BatchMutations for rowA and rowB
+      long time2 = System.currentTimeMillis();
+      handler.mutateRowsTs(tableAname, getBatchMutations(), time2);
+  
+      // Apply an overlapping timestamped mutation to rowB
+      handler.mutateRowTs(tableAname, rowBname, getMutations(), time2);
+  
+      // the getVerTs is [inf, ts) so you need to increment one.
+      time1 += 1;
+      time2 += 2;
+  
+      // Assert that the timestamp-related methods retrieve the correct data
+      assertEquals(2, handler.getVerTs(tableAname, rowAname, columnBname, time2,
+        MAXVERSIONS).size());
+      assertEquals(1, handler.getVerTs(tableAname, rowAname, columnBname, time1,
+        MAXVERSIONS).size());
+  
+      TRowResult rowResult1 = handler.getRowTs(tableAname, rowAname, time1).get(0);
+      TRowResult rowResult2 = handler.getRowTs(tableAname, rowAname, time2).get(0);
+      // columnA was completely deleted
+      //assertTrue(Bytes.equals(rowResult1.columns.get(columnAname).value, valueAname));
+      assertBufferEquals(rowResult1.columns.get(columnBname).value, valueBname);
+      assertBufferEquals(rowResult2.columns.get(columnBname).value, valueCname);
+  
+      // ColumnAname has been deleted, and will never be visible even with a getRowTs()
+      assertFalse(rowResult2.columns.containsKey(columnAname));
+  
+      List<ByteBuffer> columns = new ArrayList<ByteBuffer>();
+      columns.add(columnBname);
+  
+      rowResult1 = handler.getRowWithColumns(tableAname, rowAname, columns).get(0);
+      assertBufferEquals(rowResult1.columns.get(columnBname).value, valueCname);
+      assertFalse(rowResult1.columns.containsKey(columnAname));
+  
+      rowResult1 = handler.getRowWithColumnsTs(tableAname, rowAname, columns, time1).get(0);
+      assertBufferEquals(rowResult1.columns.get(columnBname).value, valueBname);
+      assertFalse(rowResult1.columns.containsKey(columnAname));
+  
+      // Apply some timestamped deletes
+      // this actually deletes _everything_.
+      // nukes everything in columnB: forever.
+      handler.deleteAllTs(tableAname, rowAname, columnBname, time1);
+      handler.deleteAllRowTs(tableAname, rowBname, time2);
+  
+      // Assert that the timestamp-related methods retrieve the correct data
+      int size = handler.getVerTs(tableAname, rowAname, columnBname, time1, MAXVERSIONS).size();
+      assertEquals(0, size);
+  
+      size = handler.getVerTs(tableAname, rowAname, columnBname, time2, MAXVERSIONS).size();
+      assertEquals(1, size);
+  
+      // should be available....
+      assertBufferEquals(handler.get(tableAname, rowAname, columnBname).get(0).value, valueCname);
+  
+      assertEquals(0, handler.getRow(tableAname, rowBname).size());
+    } finally {
+      // Teardown
+      handler.disableTable(tableAname);
+      handler.deleteTable(tableAname);
+    }
   }
 
   /**
@@ -398,67 +448,70 @@ public class TestThriftServer {
    *
    * @throws Exception
    */
+  @Test
   public void doTestTableScanners() throws Exception {
     // Setup
     ThriftServerRunner.HBaseHandler handler =
       new ThriftServerRunner.HBaseHandler(UTIL.getConfiguration());
     handler.createTable(tableAname, getColumnDescriptors());
-
-    // Apply timestamped Mutations to rowA
-    long time1 = System.currentTimeMillis();
-    handler.mutateRowTs(tableAname, rowAname, getMutations(), time1);
-
-    // Sleep to assure that 'time1' and 'time2' will be different even with a
-    // coarse grained system timer.
-    Thread.sleep(1000);
-
-    // Apply timestamped BatchMutations for rowA and rowB
-    long time2 = System.currentTimeMillis();
-    handler.mutateRowsTs(tableAname, getBatchMutations(), time2);
-
-    time1 += 1;
-
-    // Test a scanner on all rows and all columns, no timestamp
-    int scanner1 = handler.scannerOpen(tableAname, rowAname, getColumnList(true, true));
-    TRowResult rowResult1a = handler.scannerGet(scanner1).get(0);
-    assertBufferEquals(rowResult1a.row, rowAname);
-    // This used to be '1'.  I don't know why when we are asking for two columns
-    // and when the mutations above would seem to add two columns to the row.
-    // -- St.Ack 05/12/2009
-    assertEquals(rowResult1a.columns.size(), 1);
-    assertBufferEquals(rowResult1a.columns.get(columnBname).value, valueCname);
-
-    TRowResult rowResult1b = handler.scannerGet(scanner1).get(0);
-    assertBufferEquals(rowResult1b.row, rowBname);
-    assertEquals(rowResult1b.columns.size(), 2);
-    assertBufferEquals(rowResult1b.columns.get(columnAname).value, valueCname);
-    assertBufferEquals(rowResult1b.columns.get(columnBname).value, valueDname);
-    closeScanner(scanner1, handler);
-
-    // Test a scanner on all rows and all columns, with timestamp
-    int scanner2 = handler.scannerOpenTs(tableAname, rowAname, getColumnList(true, true), time1);
-    TRowResult rowResult2a = handler.scannerGet(scanner2).get(0);
-    assertEquals(rowResult2a.columns.size(), 1);
-    // column A deleted, does not exist.
-    //assertTrue(Bytes.equals(rowResult2a.columns.get(columnAname).value, valueAname));
-    assertBufferEquals(rowResult2a.columns.get(columnBname).value, valueBname);
-    closeScanner(scanner2, handler);
-
-    // Test a scanner on the first row and first column only, no timestamp
-    int scanner3 = handler.scannerOpenWithStop(tableAname, rowAname, rowBname,
-        getColumnList(true, false));
-    closeScanner(scanner3, handler);
-
-    // Test a scanner on the first row and second column only, with timestamp
-    int scanner4 = handler.scannerOpenWithStopTs(tableAname, rowAname, rowBname,
-        getColumnList(false, true), time1);
-    TRowResult rowResult4a = handler.scannerGet(scanner4).get(0);
-    assertEquals(rowResult4a.columns.size(), 1);
-    assertBufferEquals(rowResult4a.columns.get(columnBname).value, valueBname);
-
-    // Teardown
-    handler.disableTable(tableAname);
-    handler.deleteTable(tableAname);
+    
+    try {
+      // Apply timestamped Mutations to rowA
+      long time1 = System.currentTimeMillis();
+      handler.mutateRowTs(tableAname, rowAname, getMutations(), time1);
+  
+      // Sleep to assure that 'time1' and 'time2' will be different even with a
+      // coarse grained system timer.
+      Thread.sleep(1000);
+  
+      // Apply timestamped BatchMutations for rowA and rowB
+      long time2 = System.currentTimeMillis();
+      handler.mutateRowsTs(tableAname, getBatchMutations(), time2);
+  
+      time1 += 1;
+  
+      // Test a scanner on all rows and all columns, no timestamp
+      int scanner1 = handler.scannerOpen(tableAname, rowAname, getColumnList(true, true));
+      TRowResult rowResult1a = handler.scannerGet(scanner1).get(0);
+      assertBufferEquals(rowResult1a.row, rowAname);
+      // This used to be '1'.  I don't know why when we are asking for two columns
+      // and when the mutations above would seem to add two columns to the row.
+      // -- St.Ack 05/12/2009
+      assertEquals(rowResult1a.columns.size(), 1);
+      assertBufferEquals(rowResult1a.columns.get(columnBname).value, valueCname);
+  
+      TRowResult rowResult1b = handler.scannerGet(scanner1).get(0);
+      assertBufferEquals(rowResult1b.row, rowBname);
+      assertEquals(rowResult1b.columns.size(), 2);
+      assertBufferEquals(rowResult1b.columns.get(columnAname).value, valueCname);
+      assertBufferEquals(rowResult1b.columns.get(columnBname).value, valueDname);
+      closeScanner(scanner1, handler);
+  
+      // Test a scanner on all rows and all columns, with timestamp
+      int scanner2 = handler.scannerOpenTs(tableAname, rowAname, getColumnList(true, true), time1);
+      TRowResult rowResult2a = handler.scannerGet(scanner2).get(0);
+      assertEquals(rowResult2a.columns.size(), 1);
+      // column A deleted, does not exist.
+      //assertTrue(Bytes.equals(rowResult2a.columns.get(columnAname).value, valueAname));
+      assertBufferEquals(rowResult2a.columns.get(columnBname).value, valueBname);
+      closeScanner(scanner2, handler);
+  
+      // Test a scanner on the first row and first column only, no timestamp
+      int scanner3 = handler.scannerOpenWithStop(tableAname, rowAname, rowBname,
+          getColumnList(true, false));
+      closeScanner(scanner3, handler);
+  
+      // Test a scanner on the first row and second column only, with timestamp
+      int scanner4 = handler.scannerOpenWithStopTs(tableAname, rowAname, rowBname,
+          getColumnList(false, true), time1);
+      TRowResult rowResult4a = handler.scannerGet(scanner4).get(0);
+      assertEquals(rowResult4a.columns.size(), 1);
+      assertBufferEquals(rowResult4a.columns.get(columnBname).value, valueBname);
+    } finally {
+      // Teardown
+      handler.disableTable(tableAname);
+      handler.deleteTable(tableAname);
+    }
   }
 
   /**
@@ -467,6 +520,7 @@ public class TestThriftServer {
    *
    * @throws Exception
    */
+  @Test
   public void doTestGetTableRegions() throws Exception {
     ThriftServerRunner.HBaseHandler handler =
       new ThriftServerRunner.HBaseHandler(UTIL.getConfiguration());
@@ -490,6 +544,7 @@ public class TestThriftServer {
             "but found " + regionCount, regionCount, 0);
   }
 
+  @Test
   public void doTestFilterRegistration() throws Exception {
     Configuration conf = UTIL.getConfiguration();
 
@@ -502,6 +557,7 @@ public class TestThriftServer {
     assertEquals("filterclass", registeredFilters.get("MyFilter"));
   }
 
+  @Test
   public void doTestGetRegionInfo() throws Exception {
     ThriftServerRunner.HBaseHandler handler =
       new ThriftServerRunner.HBaseHandler(UTIL.getConfiguration());
@@ -538,8 +594,9 @@ public class TestThriftServer {
     cDescriptors.add(cDescA);
 
     // A slightly customized ColumnDescriptor (only 2 versions)
-    ColumnDescriptor cDescB = new ColumnDescriptor(columnBname, 2, "NONE",
-        false, "NONE", 0, 0, false, -1);
+    ColumnDescriptor cDescB =
+        new ColumnDescriptor(columnBname, MAX_VERSIONS, Compression.Algorithm.NONE.toString(),
+            false, StoreFile.BloomType.NONE.toString(), 0, 0, false, -1);
     cDescriptors.add(cDescB);
 
     return cDescriptors;
@@ -565,8 +622,8 @@ public class TestThriftServer {
    */
   static List<Mutation> getMutations() {
     List<Mutation> mutations = new ArrayList<Mutation>();
-    mutations.add(new Mutation(false, columnAname, valueAname, true));
-    mutations.add(new Mutation(false, columnBname, valueBname, true));
+    mutations.add(new Mutation(false, columnAname, valueAname, true, HConstants.LATEST_TIMESTAMP));
+    mutations.add(new Mutation(false, columnBname, valueBname, true, HConstants.LATEST_TIMESTAMP));
     return mutations;
   }
 
@@ -582,19 +639,20 @@ public class TestThriftServer {
     List<BatchMutation> batchMutations = new ArrayList<BatchMutation>();
 
     // Mutations to rowA.  You can't mix delete and put anymore.
-    List<Mutation> rowAmutations = new ArrayList<Mutation>();
-    rowAmutations.add(new Mutation(true, columnAname, null, true));
-    batchMutations.add(new BatchMutation(rowAname, rowAmutations));
-
-    rowAmutations = new ArrayList<Mutation>();
-    rowAmutations.add(new Mutation(false, columnBname, valueCname, true));
-    batchMutations.add(new BatchMutation(rowAname, rowAmutations));
+    batchMutations.add(new BatchMutation(rowAname,
+        ImmutableList.of(
+            new Mutation(true, columnAname, null, true, HConstants.LATEST_TIMESTAMP)
+        )));
+
+    batchMutations.add(new BatchMutation(rowAname,
+        ImmutableList.of(new Mutation(false, columnBname, valueCname, true,
+            HConstants.LATEST_TIMESTAMP))));
 
     // Mutations to rowB
-    List<Mutation> rowBmutations = new ArrayList<Mutation>();
-    rowBmutations.add(new Mutation(false, columnAname, valueCname, true));
-    rowBmutations.add(new Mutation(false, columnBname, valueDname, true));
-    batchMutations.add(new BatchMutation(rowBname, rowBmutations));
+    batchMutations.add(new BatchMutation(rowBname, ImmutableList.of(
+        new Mutation(false, columnAname, valueCname, true, HConstants.LATEST_TIMESTAMP),
+        new Mutation(false, columnBname, valueDname, true, HConstants.LATEST_TIMESTAMP)
+    )));
 
     return batchMutations;
   }
@@ -620,4 +678,8 @@ public class TestThriftServer {
     }
   }
 
+  @Test
+  public void testMaxTimestamp() {
+    assertEquals(HConstants.LATEST_TIMESTAMP, Constants.LATEST_TIMESTAMP);
+  }
 }

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerLegacy.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerLegacy.java?rev=1345453&r1=1345452&r2=1345453&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerLegacy.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerLegacy.java Sat Jun  2 06:28:09 2012
@@ -45,6 +45,7 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.hbase.HBaseClusterTestCase;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor;
 import org.apache.hadoop.hbase.thrift.generated.Hbase;
 import org.apache.hadoop.hbase.thrift.generated.Mutation;
@@ -189,7 +190,7 @@ public class TestThriftServerLegacy exte
 
     // Try null mutation
     List<Mutation> mutations = new ArrayList<Mutation>();
-    mutations.add(new Mutation(false, columnAname, null, true));
+    mutations.add(new Mutation(false, columnAname, null, true, HConstants.LATEST_TIMESTAMP));
     handler.mutateRow(tableAname, rowAname, mutations);
     TRowResult rowResult3 = handler.getRow(tableAname, rowAname).get(0);
     assertEquals(rowAname, rowResult3.row);
@@ -457,8 +458,8 @@ public class TestThriftServerLegacy exte
    */
   private List<Mutation> getMutations2() {
     List<Mutation> mutations = new ArrayList<Mutation>();
-    mutations.add(new Mutation(false, columnAname, valueCname, true));
-    mutations.add(new Mutation(false, columnBname, valueDname, true));
+    mutations.add(new Mutation(false, columnAname, valueCname, true, HConstants.LATEST_TIMESTAMP));
+    mutations.add(new Mutation(false, columnBname, valueDname, true, HConstants.LATEST_TIMESTAMP));
     return mutations;
   }
 

Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/ThriftServerTestBase.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/ThriftServerTestBase.java?rev=1345453&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/ThriftServerTestBase.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/ThriftServerTestBase.java Sat Jun  2 06:28:09 2012
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.thrift;
+
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.thrift.generated.Hbase;
+import org.apache.thrift.protocol.TBinaryProtocol;
+import org.apache.thrift.protocol.TProtocol;
+import org.apache.thrift.transport.TFramedTransport;
+import org.apache.thrift.transport.TSocket;
+import org.apache.thrift.transport.TTransport;
+import org.apache.thrift.transport.TTransportException;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+/**
+ * A base class for Thrift API tests. Starts a mini-cluster plus a Thrift
+ * server on a background thread, and tracks every client socket handed out
+ * by {@link #createClient()} so they can all be closed during teardown.
+ */
+public class ThriftServerTestBase {
+
+  // Must log against this class, not TestMutationWriteToWAL (copy-paste bug).
+  private static final Log LOG = LogFactory.getLog(ThriftServerTestBase.class);
+
+  protected static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  protected static ThriftServerRunner serverRunner;
+  protected static int thriftServerPort;
+
+  /** Sockets created by createClient(); guarded by its own monitor. */
+  private static List<TSocket> clientSockets = new ArrayList<TSocket>();
+
+  /** Single-threaded executor that runs the blocking Thrift server loop. */
+  private static ExecutorService thriftExec = Executors.newSingleThreadExecutor();
+  private static Future<?> thriftFuture;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    // Label the thread after the method actually running (the original
+    // mislabeled it "tearDownAfterClass").
+    Thread.currentThread().setName("setUpBeforeClass");
+    Configuration conf = TEST_UTIL.getConfiguration();
+    thriftServerPort = HBaseTestingUtility.randomFreePort();
+    conf.setInt(HConstants.THRIFT_PROXY_PREFIX + HConstants.THRIFT_PORT_SUFFIX,
+        thriftServerPort);
+    TEST_UTIL.startMiniCluster();
+    serverRunner = new ThriftServerRunner(conf, HConstants.THRIFT_PROXY_PREFIX);
+
+    // ThriftServerRunner.run() blocks, so run it on its own thread and keep
+    // the Future so teardown can surface any server-side failure.
+    thriftFuture = thriftExec.submit(new Runnable() {
+      @Override
+      public void run() {
+        Thread.currentThread().setName("thriftServerThread");
+        serverRunner.run();
+      }
+    });
+
+    LOG.info("Waiting for Thrift server to come online");
+    HBaseTestingUtility.waitForHostPort(HConstants.LOCALHOST, thriftServerPort);
+    LOG.info("Thrift server is online");
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    HBaseTestingUtility.setThreadNameFromMethod();
+    closeClientSockets();
+    LOG.info("Shutting down Thrift server");
+    serverRunner.shutdown();
+    thriftFuture.get();  // this will throw an exception if the Thrift thread failed
+    thriftExec.shutdown();  // release the server thread's executor
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  /**
+   * Opens a framed-transport, binary-protocol connection to the Thrift
+   * server and returns a client. The socket is recorded so that
+   * {@link #closeClientSockets()} can close it later.
+   */
+  public static Hbase.Client createClient() throws TTransportException, UnknownHostException {
+    LOG.info("Connecting Thrift server on port " + thriftServerPort);
+    TSocket sock = new TSocket(InetAddress.getLocalHost().getHostName(),
+        thriftServerPort);
+    TTransport transport = new TFramedTransport(sock);
+    sock.open();
+    synchronized (clientSockets) {
+      clientSockets.add(sock);
+    }
+    TProtocol prot = new TBinaryProtocol(transport);
+    return new Hbase.Client(prot);
+  }
+
+  /** Closes and forgets every socket created through {@link #createClient()}. */
+  public static void closeClientSockets() {
+    synchronized (clientSockets) {
+      for (TSocket sock : clientSockets) {
+        sock.close();
+      }
+      clientSockets.clear();
+    }
+  }
+}
+



Mime
View raw message