hbase-commits mailing list archives

From: apurt...@apache.org
Subject: svn commit: r786919 [1/2] - in /hadoop/hbase/trunk_on_hadoop-0.18.3: ./ bin/ conf/ src/java/ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/ src/java/org/apache/hadoop/hbase/filter/ src/java/org/apache/hadoop/hbase/io/ src/ja...
Date: Sat, 20 Jun 2009 22:29:05 GMT
Author: apurtell
Date: Sat Jun 20 22:29:03 2009
New Revision: 786919

URL: http://svn.apache.org/viewvc?rev=786919&view=rev
Log:
HBASE-1536, HBASE-1543, HBASE-1544, HBASE-1488, HBASE-1531, HBASE-1549, HBASE-1534, HBASE-1387, HBASE-1545, HBASE-1547, HBASE-1553, HBASE-1488, HBASE-1541, HBASE-1540, HBASE-1552, HBASE-1550, HBASE-1515, HBASE-1535

Added:
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/ClassSize.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/TestHeapSize.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/mapred/TestTableIndex.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/thrift/TestThriftServer.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java
Removed:
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/mapred/DisabledTestTableIndex.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/thrift/DisabledTestThriftServer.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/util/DisabledTestMergeTool.java
Modified:
    hadoop/hbase/trunk_on_hadoop-0.18.3/CHANGES.txt
    hadoop/hbase/trunk_on_hadoop-0.18.3/bin/HBase.rb
    hadoop/hbase/trunk_on_hadoop-0.18.3/bin/hbase-daemon.sh
    hadoop/hbase/trunk_on_hadoop-0.18.3/bin/zookeeper.sh
    hadoop/hbase/trunk_on_hadoop-0.18.3/conf/hbase-default.xml
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/ClusterStatus.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/HConstants.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/KeyValue.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Delete.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Get.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HTable.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Put.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Result.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Scan.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/Filter.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/RowInclusiveStopFilter.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/RowPrefixFilter.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/package-info.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/HeapSize.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/HMaster.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/QueryMatcher.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/Store.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/Bytes.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/overview.html
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestClient.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/filter/TestRowPrefixFilter.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestMemcache.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/CHANGES.txt?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/CHANGES.txt (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/CHANGES.txt Sat Jun 20 22:29:03 2009
@@ -196,8 +196,21 @@
    HBASE-1447  Take last version of the hbase-1249 design doc. and make
                documentation out of it
    HBASE-1206  Scanner spins when there are concurrent inserts to column family
-   HBASE-1536  Controlled crash of regionserver not hosting meta/root leaves master 
-               in spinning state, regions not reassigned
+   HBASE-1536  Controlled crash of regionserver not hosting meta/root leaves
+               master in spinning state, regions not reassigned
+   HBASE-1543  Unnecessary toString during scanning costs us some CPU
+   HBASE-1544  Cleanup HTable (Jonathan Gray via Stack)
+   HBASE-1488  After 1304 goes in, fix and reenable test of thrift, mr indexer,
+               and merge tool
+   HBASE-1531  Change new Get to use new filter API
+   HBASE-1549  in zookeeper.sh, use localhost instead of 127.0.0.1
+   HBASE-1534  Got ZooKeeper event, state: Disconnected on HRS and then NPE on
+               reinit
+   HBASE-1387  Before release verify all object sizes using Ryans' instrumented
+               JVM trick (Erik Holstad via Stack)
+   HBASE-1545  atomicIncrements creating new values with Long.MAX_VALUE
+   HBASE-1547  atomicIncrement doesnt increase hregion.memcacheSize
+   HBASE-1553  ClassSize missing in trunk
 
   IMPROVEMENTS
    HBASE-1089  Add count of regions on filesystem to master UI; add percentage
@@ -358,9 +371,20 @@
    HBASE-1538  Up zookeeper timeout from 10 seconds to 30 seconds to cut down
                on hbase-user traffic
    HBASE-1539  prevent aborts due to missing zoo.cfg
+   HBASE-1488  Fix TestThriftServer and re-enable it
+   HBASE-1541  Scanning multiple column families in the presence of deleted 
+               families results in bad scans
+   HBASE-1540  Client delete unit test, define behavior
+               (Jonathan Gray via Stack)
+   HBASE-1552  provide version running on cluster via getClusterStatus
+   HBASE-1550  hbase-daemon.sh stop should provide more information when stop
+               command fails
+   HBASE-1515  Address part of config option hbase.regionserver unnecessary
 
   OPTIMIZATIONS
    HBASE-1412  Change values for delete column and column family in KeyValue
+   HBASE-1535  Add client ability to perform mutations without the WAL
+               (Jon Gray via Stack)
 
 Release 0.19.0 - 01/21/2009
   INCOMPATIBLE CHANGES

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/bin/HBase.rb
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/bin/HBase.rb?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/bin/HBase.rb (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/bin/HBase.rb Sat Jun 20 22:29:03 2009
@@ -262,6 +262,7 @@
     def status(format)
       status = @admin.getClusterStatus()
       if format != nil and format == "detailed"
+        puts("version %s" % [ status.getHBaseVersion() ])
         puts("%d live servers" % [ status.getServers() ])
         for server in status.getServerInfo()
           puts("    %s:%d %d" % \

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/bin/hbase-daemon.sh
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/bin/hbase-daemon.sh?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/bin/hbase-daemon.sh (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/bin/hbase-daemon.sh Sat Jun 20 22:29:03 2009
@@ -153,10 +153,11 @@
         done
         echo
       else
-        echo no $command to stop
+        retval=$?
+        echo no $command to stop because kill of pid `cat $pid` failed with status $retval
       fi
     else
-      echo no $command to stop
+      echo no $command to stop because no pid file $pid
     fi
     ;;
 

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/bin/zookeeper.sh
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/bin/zookeeper.sh?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/bin/zookeeper.sh (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/bin/zookeeper.sh Sat Jun 20 22:29:03 2009
@@ -53,7 +53,7 @@
 fi
 
 if [ "$HBASE_MANAGES_ZK" = "true" ]; then
- ssh $HBASE_SSH_OPTS 127.0.0.1 $"${@// /\\ }" 2>&1 | sed "s/^/localhost: /" &
+ ssh $HBASE_SSH_OPTS localhost $"${@// /\\ }" 2>&1 | sed "s/^/localhost: /" &
  if [ "$HBASE_SLAVE_SLEEP" != "" ]; then
    sleep $HBASE_SLAVE_SLEEP
  fi

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/conf/hbase-default.xml
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/conf/hbase-default.xml?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/conf/hbase-default.xml (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/conf/hbase-default.xml Sat Jun 20 22:29:03 2009
@@ -87,9 +87,9 @@
     period.</description>
   </property>
   <property>
-    <name>hbase.regionserver</name>
-    <value>0.0.0.0:60020</value>
-    <description>The host and port a HBase region server runs at.
+    <name>hbase.regionserver.port</name>
+    <value>60020</value>
+    <description>The port an HBase region server binds to.
     </description>
   </property>
   <property>

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/ClusterStatus.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/ClusterStatus.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/ClusterStatus.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/ClusterStatus.java Sat Jun 20 22:29:03 2009
@@ -45,6 +45,8 @@
  */
 public class ClusterStatus extends VersionedWritable {
   private static final byte VERSION = 0;
+
+  private String hbaseVersion;
   private Collection<HServerInfo> liveServerInfo;
   private Collection<String> deadServers;
 
@@ -120,6 +122,20 @@
   }
 
   /**
+   * @return the HBase version string as reported by the HMaster
+   */
+  public String getHBaseVersion() {
+    return hbaseVersion;
+  }
+
+  /**
+   * @param version the HBase version string
+   */
+  public void setHBaseVersion(String version) {
+    hbaseVersion = version;
+  }
+
+  /**
    * @see java.lang.Object#equals(java.lang.Object)
    */
   public boolean equals(Object o) {
@@ -130,6 +146,7 @@
       return false;
     }
     return (getVersion() == ((ClusterStatus)o).getVersion()) &&
+      getHBaseVersion().equals(((ClusterStatus)o).getHBaseVersion()) &&
       liveServerInfo.equals(((ClusterStatus)o).liveServerInfo) &&
       deadServers.equals(((ClusterStatus)o).deadServers);
   }
@@ -138,7 +155,8 @@
    * @see java.lang.Object#hashCode()
    */
   public int hashCode() {
-    return VERSION + liveServerInfo.hashCode() + deadServers.hashCode();
+    return VERSION + hbaseVersion.hashCode() + liveServerInfo.hashCode() +
+      deadServers.hashCode();
   }
 
   /** @return the object version number */
@@ -179,6 +197,7 @@
 
   public void write(DataOutput out) throws IOException {
     super.write(out);
+    out.writeUTF(hbaseVersion);
     out.writeInt(liveServerInfo.size());
     for (HServerInfo server: liveServerInfo) {
       server.write(out);
@@ -191,6 +210,7 @@
 
   public void readFields(DataInput in) throws IOException {
     super.readFields(in);
+    hbaseVersion = in.readUTF();
     int count = in.readInt();
     liveServerInfo = new ArrayList<HServerInfo>(count);
     for (int i = 0; i < count; i++) {

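For context, HBASE-1552 above lets clients read the version running on the cluster. A minimal sketch, assuming the 0.20-era HBaseAdmin client API with default configuration:

    import org.apache.hadoop.hbase.ClusterStatus;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HBaseAdmin;

    public class ShowClusterVersion {
      public static void main(String[] args) throws Exception {
        HBaseAdmin admin = new HBaseAdmin(new HBaseConfiguration());
        ClusterStatus status = admin.getClusterStatus();
        // New in this commit: the version string the HMaster reports.
        System.out.println("version " + status.getHBaseVersion());
      }
    }
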
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/HConstants.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/HConstants.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/HConstants.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/HConstants.java Sat Jun 20 22:29:03 2009
@@ -66,6 +66,9 @@
   /** default host address */
   static final String DEFAULT_HOST = "0.0.0.0";
 
+  /** Parameter name for port master listens on. */
+  static final String MASTER_PORT = "hbase.master.port";
+
   /** default port that the master listens on */
   static final int DEFAULT_MASTER_PORT = 60000;
 
@@ -85,11 +88,11 @@
   /** Default ZooKeeper pause value. In milliseconds. */
   static final int DEFAULT_ZOOKEEPER_PAUSE = 2 * 1000;
 
-  /** Parameter name for hbase.regionserver address. */
-  static final String REGIONSERVER_ADDRESS = "hbase.regionserver";
-  
-  /** Default region server address */
-  static final String DEFAULT_REGIONSERVER_ADDRESS = DEFAULT_HOST + ":60020";
+  /** Parameter name for port region server listens on. */
+  static final String REGIONSERVER_PORT = "hbase.regionserver.port";
+
+  /** Default port region server listens on. */
+  static final int DEFAULT_REGIONSERVER_PORT = 60020;
 
   /** default port for region server web api */
   static final int DEFAULT_REGIONSERVER_INFOPORT = 60030;

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/KeyValue.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/KeyValue.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/KeyValue.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/KeyValue.java Sat Jun 20 22:29:03 2009
@@ -28,6 +28,7 @@
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.io.RawComparator;
 import org.apache.hadoop.io.Writable;
 
@@ -1783,9 +1784,8 @@
   
   // HeapSize
   public long heapSize() {
-    int dataLen = bytes.length + (bytes.length % 8);
-    return HeapSize.OBJECT + HeapSize.BYTE_ARRAY + dataLen +
-      (2 * HeapSize.INT);
+    return ClassSize.alignSize(HeapSize.OBJECT + HeapSize.REFERENCE + 
+        HeapSize.BYTE_ARRAY + length + (2 * Bytes.SIZEOF_INT));
   }
   
   // Writable

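ClassSize is new in this commit (HBASE-1553). Its alignSize is assumed here to round a byte count up to the JVM's 8-byte allocation unit, matching the private helper this commit removes from LruBlockCache further down; a sketch of that assumed behavior:

    public class AlignSketch {
      // Assumed semantics of ClassSize.alignSize, per the helper removed
      // from LruBlockCache.Entry in this same commit:
      // alignSize(13) == 16, alignSize(16) == 16, alignSize(17) == 24
      public static long alignSize(long num) {
        if (num % 8 == 0) return num;
        return num + (8 - (num % 8));
      }
    }
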
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/LocalHBaseCluster.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/LocalHBaseCluster.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/LocalHBaseCluster.java Sat Jun 20 22:29:03 2009
@@ -95,7 +95,7 @@
     // Start the HRegionServers.  Always have region servers come up on
     // port '0' so there won't be clashes over default port as unit tests
     // start/stop ports at different times during the life of the test.
-    conf.set(REGIONSERVER_ADDRESS, DEFAULT_HOST + ":0");
+    conf.set(REGIONSERVER_PORT, "0");
     this.regionThreads = new ArrayList<RegionServerThread>();
     regionServerClass = (Class<? extends HRegionServer>) conf.getClass(HConstants.REGION_SERVER_IMPL, HRegionServer.class);
     for (int i = 0; i < noRegionServers; i++) {

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Delete.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Delete.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Delete.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Delete.java Sat Jun 20 22:29:03 2009
@@ -105,6 +105,14 @@
   }
 
   /**
+   * Method to check if the familyMap is empty
+   * @return true if empty, false otherwise
+   */
+  public boolean isEmpty() {
+    return familyMap.isEmpty();
+  }
+
+  /**
    * Delete all versions of all columns of the specified family.
    * <p>
    * Overrides previous calls to deleteColumn and deleteColumns for the

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Get.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Get.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Get.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Get.java Sat Jun 20 22:29:03 2009
@@ -29,7 +29,7 @@
 import java.util.TreeSet;
 
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.filter.RowFilterInterface;
+import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.io.HbaseObjectWritable;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -63,7 +63,7 @@
   private byte [] row = null;
   private long lockId = -1L;
   private int maxVersions = 1;
-  private RowFilterInterface filter = null;
+  private Filter filter = null;
   private TimeRange tr = new TimeRange();
   private Map<byte [], NavigableSet<byte []>> familyMap = 
     new TreeMap<byte [], NavigableSet<byte []>>(Bytes.BYTES_COMPARATOR);
@@ -204,14 +204,23 @@
 
   /**
    * Apply the specified server-side filter when performing the Get.
+   * Only {@link Filter#filterKeyValue(KeyValue)} is called AFTER all tests
+   * for ttl, column match, deletes and max versions have been run.
    * @param filter filter to run on the server
    */
-  public Get setFilter(RowFilterInterface filter) {
+  public Get setFilter(Filter filter) {
     this.filter = filter;
     return this;
   }
 
-  /** Accessors */
+  /* Accessors */
+
+  /**
+   * @return Filter
+   */
+  public Filter getFilter() {
+    return this.filter;
+  }
 
   /**
    * Method for retrieving the get's row
@@ -341,9 +350,8 @@
     this.lockId = in.readLong();
     this.maxVersions = in.readInt();
     boolean hasFilter = in.readBoolean();
-    if(hasFilter) {
-      this.filter = 
-        (RowFilterInterface)HbaseObjectWritable.readObject(in, null);
+    if (hasFilter) {
+      this.filter = (Filter)HbaseObjectWritable.readObject(in, null);
     }
     this.tr = new TimeRange();
     tr.readFields(in);
@@ -375,8 +383,7 @@
       out.writeBoolean(false);
     } else {
       out.writeBoolean(true);
-      HbaseObjectWritable.writeObject(out, this.filter, 
-          RowFilterInterface.class, null);
+      HbaseObjectWritable.writeObject(out, this.filter, Filter.class, null);
     }
     tr.write(out);
     out.writeInt(familyMap.size());
@@ -395,4 +402,4 @@
       }
     }
   }
-}
+}
\ No newline at end of file

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HTable.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HTable.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HTable.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HTable.java Sat Jun 20 22:29:03 2009
@@ -535,6 +535,27 @@
       ).booleanValue();
   }
   
+  /**
+   * Test for the existence of columns in the table, as specified in the Get.<p>
+   * 
+   * This will return true if the Get matches one or more keys, false if not.<p>
+   * 
+   * This is a server-side call so it prevents any data from being transfered
+   * to the client.
+   * @param get
+   * @return true if the specified Get matches one or more keys, false if not
+   * @throws IOException
+   */
+  public boolean exists(final Get get) throws IOException {
+    return connection.getRegionServerWithRetries(
+      new ServerCallable<Boolean>(connection, tableName, get.getRow()) {
+        public Boolean call() throws IOException {
+          return Boolean.valueOf(server.
+            exists(location.getRegionInfo().getRegionName(), get));
+        }
+      }
+    ).booleanValue();
+  }
   
   /**
    * Commit to the table the buffer of BatchUpdate.
@@ -821,6 +842,7 @@
    * @param ts timestamp
    * @return RowResult is <code>null</code> if row does not exist.
    * @throws IOException
+   * @deprecated As of hbase 0.20.0, replaced by {@link #get(Get)}
    */
   public RowResult getRow(final String row, final long ts) 
   throws IOException {
@@ -841,6 +863,17 @@
     return getRow(row,null,ts);
   }
   
+  /** 
+   * Get more than one version of all columns for the specified row
+   * at a specified timestamp
+   * 
+   * @param row row key
+   * @param ts timestamp
+   * @param numVersions number of versions to return
+   * @return RowResult is <code>null</code> if row does not exist.
+   * @throws IOException
+   * @deprecated As of hbase 0.20.0, replaced by {@link #get(Get)}
+   */
   public RowResult getRow(final String row, final long ts,
       final int numVersions) throws IOException {
     return getRow(Bytes.toBytes(row), null, ts, numVersions, null);
@@ -953,13 +986,24 @@
     return getRow(row,columns,ts,1,null);
   }
   
+  /** 
+   * Get more than one version of selected columns for the specified row,
+   * using an existing row lock.
+   * 
+   * @param row row key
+   * @param columns Array of column names and families you want to retrieve.
+   * @param numVersions number of versions to return
+   * @param rowLock previously acquired row lock
+   * @return RowResult is <code>null</code> if row does not exist.
+   * @throws IOException
+   * @deprecated As of hbase 0.20.0, replaced by {@link #get(Get)}
+   */
   public RowResult getRow(final String row, final String[] columns,
       final long timestamp, final int numVersions, final RowLock rowLock)
   throws IOException {
     return getRow(Bytes.toBytes(row), Bytes.toByteArrays(columns), timestamp,
                   numVersions, rowLock);
   }
-  
 
   /** 
    * Get selected columns for the specified row at a specified timestamp
@@ -1255,6 +1299,7 @@
    *
    * @param row Key of the row you want to completely delete.
    * @throws IOException
+   * @deprecated As of hbase 0.20.0, replaced by {@link #delete(Delete)}
    */
   public void deleteAll(final byte [] row) throws IOException {
     deleteAll(row, null);
@@ -1265,6 +1310,7 @@
    *
    * @param row Key of the row you want to completely delete.
    * @throws IOException
+   * @deprecated As of hbase 0.20.0, replaced by {@link #delete(Delete)}
    */
   public void deleteAll(final String row) throws IOException {
     deleteAll(row, null);
@@ -1378,7 +1424,7 @@
    */
   public void deleteAllByRegex(final String row, final String colRegex)
   throws IOException {
-    deleteAll(row, colRegex, HConstants.LATEST_TIMESTAMP);
+    deleteAllByRegex(row, colRegex, HConstants.LATEST_TIMESTAMP);
   }
 
   /** 
@@ -1588,6 +1634,7 @@
    * @param row The row
    * @return true if the row exists, false otherwise
    * @throws IOException
+   * @deprecated As of hbase 0.20.0, replaced by {@link #exists(Get)}
    */
   public boolean exists(final byte [] row) throws IOException {
     return exists(row, null, HConstants.LATEST_TIMESTAMP, null);
@@ -1600,6 +1647,7 @@
    * @param column The column
    * @return true if the row exists, false otherwise
    * @throws IOException
+   * @deprecated As of hbase 0.20.0, replaced by {@link #exists(Get)}
    */
   public boolean exists(final byte [] row, final byte[] column)
   throws IOException {
@@ -1614,6 +1662,7 @@
    * @param timestamp The timestamp
    * @return true if the specified coordinate exists
    * @throws IOException
+   * @deprecated As of hbase 0.20.0, replaced by {@link #exists(Get)}
    */
   public boolean exists(final byte [] row, final byte [] column,
       long timestamp) throws IOException {
@@ -1629,20 +1678,14 @@
    * @param rl Existing row lock
    * @return true if the specified coordinate exists
    * @throws IOException
+   * @deprecated As of hbase 0.20.0, replaced by {@link #exists(Get)}
    */
   public boolean exists(final byte [] row, final byte [] column,
       final long timestamp, final RowLock rl) throws IOException {
     final Get g = new Get(row, rl);
     g.addColumn(column);
     g.setTimeStamp(timestamp);
-    return connection.getRegionServerWithRetries(
-      new ServerCallable<Boolean>(connection, tableName, row) {
-        public Boolean call() throws IOException {
-          return Boolean.valueOf(server.
-            exists(location.getRegionInfo().getRegionName(), g));
-        }
-      }
-    ).booleanValue();
+    return exists(g);
   }
 
   /**
@@ -1695,18 +1738,20 @@
   }
   
   /**
-   * Atomically checks if a row's values match
-   * the expectedValues. If it does, it uses the
-   * batchUpdate to update the row.
+   * Atomically checks if a row's values match the expectedValues. 
+   * If it does, it uses the batchUpdate to update the row.<p>
+   * 
+   * This operation is not currently supported, use {@link #checkAndPut}
    * @param batchUpdate batchupdate to apply if check is successful
    * @param expectedValues values to check
    * @param rl rowlock
    * @throws IOException
+   * @deprecated As of hbase 0.20.0, replaced by {@link #checkAndPut}
    */
   public synchronized boolean checkAndSave(final BatchUpdate batchUpdate,
     final HbaseMapWritable<byte[],byte[]> expectedValues, final RowLock rl)
   throws IOException {
-    throw new UnsupportedOperationException("TODO: Not yet implemented");
+    throw new UnsupportedOperationException("Replaced by checkAndPut");
   }
 
   /**
@@ -2026,4 +2071,4 @@
       };
     }
   }
-}
\ No newline at end of file
+}

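A usage sketch of the new exists(Get) call added above; the table and row names are hypothetical and the constructors are assumed from the 0.20-era client API:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ExistsSketch {
      public static void main(String[] args) throws Exception {
        HTable table = new HTable(new HBaseConfiguration(), "mytable");
        Get get = new Get(Bytes.toBytes("myrow"));
        // Server-side check; no cell data is transferred to the client.
        boolean found = table.exists(get);
        System.out.println("row exists: " + found);
      }
    }
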
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Put.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Put.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Put.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Put.java Sat Jun 20 22:29:03 2009
@@ -33,6 +33,7 @@
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ClassSize;
 
 
 /** 
@@ -46,10 +47,16 @@
   private byte [] row = null;
   private long timestamp = HConstants.LATEST_TIMESTAMP;
   private long lockId = -1L;
+  private boolean writeToWAL = true;
+  
   private Map<byte [], List<KeyValue>> familyMap =
     new TreeMap<byte [], List<KeyValue>>(Bytes.BYTES_COMPARATOR);
   
-  /** Constructor for Writable.  DO NOT USE */
+  private static final long OVERHEAD = ClassSize.alignSize(HeapSize.OBJECT + 
+      1 * HeapSize.REFERENCE + 1 * HeapSize.ARRAY + 2 * Bytes.SIZEOF_LONG + 
+      1 * Bytes.SIZEOF_BOOLEAN + 1 * HeapSize.REFERENCE + HeapSize.TREEMAP_SIZE);
+  
+  /** Constructor for Writable. DO NOT USE */
   public Put() {}
   
   /**
@@ -104,12 +111,12 @@
    * its version to this Put operation.
    * @param column Old style column name with family and qualifier put together
    * with a colon.
-   * @param timestamp version timestamp
+   * @param ts version timestamp
    * @param value column value
    */
-  public void add(byte [] column, long timestamp, byte [] value) {
+  public void add(byte [] column, long ts, byte [] value) {
     byte [][] parts = KeyValue.parseColumn(column);
-    add(parts[0], parts[1], timestamp, value);
+    add(parts[0], parts[1], ts, value);
   }
 
   /**
@@ -117,15 +124,15 @@
    * its version to this Put operation.
    * @param family family name
    * @param qualifier column qualifier
-   * @param timestamp version timestamp
+   * @param ts version timestamp
    * @param value column value
    */
-  public void add(byte [] family, byte [] qualifier, long timestamp, byte [] value) {
+  public void add(byte [] family, byte [] qualifier, long ts, byte [] value) {
     List<KeyValue> list = familyMap.get(family);
     if(list == null) {
-      list = new ArrayList<KeyValue>();
+      list = new ArrayList<KeyValue>(0);
     }
-    KeyValue kv = new KeyValue(this.row, family, qualifier, timestamp, 
+    KeyValue kv = new KeyValue(this.row, family, qualifier, ts, 
       KeyValue.Type.Put, value); 
     list.add(kv);
     familyMap.put(family, list);
@@ -207,6 +214,22 @@
   }
   
   /**
+   * @return true if edits should be applied to WAL, false if not
+   */
+  public boolean writeToWAL() {
+    return this.writeToWAL;
+  }
+  
+  /**
+   * Set whether this Put should be written to the WAL or not.
+   * Not writing the WAL means you may lose edits on server crash.
+   * @param write true if edits should be written to WAL, false if not
+   */
+  public void writeToWAL(boolean write) {
+    this.writeToWAL = write;
+  }
+  
+  /**
    * @return String 
    */
   @Override
@@ -246,13 +269,29 @@
   
   //HeapSize
   public long heapSize() {
-  	long totalSize = 0;
-  	for(Map.Entry<byte [], List<KeyValue>> entry : this.familyMap.entrySet()) {
-  	  for(KeyValue kv : entry.getValue()) {
-  		totalSize += kv.heapSize();
-  	  }
-  	}
-    return totalSize;
+    long heapsize = OVERHEAD;
+    heapsize += ClassSize.alignSize(this.row.length);
+
+    
+    for(Map.Entry<byte [], List<KeyValue>> entry : this.familyMap.entrySet()) {
+      //Adding entry overhead
+      heapsize += HeapSize.MAP_ENTRY_SIZE;
+      
+      //Adding key overhead
+      heapsize += HeapSize.REFERENCE + HeapSize.ARRAY + 
+        ClassSize.alignSize(entry.getKey().length);
+      
+      //This part is kinds tricky since the JVM can reuse references if you
+      //store the same value, but have a good match with SizeOf at the moment
+      //Adding value overhead
+      heapsize += HeapSize.REFERENCE + HeapSize.ARRAYLIST_SIZE;
+      int size = entry.getValue().size();
+      heapsize += size * HeapSize.REFERENCE;
+      for(KeyValue kv : entry.getValue()) {
+        heapsize += kv.heapSize();
+      }
+    }
+    return heapsize;
   }
   
   //Writable
@@ -261,6 +300,7 @@
     this.row = Bytes.readByteArray(in);
     this.timestamp = in.readLong();
     this.lockId = in.readLong();
+    this.writeToWAL = in.readBoolean();
     int numFamilies = in.readInt();
     this.familyMap = 
       new TreeMap<byte [],List<KeyValue>>(Bytes.BYTES_COMPARATOR);
@@ -286,6 +326,7 @@
     Bytes.writeByteArray(out, this.row);
     out.writeLong(this.timestamp);
     out.writeLong(this.lockId);
+    out.writeBoolean(this.writeToWAL);
     out.writeInt(familyMap.size());
     for(Map.Entry<byte [], List<KeyValue>> entry : familyMap.entrySet()) {
       Bytes.writeByteArray(out, entry.getKey());

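HBASE-1535 above adds the writeToWAL switch. A sketch of skipping the WAL for bulk-load style writes; the table and column names are hypothetical, and as the javadoc warns, such edits may be lost on a server crash:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class NoWalPutSketch {
      public static void main(String[] args) throws Exception {
        HTable table = new HTable(new HBaseConfiguration(), "mytable");
        Put put = new Put(Bytes.toBytes("myrow"));
        put.add(Bytes.toBytes("myfamily"), Bytes.toBytes("myqualifier"),
            System.currentTimeMillis(), Bytes.toBytes("myvalue"));
        // New in this commit: opt this mutation out of the write-ahead log.
        put.writeToWAL(false);
        table.put(put);
      }
    }
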
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Result.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Result.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Result.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Result.java Sat Jun 20 22:29:03 2009
@@ -235,6 +235,8 @@
 
   public Cell getCellValue(byte[] family, byte[] qualifier) {
     Map.Entry<Long,byte[]> val = getKeyValue(family, qualifier);
+    if (val == null)
+      return null;
     return new Cell(val.getValue(), val.getKey());
   }
 

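The null guard above changes client-visible behavior; a sketch, with hypothetical family and qualifier names:

    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.io.Cell;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CellValueSketch {
      static boolean hasCell(Result result) {
        Cell cell = result.getCellValue(Bytes.toBytes("myfamily"),
            Bytes.toBytes("nosuchqualifier"));
        // Before this fix, a missing cell meant a NullPointerException here.
        return cell != null;
      }
    }
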
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Scan.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Scan.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Scan.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Scan.java Sat Jun 20 22:29:03 2009
@@ -85,7 +85,6 @@
   public Scan(byte [] startRow, Filter filter) {
     this(startRow);
     this.filter = filter;
-
   }
   
   /**
@@ -118,7 +117,6 @@
   public Scan addFamily(byte [] family) {
     familyMap.remove(family);
     familyMap.put(family, null);
-
     return this;
   }
   
@@ -204,7 +202,7 @@
    * Get all available versions.
    */
   public Scan setMaxVersions() {
-  	this.maxVersions = Integer.MAX_VALUE;
+    this.maxVersions = Integer.MAX_VALUE;
     return this;
   }
 
@@ -236,7 +234,6 @@
    */
   public Scan setOldFilter(RowFilterInterface filter) {
     oldFilter = filter;
-
     return this;
   }
   
@@ -246,7 +243,6 @@
    */
   public Scan setFamilyMap(Map<byte [], NavigableSet<byte []>> familyMap) {
     this.familyMap = familyMap;
-
     return this;
   }
   

Added: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java?rev=786919&view=auto
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java (added)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java Sat Jun 20 22:29:03 2009
@@ -0,0 +1,59 @@
+package org.apache.hadoop.hbase.filter;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.KeyValue;
+
+/**
+ * Simple filter that returns first N columns on row only.
+ * This filter was written to test filters in Get and as soon as it gets
+ * its quota of columns, {@link #filterAllRemaining()} returns true.  This
+ * makes this filter unsuitable as a Scan filter.
+ */
+public class ColumnCountGetFilter implements Filter {
+  private int limit = 0;
+  private int count = 0;
+
+  /**
+   * Used during serialization.
+   * Do not use.
+   */
+  public ColumnCountGetFilter() {
+    super();
+  }
+
+  public ColumnCountGetFilter(final int n) {
+    this.limit = n;
+  }
+
+  public boolean filterAllRemaining() {
+    return this.count > this.limit;
+  }
+
+  public ReturnCode filterKeyValue(KeyValue v) {
+    this.count++;
+    return filterAllRemaining()? ReturnCode.SKIP: ReturnCode.INCLUDE;
+  }
+
+  public boolean filterRow() {
+    return false;
+  }
+
+  public boolean filterRowKey(byte[] buffer, int offset, int length) {
+    return false;
+  }
+
+  public void reset() {
+    this.count = 0;
+  }
+
+  public void readFields(DataInput in) throws IOException {
+    this.limit = in.readInt();
+  }
+
+  public void write(DataOutput out) throws IOException {
+    out.writeInt(this.limit);
+  }
+}
\ No newline at end of file

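A sketch tying the new filter to the new Get filter API from earlier in this commit; the table, row, and column limit are hypothetical:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ColumnCountSketch {
      public static void main(String[] args) throws Exception {
        HTable table = new HTable(new HBaseConfiguration(), "mytable");
        Get get = new Get(Bytes.toBytes("myrow"));
        // Include at most the first three columns of the row.
        get.setFilter(new ColumnCountGetFilter(3));
        Result result = table.get(get);
        System.out.println(result);
      }
    }
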
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/Filter.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/Filter.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/Filter.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/Filter.java Sat Jun 20 22:29:03 2009
@@ -98,7 +98,7 @@
      * still be called.
      */
     NEXT_ROW,
-  };
+  }
 
   /**
    * Last chance to veto row based on previous {@link #filterKeyValue(KeyValue)}

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/RowInclusiveStopFilter.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/RowInclusiveStopFilter.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/RowInclusiveStopFilter.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/RowInclusiveStopFilter.java Sat Jun 20 22:29:03 2009
@@ -28,8 +28,8 @@
 import java.io.DataInput;
 
 /**
- * A Filter that stops after the given row.  There is no "RowStopFilter" because the Scan
- * spec allows you to specify a stop row.
+ * A Filter that stops after the given row.  There is no "RowStopFilter" because
+ * the Scan spec allows you to specify a stop row.
  *
  * Use this filter to include the stop row, eg: [A,Z].
  */
@@ -86,4 +86,4 @@
   public void readFields(DataInput in) throws IOException {
     this.stopRowKey = Bytes.readByteArray(in);
   }
-}
+}
\ No newline at end of file

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/RowPrefixFilter.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/RowPrefixFilter.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/RowPrefixFilter.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/RowPrefixFilter.java Sat Jun 20 22:29:03 2009
@@ -27,30 +27,35 @@
 import java.io.IOException;
 import java.io.DataInput;
 
+/**
+ * Pass results that have same row prefix.
+ */
 public class RowPrefixFilter implements Filter {
-
-  protected byte [] prefix;
+  protected byte [] prefix = null;
 
   public RowPrefixFilter(final byte [] prefix) {
     this.prefix = prefix;
   }
 
   public RowPrefixFilter() {
+    super();
   }
 
   @Override
   public void reset() {
+    // Noop
   }
 
   @Override
   public boolean filterRowKey(byte[] buffer, int offset, int length) {
-    if (buffer == null)
+    if (buffer == null || this.prefix == null)
       return true;
     if (length < prefix.length)
       return true;
     // if they are equal, return false => pass row
     // else return true, filter row
-    return Bytes.compareTo(buffer, offset, prefix.length, prefix, 0, prefix.length) != 0;
+    return Bytes.compareTo(buffer, offset, this.prefix.length, this.prefix, 0,
+      this.prefix.length) != 0;
   }
 
   @Override
@@ -70,11 +75,11 @@
 
   @Override
   public void write(DataOutput out) throws IOException {
-    Bytes.writeByteArray(out, prefix);
+    Bytes.writeByteArray(out, this.prefix);
   }
 
   @Override
   public void readFields(DataInput in) throws IOException {
-    prefix = Bytes.readByteArray(in);
+    this.prefix = Bytes.readByteArray(in);
   }
-}
+}
\ No newline at end of file

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/package-info.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/package-info.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/package-info.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/package-info.java Sat Jun 20 22:29:03 2009
@@ -17,11 +17,14 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-/**Provides row-level filters applied to HRegion scan results during calls to {@link org.apache.hadoop.hbase.client.ResultScanner#next()}. 
+/**Provides row-level filters applied to HRegion scan results during calls to
+ * {@link org.apache.hadoop.hbase.client.ResultScanner#next()}. 
 
-<p>Use {@link org.apache.hadoop.hbase.filter.StopRowFilter} to stop the scan once rows exceed the supplied row key.
-Filters will not stop the scan unless hosted inside of a {@link org.apache.hadoop.hbase.filter.WhileMatchRowFilter}.
-Supply a set of filters to apply using {@link org.apache.hadoop.hbase.filter.RowFilterSet}.  
+<p>Since HBase 0.20.0, {@link Filter} is the new Interface used filtering.
+It replaces the deprecated {@link RowFilterInterface}.
+Filters run the extent of a table unless you wrap your filter in a
+{@link RowWhileMatchFilter}.  The latter returns as soon as the filter
+stops matching.
 </p>
 */
 package org.apache.hadoop.hbase.filter;

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/HeapSize.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/HeapSize.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/HeapSize.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/HeapSize.java Sat Jun 20 22:29:03 2009
@@ -45,36 +45,35 @@
   /** Object overhead is minimum 2 * reference size (8 bytes on 64-bit) */
   static final int OBJECT = 2 * REFERENCE;
   
-  /**
-   * The following types are always allocated in blocks of 8 bytes (on 64bit)
-   * For example, if you have two ints in a class, it will use 8 bytes.
-   * If you have three ints in a class, it will use 16 bytes.
-   */
-  static final int SHORT = 4;
-  static final int INT = 4;
-  static final int FLOAT = 4;
-  static final int BOOLEAN = 4;
-  static final int CHAR = 4;
-  static final int BYTE = 1;
-  
-  /** These types are always 8 bytes */
-  static final int DOUBLE = 8;
-  static final int LONG = 8;
-  
   /** Array overhead */
   static final int ARRAY = 3 * REFERENCE;
+
+  /** OverHead for nested arrays */
   static final int MULTI_ARRAY = (4 * REFERENCE) + ARRAY;
   
   /** Byte arrays are fixed size below plus its length, 8 byte aligned */
   static final int BYTE_ARRAY = 3 * REFERENCE;
   
-  static final int BLOCK_SIZE_TAX = 8;
-
+  /** Overhead for ByteBuffer */
   static final int BYTE_BUFFER = 56;
-
+  
+  /** String overhead */
+  static final int STRING_SIZE = 64;
+  
+  /** Overhead for ArrayList(0) */
+  static final int ARRAYLIST_SIZE = 64;
+  
+  /** Overhead for TreeMap */
+  static final int TREEMAP_SIZE = 80;
+  
+  /** Overhead for entry in map */
+  static final int MAP_ENTRY_SIZE = 64;
+  
+  
   /**
    * @return Approximate 'exclusive deep size' of implementing object.  Includes
    * count of payload and hosting object sizings.
   */
   public long heapSize();
+  
 }

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java Sat Jun 20 22:29:03 2009
@@ -41,6 +41,7 @@
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.RawComparator;
@@ -1333,11 +1334,15 @@
     long [] blockOffsets;
     int [] blockDataSizes;
     int size = 0;
-  
+
     /* Needed doing lookup on blocks.
      */
     final RawComparator<byte []> comparator;
   
+    static final int OVERHEAD = (int)ClassSize.alignSize(HeapSize.OBJECT + 
+        2 * Bytes.SIZEOF_INT + 1 * HeapSize.MULTI_ARRAY +  2 * HeapSize.ARRAY + 
+        4 * HeapSize.REFERENCE);
+    
     /*
      * Shutdown default constructor
      */
@@ -1493,8 +1498,25 @@
     }
 
     public long heapSize() {
-      return this.size;
+      long size = OVERHEAD;
+      
+      //Calculating the size of blockKeys 
+      if(blockKeys != null) {
+        for(byte [] bs : blockKeys) {
+          size += HeapSize.MULTI_ARRAY;
+          size += ClassSize.alignSize(bs.length);
+        }
+      }
+      if(blockOffsets != null) {
+        size += blockOffsets.length * Bytes.SIZEOF_LONG;
+      }
+      if(blockDataSizes != null) {
+        size += blockDataSizes.length * Bytes.SIZEOF_INT;
+      }
+      
+      return size;
     }
+    
   }
 
   /*

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java Sat Jun 20 22:29:03 2009
@@ -24,6 +24,8 @@
 
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ClassSize;
 
 import java.util.ArrayList;
 import java.util.Collection;
@@ -63,10 +65,6 @@
   /** The default load factor to use */
   public static final float DEFAULT_LOAD_FACTOR = 0.75f;
   
-  /** Memory overhead of this Object (for HeapSize) */
-  private static final int OVERHEAD = 5 * HeapSize.LONG + 2 * HeapSize.INT +
-    2 * HeapSize.FLOAT + 3 * HeapSize.REFERENCE + 1 * HeapSize.ARRAY;
-  
   /** Load factor allowed (usually 75%) */
   private final float loadFactor;
   /** Number of key/vals in the map */
@@ -91,6 +89,11 @@
   /** Number of unsuccessful (not found) get() calls */
   private long missCount = 0;
 
+  /** Memory overhead of this Object (for HeapSize) */
+  private static final int OVERHEAD = (int)ClassSize.alignSize(HeapSize.OBJECT +
+      1 * Bytes.SIZEOF_FLOAT + 2 * Bytes.SIZEOF_INT + 1 * HeapSize.ARRAY + 
+      3 * HeapSize.REFERENCE + 4 * Bytes.SIZEOF_LONG);
+  
   /**
    * Constructs a new, empty map with the specified initial capacity,
    * load factor, and maximum memory usage.
@@ -266,8 +269,7 @@
    * @return hit ratio (double between 0 and 1)
    */
   public double getHitRatio() {
-    return (double)((double)hitCount/
-      ((double)(hitCount+missCount)));
+    return ((double)hitCount) / ((double)(hitCount+missCount));
   }
   
   /**
@@ -955,10 +957,6 @@
    */
   protected static class Entry
   implements Map.Entry<String,ByteBuffer>, HeapSize {
-    /** The baseline overhead memory usage of this class */
-    static final int OVERHEAD = 1 * HeapSize.LONG + 5 * HeapSize.REFERENCE + 
-      2 * HeapSize.INT;
-    
     /** The key */
     protected final String key;
     /** The value */
@@ -976,6 +974,10 @@
     /** The precomputed heap size of this entry */
     protected long heapSize;
 
+    /** The baseline overhead memory usage of this class */
+    static final int OVERHEAD = HeapSize.OBJECT + 5 * HeapSize.REFERENCE + 
+      1 * Bytes.SIZEOF_INT + 1 * Bytes.SIZEOF_LONG;
+    
     /**
      * Create a new entry.
      *
@@ -1137,7 +1139,8 @@
      * @return size of String in bytes
      */
     private long heapSize(String s) {
-      return HeapSize.OBJECT + alignSize(s.length()*2);
+      return HeapSize.STRING_SIZE + 
+        ClassSize.alignSize(s.length() * Bytes.SIZEOF_CHAR);
     }
     
     /**
@@ -1145,18 +1148,9 @@
      * @return size of ByteBuffer in bytes
      */
     private long heapSize(ByteBuffer b) {
-      return HeapSize.BYTE_BUFFER + alignSize(b.capacity());
+      return HeapSize.BYTE_BUFFER + ClassSize.alignSize(b.capacity());
     }
     
-    /**
-     * Aligns a number to 8.
-     * @param num number to align to 8
-     * @return smallest number >= input that is a multiple of 8
-     */
-    private long alignSize(long num) {
-      if(num % 8 == 0) return num;
-      return (num + (8 - (num % 8)));
-    }
   }
 }
 

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/HMaster.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/HMaster.java Sat Jun 20 22:29:03 2009
@@ -80,6 +80,7 @@
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Sleeper;
 import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.hbase.util.VersionInfo;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;
 import org.apache.hadoop.io.MapWritable;
 import org.apache.hadoop.io.Text;
@@ -985,6 +986,7 @@
    */
   public ClusterStatus getClusterStatus() {
     ClusterStatus status = new ClusterStatus();
+    status.setHBaseVersion(VersionInfo.getVersion());
     status.setServerInfo(serverManager.serversToServerInfo.values());
     status.setDeadServers(serverManager.deadServers);
     return status;
@@ -1075,8 +1077,7 @@
    */
 
   private static void printUsageAndExit() {
-    System.err.println("Usage: java org.apache.hbase.HMaster " +
-    "[--bind=hostname:port] start|stop");
+    System.err.println("Usage: java org.apache.hbase.HMaster start|stop");
     System.exit(0);
   }
 

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java Sat Jun 20 22:29:03 2009
@@ -89,8 +89,8 @@
     int timeRes = Bytes.compareTo(memBuffer, tsOffset, Bytes.SIZEOF_LONG,
         deleteBuffer, deleteTimeOffset, Bytes.SIZEOF_LONG);
 
-    if(deleteType == KeyValue.Type.DeleteFamily.getCode()) {
-      if(timeRes <= 0){
+    if (deleteType == KeyValue.Type.DeleteFamily.getCode()) {
+      if (timeRes <= 0) {
         return DeleteCode.DELETE;
       }
       return DeleteCode.SKIP;
@@ -99,16 +99,16 @@
     //Compare columns
     res = Bytes.compareTo(memBuffer, memOffset, memQualifierLen,
         deleteBuffer, deleteQualifierOffset, deleteQualifierLength);
-    if(res < 0) {
+    if (res < 0) {
       return DeleteCode.SKIP;
     } else if(res > 0) {
       return DeleteCode.DONE;
     }
     // same column, compare the time.
-    if(timeRes == 0) {
+    if (timeRes == 0) {
       return DeleteCode.DELETE;
     } else if (timeRes < 0) {
-      if(deleteType == KeyValue.Type.DeleteColumn.getCode()) {
+      if (deleteType == KeyValue.Type.DeleteColumn.getCode()) {
         return DeleteCode.DELETE;
       }
       return DeleteCode.DONE;

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java Sat Jun 20 22:29:03 2009
@@ -43,7 +43,7 @@
   // Time at which this edit was written.
   private long writeTime;
   private int HEAP_TAX = HeapSize.OBJECT + (2 * HeapSize.BYTE_ARRAY) +
-    (2 * HeapSize.LONG);
+    (2 * Bytes.SIZEOF_LONG);
 
   /** Writable Consructor -- Do not use. */
   public HLogKey() {

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java Sat Jun 20 22:29:03 2009
@@ -1176,7 +1176,7 @@
    * @throws IOException
    */
   public void put(Put put) throws IOException {
-    this.put(put, null, true);
+    this.put(put, null, put.writeToWAL());
   }
   
   /**
@@ -1194,7 +1194,7 @@
    * @throws IOException
    */
   public void put(Put put, Integer lockid) throws IOException {
-    this.put(put, lockid, true);
+    this.put(put, lockid, put.writeToWAL());
   }
 
   /**
@@ -1710,7 +1710,7 @@
       if(stopRow != null &&
           comparator.compareRows(stopRow, 0, stopRow.length, 
               currentRow, 0, currentRow.length)
-          <= 0){
+          <= 0) {
         return false;
       }
       this.storeHeap.next(results);
@@ -1721,6 +1721,21 @@
         }
         byte [] row = kv.getRow();
         if(!Bytes.equals(currentRow, row)) {
+          // Next row:
+
+          // what happens if there are _no_ results:
+          if (results.isEmpty()) {
+            // Continue on the next row:
+            currentRow = row;
+
+            // But did we pass the stop row?
+            if (stopRow != null &&
+                comparator.compareRows(stopRow, 0, stopRow.length,
+                    currentRow, 0, currentRow.length) <= 0) {
+              return false;
+            }
+            continue;
+          }
           return true;
         }
         this.storeHeap.next(results);
@@ -2218,12 +2233,12 @@
    */
   public Result get(final Get get, final Integer lockid) throws IOException {
     // Verify families are all valid
-    if(get.hasFamilies()) {
-      for(byte [] family : get.familySet()) {
+    if (get.hasFamilies()) {
+      for (byte [] family: get.familySet()) {
         checkFamily(family);
       }
     } else { // Adding all families to scanner
-      for(byte[] family: regionInfo.getTableDesc().getFamiliesKeys()){
+      for (byte[] family: regionInfo.getTableDesc().getFamiliesKeys()) {
         get.addFamily(family);
       }
     }
@@ -2231,7 +2246,7 @@
     Integer lid = getLock(lockid, get.getRow()); 
     List<KeyValue> result = new ArrayList<KeyValue>();
     try {
-      for(Map.Entry<byte[],NavigableSet<byte[]>> entry:
+      for (Map.Entry<byte[],NavigableSet<byte[]>> entry:
           get.getFamilyMap().entrySet()) {
         get(this.stores.get(entry.getKey()), get, entry.getValue(), result);
       }
@@ -2260,16 +2275,29 @@
       byte [] qualifier, long amount)
   throws IOException {
     checkRow(row);
-    
+
+    boolean flush = false;
     // Lock row
     Integer lid = obtainRowLock(row);
     long result = 0L;
     try {
       Store store = stores.get(family);
-      result = store.incrementColumnValue(row, family, qualifier, amount);
+
+      Store.ValueAndSize vas =
+          store.incrementColumnValue(row, family, qualifier, amount);
+
+      result = vas.value;
+      long size = this.memcacheSize.addAndGet(vas.sizeAdded);
+      flush = isFlushSize(size);
     } finally {
       releaseRowLock(lid);
     }
+
+    if (flush) {
+      // Request a cache flush.  Do it outside update lock.
+      requestFlush();
+    }
+
     return result;
   }
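
Both put overloads above now defer the WAL decision to the Put itself rather
than hard-coding true. A hedged client-side sketch, assuming Put exposes a
setWriteToWAL(boolean) setter to pair with the writeToWAL() getter the region
consults:

    Put p = new Put(Bytes.toBytes("row1"));
    p.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    // Assumed setter: skip the write-ahead log for this one edit. Faster,
    // but the edit is lost if the region server dies before the next flush.
    p.setWriteToWAL(false);
    region.put(p);  // internally: this.put(p, null, p.writeToWAL())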
     

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Sat Jun 20 22:29:03 2009
@@ -232,25 +232,22 @@
   // doing a restart() to prevent closing of HDFS.
   private final AtomicBoolean shutdownHDFS = new AtomicBoolean(true);
 
+  private final String machineName;
+
   /**
    * Starts a HRegionServer at the default location
    * @param conf
    * @throws IOException
    */
   public HRegionServer(HBaseConfiguration conf) throws IOException {
-    this(new HServerAddress(conf.get(REGIONSERVER_ADDRESS,
-        DEFAULT_REGIONSERVER_ADDRESS)), conf);
-  }
-  
-  /**
-   * Starts a HRegionServer at the specified location
-   * @param address
-   * @param conf
-   * @throws IOException
-   */
-  public HRegionServer(HServerAddress address, HBaseConfiguration conf)
-  throws IOException {
-    this.address = address;
+    machineName = DNS.getDefaultHost(
+        conf.get("hbase.regionserver.dns.interface","default"),
+        conf.get("hbase.regionserver.dns.nameserver","default"));
+    String addressStr = machineName + ":" + 
+      conf.get(REGIONSERVER_PORT, Integer.toString(DEFAULT_REGIONSERVER_PORT));
+    this.address = new HServerAddress(addressStr);
+    LOG.info("My address is " + address);
+
     this.abortRequested = false;
     this.fsOk = true;
     this.conf = conf;
@@ -291,9 +288,6 @@
       address.getPort(), conf.getInt("hbase.regionserver.handler.count", 10),
       false, conf);
     this.server.setErrorHandler(this);
-    String machineName = DNS.getDefaultHost(
-        conf.get("hbase.regionserver.dns.interface","default"),
-        conf.get("hbase.regionserver.dns.nameserver","default"));
     // Address is given a default IP for the moment. Will be changed after
     // calling the master.
     this.serverInfo = new HServerInfo(new HServerAddress(
@@ -1307,9 +1301,11 @@
    * Run initialization using parameters passed us by the master.
    */
   private MapWritable reportForDuty() {
-    if (!getMaster()) {
-      return null;
+    while (!getMaster()) {
+      sleeper.sleep();
+      LOG.warn("Unable to get master for initialization");
     }
+
     MapWritable result = null;
     long lastMsg = 0;
     while(!stopRequested.get()) {
@@ -2334,8 +2330,7 @@
     if (message != null) {
       System.err.println(message);
     }
-    System.err.println("Usage: java " +
-        "org.apache.hbase.HRegionServer [--bind=hostname:port] start");
+    System.err.println("Usage: java org.apache.hbase.HRegionServer start|stop");
     System.exit(0);
   }
   
@@ -2353,13 +2348,7 @@
     
     // Process command-line args. TODO: Better cmd-line processing
     // (but hopefully something not as painful as cli options).
-    final String addressArgKey = "--bind=";
     for (String cmd: args) {
-      if (cmd.startsWith(addressArgKey)) {
-        conf.set(REGIONSERVER_ADDRESS, cmd.substring(addressArgKey.length()));
-        continue;
-      }
-      
       if (cmd.equals("start")) {
         try {
           // If 'local', don't start a region server here.  Defer to
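
With --bind gone, the advertised address is now the DNS-derived hostname plus
the configured port. A sketch of steering it through configuration, using the
two keys shown in the constructor hunk (the interface name "eth0" is an
assumption):

    HBaseConfiguration conf = new HBaseConfiguration();
    // Resolve the advertised hostname via this NIC and nameserver; both
    // default to "default", i.e. the JVM's notion of the local host.
    conf.set("hbase.regionserver.dns.interface", "eth0");
    conf.set("hbase.regionserver.dns.nameserver", "default");
    HRegionServer rs = new HRegionServer(conf);  // throws IOException; logs "My address is host:port"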

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java Sat Jun 20 22:29:03 2009
@@ -19,11 +19,6 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.hbase.io.HeapSize;
-
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashSet;
@@ -31,6 +26,15 @@
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+
+
+
 /**
  * The LruHashMap is a memory-aware HashMap with a configurable maximum
  * memory footprint.
@@ -62,8 +66,9 @@
   private static final float DEFAULT_LOAD_FACTOR = 0.75f;
   
   /** Memory overhead of this Object (for HeapSize) */
-  private static final int OVERHEAD = 5 * HeapSize.LONG + 2 * HeapSize.INT +
-    2 * HeapSize.FLOAT + 3 * HeapSize.REFERENCE + 1 * HeapSize.ARRAY;
+  private static final int OVERHEAD = 5 * Bytes.SIZEOF_LONG + 
+    2 * Bytes.SIZEOF_INT + 2 * Bytes.SIZEOF_FLOAT + 3 * HeapSize.REFERENCE + 
+    1 * HeapSize.ARRAY;
   
   /** Load factor allowed (usually 75%) */
   private final float loadFactor;
@@ -922,8 +927,8 @@
   protected static class Entry<K extends HeapSize, V extends HeapSize>
   implements Map.Entry<K,V>, HeapSize {
     /** The baseline overhead memory usage of this class */
-    static final int OVERHEAD = 1 * HeapSize.LONG + 5 * HeapSize.REFERENCE + 
-      2 * HeapSize.INT;
+    static final int OVERHEAD = 1 * Bytes.SIZEOF_LONG + 5 * HeapSize.REFERENCE + 
+      2 * Bytes.SIZEOF_INT;
     
     /** The key */
     protected final K key;
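
Worked out with the exact primitive widths (SIZEOF_LONG = 8, SIZEOF_INT = 4,
SIZEOF_FLOAT = 4), the fixed part of the map's overhead above comes to:

    // 5*8 + 2*4 + 2*4 = 56 bytes of primitive fields, plus whatever the
    // HeapSize estimates assign to 3 references and 1 array header.
    int overhead = 56 + 3 * HeapSize.REFERENCE + 1 * HeapSize.ARRAY;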

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java Sat Jun 20 22:29:03 2009
@@ -251,13 +251,13 @@
       byte deleteType = deleteBuffer[deleteOffset];
       
       //Comparing with tail from memcache
-      for(KeyValue mem : tailSet) {
+      for (KeyValue mem : tailSet) {
         
         DeleteCode res = DeleteCompare.deleteCompare(mem, deleteBuffer, 
             deleteRowOffset, deleteRowLen, deleteQualifierOffset, 
             deleteQualifierLen, deleteTimestampOffset, deleteType,
             comparator.getRawComparator());
-        if(res == DeleteCode.DONE) {
+        if (res == DeleteCode.DONE) {
           break;
         } else if (res == DeleteCode.DELETE) {
           deletes.add(mem);

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/QueryMatcher.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/QueryMatcher.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/QueryMatcher.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/QueryMatcher.java Sat Jun 20 22:29:03 2009
@@ -21,9 +21,11 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import java.util.NavigableSet;
+
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.KeyComparator;
 import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.util.Bytes;
 
@@ -45,7 +47,6 @@
  * versions, 
  */
 public class QueryMatcher {
-  
   /**
    * {@link #match} return codes.  These instruct the scanner moving through
    * Memcaches and StoreFiles what to do with the current KeyValue.
@@ -113,27 +114,29 @@
   
   /** Oldest allowed version stamp for TTL enforcement */
   protected long oldestStamp;
-  
+
+  protected Filter filter;
+
   /**
    * Constructs a QueryMatcher for a Get.
    * @param get
-   * @param row
    * @param family
    * @param columns
    * @param ttl
    * @param rowComparator
    */
-  public QueryMatcher(Get get, byte [] row, byte [] family, 
+  public QueryMatcher(Get get, byte [] family, 
       NavigableSet<byte[]> columns, long ttl, KeyComparator rowComparator,
       int maxVersions) {
-    this.row = row;
+    this.row = get.getRow();
+    this.filter = get.getFilter();
     this.tr = get.getTimeRange();
     this.oldestStamp = System.currentTimeMillis() - ttl;
     this.rowComparator = rowComparator;
     this.deletes =  new GetDeleteTracker();
     this.startKey = KeyValue.createFirstOnRow(row);
     // Single branch to deal with two types of Gets (columns vs all in family)
-    if(columns == null || columns.size() == 0) {
+    if (columns == null || columns.size() == 0) {
       this.columns = new WildcardColumnTracker(maxVersions);
     } else {
       this.columns = new ExplicitColumnTracker(columns, maxVersions);
@@ -142,6 +145,7 @@
 
   // For the subclasses.
   protected QueryMatcher() {
+    super();
   }
 
   /**
@@ -151,6 +155,7 @@
    */
   public QueryMatcher(QueryMatcher matcher, byte [] row) {
     this.row = row;
+    this.filter = matcher.filter;
     this.tr = matcher.getTimeRange();
     this.oldestStamp = matcher.getOldestStamp();
     this.rowComparator = matcher.getRowComparator();
@@ -181,10 +186,12 @@
    * @return MatchCode: include, skip, next, done
    */
   public MatchCode match(KeyValue kv) {
-    if(this.columns.done()) {
+    if (this.columns.done()) {
       return MatchCode.DONE;  // done_row
     }
-    
+    if (this.filter != null && this.filter.filterAllRemaining()) {
+      return MatchCode.DONE;
+    }
     // Directly act on KV buffer
     byte [] bytes = kv.getBuffer();
     int offset = kv.getOffset();
@@ -203,15 +210,14 @@
      */ 
     int ret = this.rowComparator.compareRows(row, 0, row.length,
         bytes, offset, rowLength);
-    if(ret <= -1) {
+    if (ret <= -1) {
       // Have reached the next row
       return MatchCode.NEXT;  // got_to_next_row (end)
-    } else if(ret >= 1) {
+    } else if (ret >= 1) {
       // At a previous row
       return MatchCode.SKIP;  // skip_to_cur_row
     }
     offset += rowLength;
-    
     byte familyLength = bytes[offset];
     offset += Bytes.SIZEOF_BYTE + familyLength;
     
@@ -219,7 +225,7 @@
       (offset - kv.getOffset()) - KeyValue.TIMESTAMP_TYPE_SIZE;
     int columnOffset = offset;
     offset += columnLength;
-    
+
     /* Check TTL
      * If expired, go to next KeyValue
      */
@@ -229,7 +235,7 @@
       return MatchCode.NEXT;  // done_row
     }
     offset += Bytes.SIZEOF_LONG;
-    
+
     /* Check TYPE
      * If a delete within (or after) time range, add to deletes
      * Move to next KeyValue
@@ -237,8 +243,8 @@
     byte type = bytes[offset];
     // if delete type == delete family, return done_row
     
-    if(isDelete(type)) {
-      if(tr.withinOrAfterTimeRange(timestamp)) {
+    if (isDelete(type)) {
+      if (tr.withinOrAfterTimeRange(timestamp)) {
         this.deletes.add(bytes, columnOffset, columnLength, timestamp, type);
       }
       return MatchCode.SKIP;  // skip the delete cell.
@@ -247,29 +253,38 @@
     /* Check TimeRange
      * If outside of range, move to next KeyValue
      */
-    if(!tr.withinTimeRange(timestamp)) {
+    if (!tr.withinTimeRange(timestamp)) {
       return MatchCode.SKIP;  // optimization chances here.
     }
-    
+
     /* Check Deletes
      * If deleted, move to next KeyValue 
      */
-    if(!deletes.isEmpty() && deletes.isDeleted(bytes, columnOffset,
+    if (!deletes.isEmpty() && deletes.isDeleted(bytes, columnOffset,
         columnLength, timestamp)) {
       // 2 types of deletes:
       // - delete of a cell or a column: just skip the keyvalues.
       // - delete family: just skip to the next row.
       return MatchCode.SKIP;
     }
-    
+
     /* Check Column and Versions
      * Returns a MatchCode directly, identical language
      * If matched column without enough versions, include
      * If enough versions of this column or does not match, skip
      * If have moved past 
      * If enough versions of everything, 
+     * TODO: No mapping from Filter.ReturnCode to MatchCode.
      */
-    return columns.checkColumn(bytes, columnOffset, columnLength);
+    MatchCode mc = columns.checkColumn(bytes, columnOffset, columnLength);
+    if (mc == MatchCode.INCLUDE && this.filter != null) {
+      switch(this.filter.filterKeyValue(kv)) {
+      case INCLUDE: return MatchCode.INCLUDE;
+      case SKIP: return MatchCode.SKIP;
+      default: return MatchCode.DONE;
+      }
+    }
+    return mc;
   }
 
   // should be in KeyValue.
@@ -310,6 +325,7 @@
   public void reset() {
     this.deletes.reset();
     this.columns.reset();
+    if (this.filter != null) this.filter.reset();
   }
 
   /**
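
The Get path now honors server-side filters: filterAllRemaining() can end the
row early, and for cells the column tracker would INCLUDE, filterKeyValue()
gets the final word (INCLUDE passes through, SKIP drops the cell, anything
else ends the row). A hedged usage sketch, assuming Get exposes a setFilter
counterpart to the getFilter() call above and that ColumnCountGetFilter is
available to the client:

    Get get = new Get(Bytes.toBytes("row1"));
    get.addFamily(Bytes.toBytes("cf"));
    // The filter is reset() per row along with the delete/column trackers.
    get.setFilter(new ColumnCountGetFilter(1));  // stop after one column
    Result result = table.get(get);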

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java Sat Jun 20 22:29:03 2009
@@ -20,22 +20,19 @@
 
 package org.apache.hadoop.hbase.regionserver;
 
+import java.util.NavigableSet;
+
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.RowFilterInterface;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
-
-import java.io.IOException;
-import java.util.NavigableSet;
+import org.apache.hadoop.hbase.util.Bytes;
 
 /**
  * A query matcher that is specifically designed for the scan case.
  */
 public class ScanQueryMatcher extends QueryMatcher {
-
-  private Filter filter;
   // have to support old style filter for now.
   private RowFilterInterface oldFilter;
   // Optimization so we can skip lots of compares when we decide to skip
@@ -94,11 +91,9 @@
       return MatchCode.DONE_SCAN;
     }
 
-    String kvStr = kv.toString();
     byte [] bytes = kv.getBuffer();
     int offset = kv.getOffset();
     int initialOffset = offset; 
-    int kvLength = kv.getLength();
 
     int keyLength = Bytes.toInt(bytes, offset, Bytes.SIZEOF_INT);
     offset += KeyValue.ROW_OFFSET;
@@ -171,13 +166,11 @@
       return MatchCode.SKIP;
     }
 
-    if (deletes.isDeleted(bytes, offset,
-        qualLength, timestamp)) {
+    if (deletes.isDeleted(bytes, offset, qualLength, timestamp)) {
       return MatchCode.SKIP;
     }
-    
-    MatchCode colChecker =
-        columns.checkColumn(bytes, offset, qualLength);
+
+    MatchCode colChecker = columns.checkColumn(bytes, offset, qualLength);
 
     // if SKIP -> SEEK_NEXT_COL
     // if (NEXT,DONE) -> SEEK_NEXT_ROW
@@ -202,8 +195,7 @@
     if (filterResponse == ReturnCode.SKIP)
       return MatchCode.SKIP;
 
-    // else
-    //if (filterResponse == ReturnCode.NEXT_ROW)
+    // else if (filterResponse == ReturnCode.NEXT_ROW)
     stickyNextRow = true;
     return MatchCode.SEEK_NEXT_ROW;
   }
@@ -215,9 +207,7 @@
    * @return <code>true</code> if the row should be filtered.
    */
   public boolean filterEntireRow() {
-    if (filter == null)
-      return false;
-    return filter.filterRow();
+    return filter == null? false: filter.filterRow();
   }
 
   /**
@@ -229,13 +219,12 @@
     this.row = row;
     reset();
   }
-  
+
   @Override
   public void reset() {
     super.reset();
-
     stickyNextRow = false;
     if (filter != null)
       filter.reset();
   }
-}
+}
\ No newline at end of file
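
One consequence of hoisting the filter field into QueryMatcher: super.reset()
now resets the filter too, so the subclass's own null-checked filter.reset()
is redundant, though harmless. A sketch of what the override could reduce to:

    @Override
    public void reset() {
      super.reset();          // clears deletes, columns, and the shared filter
      stickyNextRow = false;  // re-arm the skip-compares optimization per row
    }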

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/Store.java Sat Jun 20 22:29:03 2009
@@ -1468,9 +1468,8 @@
     KeyComparator keyComparator = this.comparator.getRawComparator();
 
     // Column matching and version enforcement
-    QueryMatcher matcher = new QueryMatcher(get, get.getRow(), 
-        this.family.getName(), columns, this.ttl, keyComparator,
-        versionsToReturn(get.getMaxVersions()));
+    QueryMatcher matcher = new QueryMatcher(get, this.family.getName(), columns,
+      this.ttl, keyComparator, versionsToReturn(get.getMaxVersions()));
     
     // Read from Memcache
     if(this.memcache.get(matcher, result)) {
@@ -1495,6 +1494,15 @@
     // Run a GET scan and put results into the specified list 
     scanner.get(result);
   }
+
+  public static class ValueAndSize {
+    public long value;
+    public long sizeAdded;
+    public ValueAndSize(long value, long sizeAdded) {
+      this.value = value;
+      this.sizeAdded = sizeAdded;
+    }
+  }
   
   /**
    * Increments the value for the given row/family/qualifier
@@ -1505,8 +1513,8 @@
    * @return The new value.
    * @throws IOException
    */
-  public long incrementColumnValue(byte [] row, byte [] family,
-      byte [] qualifier, long amount) throws IOException{
+  public ValueAndSize incrementColumnValue(byte [] row, byte [] family,
+      byte [] qualifier, long amount) throws IOException {
     long value = 0;
     List<KeyValue> result = new ArrayList<KeyValue>();
     KeyComparator keyComparator = this.comparator.getRawComparator();
@@ -1516,8 +1524,8 @@
     NavigableSet<byte[]> qualifiers = 
       new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
     qualifiers.add(qualifier);
-    QueryMatcher matcher = new QueryMatcher(get, row, family, qualifiers,
-        this.ttl, keyComparator, 1);
+    QueryMatcher matcher = new QueryMatcher(get, family, qualifiers, this.ttl,
+      keyComparator, 1);
     
     // Read from Memcache
     if(this.memcache.get(matcher, result)) {
@@ -1528,9 +1536,8 @@
       value = Bytes.toLong(buffer, valueOffset, Bytes.SIZEOF_LONG) + amount;
       Bytes.putBytes(buffer, valueOffset, Bytes.toBytes(value), 0, 
           Bytes.SIZEOF_LONG);
-      return value;
+      return new ValueAndSize(value, 0);
     }
-    
     // Check if we even have storefiles
     if(this.storefiles.isEmpty()) {
       return addNewKeyValue(row, family, qualifier, value, amount);
@@ -1553,12 +1560,13 @@
     return addNewKeyValue(row, family, qualifier, value, amount);
   }
   
-  private long addNewKeyValue(byte [] row, byte [] family, byte [] qualifier, 
+  private ValueAndSize addNewKeyValue(byte [] row, byte [] family, byte [] qualifier,
       long value, long amount) {
     long newValue = value + amount;
-    KeyValue newKv = new KeyValue(row, family, qualifier, Bytes.toBytes(newValue));
+    KeyValue newKv = new KeyValue(row, family, qualifier,
+        System.currentTimeMillis(),
+        Bytes.toBytes(newValue));
     add(newKv);
-    return newValue;
+    return new ValueAndSize(newValue, newKv.heapSize());
   }
-  
 }
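
ValueAndSize lets the caller account for heap growth: sizeAdded is 0 when the
counter was rewritten in place in the memcache, and newKv.heapSize() when a
fresh KeyValue had to be added. This is what the HRegion hunk earlier in this
commit consumes:

    Store.ValueAndSize vas =
        store.incrementColumnValue(row, family, qualifier, 1L);
    long newValue = vas.value;                          // post-increment counter
    long size = memcacheSize.addAndGet(vas.sizeAdded);  // grows only on a new KeyValue
    if (isFlushSize(size)) {
      requestFlush();  // as in HRegion: flush is requested outside the row lock
    }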

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java Sat Jun 20 22:29:03 2009
@@ -123,7 +123,8 @@
    * @param result
    * @return true if there are more rows, false if scanner is done
    */
-  public synchronized boolean next(List<KeyValue> result) throws IOException {
+  public synchronized boolean next(List<KeyValue> outResult) throws IOException {
+    List<KeyValue> results = new ArrayList<KeyValue>();
     KeyValue peeked = this.heap.peek();
     if (peeked == null) {
       close();
@@ -136,27 +137,25 @@
       switch(qcode) {
         case INCLUDE:
           KeyValue next = this.heap.next();
-          result.add(next);
+          results.add(next);
           continue;
           
         case DONE:
-          // what happens if we have 0 results?
-          if (result.isEmpty()) {
-            // try the next one.
-            matcher.setRow(this.heap.peek().getRow());
-            continue;
-          }
           if (matcher.filterEntireRow()) {
-            // wow, well, um, reset the result and continue.
-            result.clear();
-            matcher.setRow(heap.peek().getRow());
-            continue;
+            // nuke all results, and then return.
+            results.clear();
           }
 
+          // Copy the buffered results out to the caller's list.
+          outResult.addAll(results);
           return true;
 
         case DONE_SCAN:
           close();
+
+          // Copy the buffered results out to the caller's list.
+          outResult.addAll(results);
+
           return false;
 
         case SEEK_NEXT_ROW:
@@ -178,9 +177,14 @@
           throw new RuntimeException("UNEXPECTED");
       }
     }
-    if(result.size() > 0) {
+
+    if (!results.isEmpty()) {
+      // Copy the buffered results out to the caller's list.
+      outResult.addAll(results);
+
       return true;
     }
+
     // No more keys
     close();
     return false;
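
Buffering into a private list means a row vetoed by filterEntireRow() is
cleared before the caller ever sees it; outResult only receives complete
rows. A sketch of the calling pattern (process() is a placeholder):

    List<KeyValue> row = new ArrayList<KeyValue>();
    while (scanner.next(row)) {  // row arrives complete or empty, never partial
      if (!row.isEmpty()) {
        process(row);            // placeholder for the caller's own logic
      }
      row.clear();               // reuse the list for the next row
    }
    // next() has already called close() by the time it returns false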

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/thrift/ThriftServer.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/thrift/ThriftServer.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/thrift/ThriftServer.java Sat Jun 20 22:29:03 2009
@@ -298,12 +298,15 @@
         HTable table = getTable(tableName);
         Get get = new Get(row);
         get.addColumn(family, qualifier);
-        get.setTimeStamp(timestamp);
+        get.setTimeRange(Long.MIN_VALUE, timestamp);
         get.setMaxVersions(numVersions);
         Result result = table.get(get);
         List<Cell> cells = new ArrayList<Cell>();
-        for(KeyValue kv : result.sorted()) {
-          cells.add(new Cell(kv.getValue(), kv.getTimestamp()));
+        KeyValue [] kvs = result.sorted();
+        if (kvs != null) {
+          for(KeyValue kv : kvs) {
+            cells.add(new Cell(kv.getValue(), kv.getTimestamp()));
+          }
         }
         return ThriftUtilities.cellFromHBase(cells.toArray(new Cell[0]));
       } catch (IOException e) {
@@ -335,7 +338,7 @@
         HTable table = getTable(tableName);
         if (columns == null) {
           Get get = new Get(row);
-          get.setTimeStamp(timestamp);
+          get.setTimeRange(Long.MIN_VALUE, timestamp);
           Result result = table.get(get);
           return ThriftUtilities.rowResultFromHBase(result.getRowResult());
         }
@@ -345,7 +348,7 @@
           byte [][] famAndQf = KeyValue.parseColumn(column);
           get.addColumn(famAndQf[0], famAndQf[1]);
         }
-        get.setTimeStamp(timestamp);
+        get.setTimeRange(Long.MIN_VALUE, timestamp);
         Result result = table.get(get);
         return ThriftUtilities.rowResultFromHBase(result.getRowResult());
       } catch (IOException e) {
@@ -362,12 +365,12 @@
         long timestamp) throws IOError {
       try {
         HTable table = getTable(tableName);
-        Delete delete  = new Delete(row, timestamp, null);
+        Delete delete  = new Delete(row);
         byte [][] famAndQf = KeyValue.parseColumn(column);
-        if(famAndQf[1].length == 0){
-          delete.deleteFamily(famAndQf[0]);
+        if (famAndQf[1].length == 0) {
+          delete.deleteFamily(famAndQf[0], timestamp);
         } else {
-          delete.deleteColumns(famAndQf[0], famAndQf[1]);
+          delete.deleteColumns(famAndQf[0], famAndQf[1], timestamp);
         }
         table.delete(delete);
         
@@ -457,23 +460,36 @@
     public void mutateRowsTs(byte[] tableName, List<BatchMutation> rowBatches, long timestamp)
         throws IOError, IllegalArgument, TException {
       List<Put> puts = new ArrayList<Put>();
-       
+      List<Delete> deletes = new ArrayList<Delete>();
+
       for (BatchMutation batch : rowBatches) {
         byte[] row = batch.row;
         List<Mutation> mutations = batch.mutations;
+        Delete delete = new Delete(row);
         Put put = new Put(row);
         put.setTimeStamp(timestamp);
         for (Mutation m : mutations) {
           byte [][] famAndQf = KeyValue.parseColumn(m.column);
-          put.add(famAndQf[0], famAndQf[1], m.value);
-        }
-        puts.add(put);
+          if (m.isDelete) {
+            delete.deleteColumns(famAndQf[0], famAndQf[1]);
+          } else {
+            put.add(famAndQf[0], famAndQf[1], m.value);
+          }
+        }
+        if (!delete.isEmpty())
+          deletes.add(delete);
+        if (!put.isEmpty())
+          puts.add(put);
       }
 
       HTable table = null;
       try {
         table = getTable(tableName);
-        table.put(puts);
+        if (!puts.isEmpty())
+          table.put(puts);
+        for (Delete del : deletes) {
+          table.delete(del);
+        }
       } catch (IOException e) {
         throw new IOError(e.getMessage());
       } catch (IllegalArgumentException e) {
@@ -534,19 +550,19 @@
     public int scannerOpen(byte[] tableName, byte[] startRow,
             List<byte[]> columns) throws IOError {
         try {
-            HTable table = getTable(tableName);
-            byte[][] columnsArray = null;
-            if ((columns == null) || (columns.size() == 0)) {
-                columnsArray = getAllColumns(table);
-        } else {
-          columnsArray = columns.toArray(new byte[0][]);
+          HTable table = getTable(tableName);
+          byte[][] columnsArray = null;
+          if ((columns == null) || (columns.size() == 0)) {
+            columnsArray = getAllColumns(table);
+          } else {
+            columnsArray = columns.toArray(new byte[0][]);
+          }
+          Scan scan = new Scan(startRow);
+          scan.addColumns(columnsArray);
+          return addScanner(table.getScanner(scan));
+        } catch (IOException e) {
+          throw new IOError(e.getMessage());
         }
-        Scan scan = new Scan(startRow);
-        scan.addColumns(columnsArray);
-        return addScanner(table.getScanner(scan));
-      } catch (IOException e) {
-        throw new IOError(e.getMessage());
-      }
     }
     
     public int scannerOpenWithStop(byte[] tableName, byte[] startRow,
@@ -579,7 +595,7 @@
         }
         Scan scan = new Scan(startRow);
         scan.addColumns(columnsArray);
-        scan.setTimeRange(0, timestamp);
+        scan.setTimeRange(Long.MIN_VALUE, timestamp);
         return addScanner(table.getScanner(scan));
       } catch (IOException e) {
         throw new IOError(e.getMessage());
@@ -599,7 +615,7 @@
         }
         Scan scan = new Scan(startRow, stopRow);
         scan.addColumns(columnsArray);
-        scan.setTimeRange(0, timestamp);
+        scan.setTimeRange(Long.MIN_VALUE, timestamp);
         return addScanner(table.getScanner(scan));
       } catch (IOException e) {
         throw new IOError(e.getMessage());
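
All of the timestamped Thrift calls now translate a single client timestamp
into an open-ended range rather than the removed setTimeStamp() point lookup.
A sketch of the pattern (whether the upper bound is inclusive follows
TimeRange's own convention, so treat the exact boundary as an assumption):

    Get get = new Get(row);
    get.addColumn(family, qualifier);
    // Everything from the beginning of time up to the client's timestamp.
    get.setTimeRange(Long.MIN_VALUE, timestamp);
    get.setMaxVersions(numVersions);
    KeyValue [] kvs = table.get(get).sorted();  // may be null: guard before iterating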

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/Bytes.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/Bytes.java?rev=786919&r1=786918&r2=786919&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/Bytes.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/Bytes.java Sat Jun 20 22:29:03 2009
@@ -41,34 +41,45 @@
 public class Bytes {
   
   /**
-   * Size of long in bytes
+   * Size of boolean in bytes
    */
-  public static final int SIZEOF_LONG = Long.SIZE/Byte.SIZE;
-
+  public static final int SIZEOF_BOOLEAN = Byte.SIZE/Byte.SIZE;
+  
   /**
-   * Size of int in bytes
+   * Size of byte in bytes
    */
-  public static final int SIZEOF_INT = Integer.SIZE/Byte.SIZE;
+  public static final int SIZEOF_BYTE = SIZEOF_BOOLEAN;
   
   /**
-   * Size of short in bytes
+   * Size of char in bytes
    */
-  public static final int SIZEOF_SHORT = Short.SIZE/Byte.SIZE;
-
+  public static final int SIZEOF_CHAR = Character.SIZE/Byte.SIZE;
+  
+  /**
+   * Size of double in bytes
+   */
+  public static final int SIZEOF_DOUBLE = Double.SIZE/Byte.SIZE;
+  
   /**
    * Size of float in bytes
    */
   public static final int SIZEOF_FLOAT = Float.SIZE/Byte.SIZE;
-
+  
   /**
-   * Size of double in bytes
+   * Size of int in bytes
    */
-  public static final int SIZEOF_DOUBLE = Double.SIZE/Byte.SIZE;
+  public static final int SIZEOF_INT = Integer.SIZE/Byte.SIZE;
+  
+  /**
+   * Size of long in bytes
+   */
+  public static final int SIZEOF_LONG = Long.SIZE/Byte.SIZE;
 
   /**
-   * Size of byte in bytes
+   * Size of short in bytes
    */
-  public static final int SIZEOF_BYTE = 1;
+  public static final int SIZEOF_SHORT = Short.SIZE/Byte.SIZE;
+
   
   /**
    * Estimate of size cost to pay beyond payload in jvm for instance of byte [].
@@ -265,11 +276,11 @@
     return result;
   }
 
-  public static String toStringBinary(final byte []b) {
+  public static String toStringBinary(final byte [] b) {
     return toStringBinary(b, 0, b.length);
   }
 
-  public static String toStringBinary(final byte []b, int off, int len) {
+  public static String toStringBinary(final byte [] b, int off, int len) {
     String result = null;
     try {
       String first = new String(b, off, len, "ISO-8859-1");
@@ -1126,4 +1137,5 @@
     }
     return value;
   }
+  
 }
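
The size constants are now derived from the JDK's bit-width fields and listed
alphabetically; the values are fixed by the language specification:

    // Sanity sketch; every assertion holds on any conforming JVM.
    assert Bytes.SIZEOF_BOOLEAN == 1 && Bytes.SIZEOF_BYTE   == 1;
    assert Bytes.SIZEOF_CHAR    == 2 && Bytes.SIZEOF_SHORT  == 2;
    assert Bytes.SIZEOF_INT     == 4 && Bytes.SIZEOF_FLOAT  == 4;
    assert Bytes.SIZEOF_LONG    == 8 && Bytes.SIZEOF_DOUBLE == 8;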


