hbase-commits mailing list archives

From: apurt...@apache.org
Subject: svn commit: r785076 [10/18] - in /hadoop/hbase/trunk_on_hadoop-0.18.3: ./ bin/ conf/ src/java/ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/ src/java/org/apache/hadoop/hbase/filter/ src/java/org/apache/hadoop/hbase/io/ src/...
Date: Tue, 16 Jun 2009 04:34:02 GMT
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java Tue Jun 16 04:33:56 2009
@@ -19,31 +19,28 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.Map;
-import java.util.Random;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.io.HalfHFileReader;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
-import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Hash;
-import org.apache.hadoop.io.RawComparator;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 /**
  * A Store data file.  Stores usually have one or more of these files.  They
@@ -58,7 +55,7 @@
 public class StoreFile implements HConstants {
   static final Log LOG = LogFactory.getLog(StoreFile.class.getName());
 
-  public static final String HFILE_CACHE_SIZE_KEY = "hfile.block.cache.size";
+  private static final String HFILE_CACHE_SIZE_KEY = "hfile.block.cache.size";
 
   private static BlockCache hfileBlockCache = null;
   
@@ -73,7 +70,9 @@
   private Reference reference;
   // If this StoreFile references another, this is the other files path.
   private Path referencePath;
-
+  // Should the block cache be used or not.
+  private boolean blockcache;
+  
   // Keys for metadata stored in backing HFile.
   private static final byte [] MAX_SEQ_ID_KEY = Bytes.toBytes("MAX_SEQ_ID_KEY");
   // Set when we obtain a Reader.
@@ -84,7 +83,7 @@
   // If true, this file was product of a major compaction.  Its then set
   // whenever you get a Reader.
   private AtomicBoolean majorCompaction = null;
-
+  
   /*
    * Regex that will work for straight filenames and for reference names.
    * If reference, then the regex has more than just one group.  Group 1 is
@@ -100,23 +99,27 @@
   private final HBaseConfiguration conf;
 
   /**
-   * Constructor, loads a reader and it's indices, etc. May allocate a substantial
-   * amount of ram depending on the underlying files (10-20MB?).
-   * @param fs
-   * @param p
-   * @param conf
-   * @throws IOException
+   * Constructor, loads a reader and its indices, etc. May allocate a 
+   * substantial amount of ram depending on the underlying files (10-20MB?).
+   * 
+   * @param fs  The current file system to use.
+   * @param p  The path of the file.
+   * @param blockcache  <code>true</code> if the block cache is enabled.
+   * @param conf  The current configuration.
+   * @throws IOException When opening the reader fails.
    */
-  StoreFile(final FileSystem fs, final Path p, final HBaseConfiguration conf) throws IOException {
+  StoreFile(final FileSystem fs, final Path p, final boolean blockcache, 
+      final HBaseConfiguration conf) 
+  throws IOException {
     this.conf = conf;
     this.fs = fs;
     this.path = p;
+    this.blockcache = blockcache;
     if (isReference(p)) {
       this.reference = Reference.read(fs, p);
       this.referencePath = getReferredToFile(this.path);
     }
     this.reader = open();
-
   }
 
   /**
@@ -208,6 +211,12 @@
     return this.sequenceid;
   }
 
+  /**
+   * Returns the block cache or <code>null</code> in case none should be used.
+   * 
+   * @param conf  The current configuration.
+   * @return The block cache or <code>null</code>.
+   */
   public static synchronized BlockCache getBlockCache(HBaseConfiguration conf) {
     if (hfileBlockCache != null)
       return hfileBlockCache;
@@ -221,8 +230,11 @@
     return hfileBlockCache;
   }
 
+  /**
+   * @return the blockcache
+   */
   public BlockCache getBlockCache() {
-    return getBlockCache(conf);
+    return blockcache ? getBlockCache(conf) : null;
   }
 
   /**
@@ -237,8 +249,8 @@
       throw new IllegalAccessError("Already open");
     }
     if (isReference()) {
-      this.reader = new HalfHFileReader(this.fs, this.referencePath, getBlockCache(),
-        this.reference);
+      this.reader = new HalfHFileReader(this.fs, this.referencePath, 
+          getBlockCache(), this.reference);
     } else {
       this.reader = new StoreFileReader(this.fs, this.path, getBlockCache());
     }
@@ -269,13 +281,23 @@
         this.majorCompaction.set(mc);
       }
     }
+
+    // TODO read in bloom filter here, ignore if the column family config says
+    // "no bloom filter" even if there is one in the hfile.
     return this.reader;
   }
-  
+
   /**
    * Override to add some customization on HFile.Reader
    */
   static class StoreFileReader extends HFile.Reader {
+    /**
+     * 
+     * @param fs
+     * @param path
+     * @param cache
+     * @throws IOException
+     */
     public StoreFileReader(FileSystem fs, Path path, BlockCache cache)
         throws IOException {
       super(fs, path, cache);
@@ -296,6 +318,14 @@
    * Override to add some customization on HalfHFileReader.
    */
   static class HalfStoreFileReader extends HalfHFileReader {
+    /**
+     * 
+     * @param fs
+     * @param p
+     * @param c
+     * @param r
+     * @throws IOException
+     */
     public HalfStoreFileReader(FileSystem fs, Path p, BlockCache c, Reference r)
         throws IOException {
       super(fs, p, c, r);
@@ -303,7 +333,8 @@
 
     @Override
     public String toString() {
-      return super.toString() + (isTop()? ", half=top": ", half=bottom");
+      return super.toString() + (isTop()? ", half=top": ", half=bottom") +
+          " splitKey: " + KeyValue.keyToString(splitkey);
     }
 
     @Override
@@ -384,7 +415,7 @@
    */
   public static HFile.Writer getWriter(final FileSystem fs, final Path dir)
   throws IOException {
-    return getWriter(fs, dir, DEFAULT_BLOCKSIZE_SMALL, null, null, false);
+    return getWriter(fs, dir, DEFAULT_BLOCKSIZE_SMALL, null, null);
   }
 
   /**
@@ -397,13 +428,12 @@
    * @param blocksize
    * @param algorithm Pass null to get default.
    * @param c Pass null to get default.
-   * @param filter BloomFilter
    * @return HFile.Writer
    * @throws IOException
    */
   public static HFile.Writer getWriter(final FileSystem fs, final Path dir,
-    final int blocksize, final Compression.Algorithm algorithm,
-    final KeyValue.KeyComparator c, final boolean filter)
+                                       final int blocksize, final Compression.Algorithm algorithm,
+                                       final KeyValue.KeyComparator c)
   throws IOException {
     if (!fs.exists(dir)) {
       fs.mkdirs(dir);
@@ -411,7 +441,7 @@
     Path path = getUniqueFile(fs, dir);
     return new HFile.Writer(fs, path, blocksize,
       algorithm == null? HFile.DEFAULT_COMPRESSION_ALGORITHM: algorithm,
-      c == null? KeyValue.KEY_COMPARATOR: c, filter);
+      c == null? KeyValue.KEY_COMPARATOR: c);
   }
 
   /**
@@ -445,7 +475,6 @@
    * @param dir
    * @param suffix
    * @return Path to a file that doesn't exist at time of this invocation.
-   * @return
    * @throws IOException
    */
   static Path getRandomFilename(final FileSystem fs, final Path dir,
@@ -465,8 +494,8 @@
    * Write file metadata.
    * Call before you call close on the passed <code>w</code> since its written
    * as metadata to that file.
-   *
-   * @param w
+   * 
+   * @param w hfile writer
    * @param maxSequenceId Maximum sequence id.
    * @throws IOException
    */
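
The StoreFile change above threads a per-file blockcache flag through the constructor so that getBlockCache() only hands out the shared, lazily created cache when the column family has caching enabled; a null return means the reader is opened uncached. A minimal plain-Java sketch of that gating pattern follows (BlockCacheGateSketch and its BlockCache interface are stand-ins, not the HBase types):

import java.util.HashMap;
import java.util.Map;

// Minimal sketch of the "one shared cache, per-file opt-in" pattern the
// StoreFile change introduces. BlockCache here is a stand-in interface, not
// org.apache.hadoop.hbase.io.hfile.BlockCache.
class BlockCacheGateSketch {

  interface BlockCache {
    byte[] getBlock(String name);
    void cacheBlock(String name, byte[] block);
  }

  // Shared, lazily created cache, analogous to the static hfileBlockCache field.
  private static BlockCache sharedCache;

  // Per-file flag passed in by the store, analogous to the new constructor arg.
  private final boolean blockcache;

  BlockCacheGateSketch(boolean blockcache) {
    this.blockcache = blockcache;
  }

  static synchronized BlockCache getSharedCache() {
    if (sharedCache == null) {
      final Map<String, byte[]> blocks = new HashMap<String, byte[]>();
      sharedCache = new BlockCache() {
        public byte[] getBlock(String name) { return blocks.get(name); }
        public void cacheBlock(String name, byte[] block) { blocks.put(name, block); }
      };
    }
    return sharedCache;
  }

  // Mirrors the patched getBlockCache(): null means "open the reader uncached".
  BlockCache getBlockCache() {
    return blockcache ? getSharedCache() : null;
  }
}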

Added: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFileGetScan.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFileGetScan.java?rev=785076&view=auto
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFileGetScan.java (added)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFileGetScan.java Tue Jun 16 04:33:56 2009
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.io.hfile.HFileScanner;
+
+/**
+ * Use to execute a get by scanning all the store files in order.
+ */
+public class StoreFileGetScan {
+
+  private List<HFileScanner> scanners;
+  private QueryMatcher matcher;
+
+  private KeyValue startKey;
+  
+  /**
+   * Constructor
+   * @param scanners
+   * @param matcher
+   */
+  public StoreFileGetScan(List<HFileScanner> scanners, QueryMatcher matcher) {
+    this.scanners = scanners;
+    this.matcher = matcher;
+    this.startKey = matcher.getStartKey();
+  }
+
+  /**
+   * Performs a GET operation across multiple StoreFiles.
+   * <p>
+   * This style of StoreFile scanning goes through each
+   * StoreFile in its entirety, most recent first, before
+   * proceeding to the next StoreFile.
+   * <p>
+   * This strategy allows for optimal, stateless (no persisted Scanners)
+   * early-out scenarios.    
+   * @param result List to add results to
+   * @throws IOException
+   */
+  public void get(List<KeyValue> result) throws IOException {
+    for(HFileScanner scanner : this.scanners) {
+      this.matcher.update();
+      if(getStoreFile(scanner, result) || matcher.isDone()) {
+        return;
+      }
+    }
+  }
+  
+  /**
+   * Performs a GET operation on a single StoreFile.
+   * @param scanner
+   * @param result
+   * @return true if done with this store, false if must continue to next
+   * @throws IOException 
+   */
+  public boolean getStoreFile(HFileScanner scanner, List<KeyValue> result) 
+  throws IOException {
+    if(scanner.seekTo(startKey.getBuffer(), startKey.getKeyOffset(),
+        startKey.getKeyLength()) == -1) {
+      // No keys in StoreFile at or after specified startKey
+      // First row may be = our row, so we have to check anyway.
+      byte [] firstKey = scanner.getReader().getFirstKey();
+      short rowLen = Bytes.toShort(firstKey, 0, Bytes.SIZEOF_SHORT);
+      int rowOffset = Bytes.SIZEOF_SHORT;
+      if (this.matcher.rowComparator.compareRows(firstKey, rowOffset, rowLen,
+          startKey.getBuffer(), startKey.getRowOffset(), startKey.getRowLength())
+          != 0)
+        return false;
+      scanner.seekTo();
+    }
+    do {
+      KeyValue kv = scanner.getKeyValue();
+      switch(matcher.match(kv)) {
+        case INCLUDE:
+          result.add(kv);
+          break;
+        case SKIP:
+          break;
+        case NEXT:
+          return false;
+        case DONE:
+          return true;
+      }
+    } while(scanner.next());
+    return false;
+  }
+  
+}
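
StoreFileGetScan.get() above walks the store files newest-first and stops as soon as one per-file pass, or the matcher, reports the query is satisfied; getStoreFile() classifies each KeyValue with the matcher and translates the match code into "keep scanning", "next file", or "done". A simplified sketch of that control flow, with stand-in types in place of HFileScanner and QueryMatcher:

import java.util.Iterator;
import java.util.List;

// Sketch of StoreFileGetScan's control flow: an outer loop over sources
// (newest first) that can stop early, and an inner per-source matching loop.
// Matcher and MatchCode are simplified stand-ins, not the HBase types.
class GetScanSketch {
  enum MatchCode { INCLUDE, SKIP, NEXT, DONE }

  interface Matcher {
    MatchCode match(String kv);   // classify one sorted entry
    boolean isDone();             // query fully satisfied?
    void update();                // called between sources
  }

  /** Outer loop: mirrors get(List<KeyValue>) above. */
  static void get(List<Iterator<String>> sources, Matcher matcher, List<String> result) {
    for (Iterator<String> source : sources) {            // newest source first
      matcher.update();
      if (scanOne(source, matcher, result) || matcher.isDone()) {
        return;                                           // early-out, no more sources
      }
    }
  }

  /** Inner loop: mirrors getStoreFile(); true means the whole get is finished. */
  static boolean scanOne(Iterator<String> sortedKvs, Matcher matcher, List<String> result) {
    while (sortedKvs.hasNext()) {
      String kv = sortedKvs.next();
      switch (matcher.match(kv)) {
        case INCLUDE: result.add(kv); break;   // keep and keep scanning this source
        case SKIP:    break;                   // ignore and keep scanning
        case NEXT:    return false;            // nothing more here; try the next source
        case DONE:    return true;             // query satisfied; stop entirely
      }
    }
    return false;                              // source exhausted, get not finished
  }
}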

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java Tue Jun 16 04:33:56 2009
@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2009 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -21,306 +21,87 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableSet;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
 
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 
 /**
- * A scanner that iterates through HStore files
+ * A KeyValue scanner that iterates over a single HFile
  */
-class StoreFileScanner extends HAbstractScanner
-implements ChangedReadersObserver {
-    // Keys retrieved from the sources
-  private volatile KeyValue keys[];
+class StoreFileScanner implements KeyValueScanner {
   
-  // Readers we go against.
-  private volatile HFileScanner [] scanners;
+  private HFileScanner hfs;
+  private KeyValue cur = null;
   
-  // Store this scanner came out of.
-  private final Store store;
-  
-  // Used around replacement of Readers if they change while we're scanning.
-  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
-
-  private final long now = System.currentTimeMillis();
-
   /**
-   * @param store
-   * @param timestamp
-   * @param columns
-   * @param firstRow
-   * @param deletes Set of running deletes
-   * @throws IOException
+   * Implements a {@link KeyValueScanner} on top of the specified {@link HFileScanner}
+   * @param hfs HFile scanner
    */
-  public StoreFileScanner(final Store store, final long timestamp,
-    final NavigableSet<byte []> columns, final byte [] firstRow)
-  throws IOException {
-    super(timestamp, columns);
-    this.store = store;
-    this.store.addChangedReaderObserver(this);
-    try {
-      openScanner(firstRow);
-    } catch (Exception ex) {
-      close();
-      IOException e = new IOException("HStoreScanner failed construction");
-      e.initCause(ex);
-      throw e;
-    }
+  public StoreFileScanner(HFileScanner hfs) {
+    this.hfs = hfs;
   }
 
-  /*
-   * Go open new scanners and cue them at <code>firstRow</code>.
-   * Closes existing Readers if any.
-   * @param firstRow
-   * @throws IOException
-   */
-  private void openScanner(final byte [] firstRow) throws IOException {
-    List<HFileScanner> s =
-      new ArrayList<HFileScanner>(this.store.getStorefiles().size());
-    Map<Long, StoreFile> map = this.store.getStorefiles().descendingMap();
-    for (StoreFile f: map.values()) {
-       s.add(f.getReader().getScanner());
-    }
-    this.scanners = s.toArray(new HFileScanner [] {});
-    this.keys = new KeyValue[this.scanners.length];
-    // Advance the readers to the first pos.
-    KeyValue firstKey = (firstRow != null && firstRow.length > 0)?
-      new KeyValue(firstRow, HConstants.LATEST_TIMESTAMP): null;
-    for (int i = 0; i < this.scanners.length; i++) {
-      if (firstKey != null) {
-        if (seekTo(i, firstKey)) {
-          continue;
-        }
-      }
-      while (getNext(i)) {
-        if (columnMatch(i)) {
-          break;
-        }
-      }
-    }
+  public String toString() {
+    return "StoreFileScanner[" + hfs.toString() + ", cur=" + cur + "]";
   }
-
-  /**
-   * For a particular column i, find all the matchers defined for the column.
-   * Compare the column family and column key using the matchers. The first one
-   * that matches returns true. If no matchers are successful, return false.
-   * 
-   * @param i index into the keys array
-   * @return true if any of the matchers for the column match the column family
-   * and the column key.
-   * @throws IOException
-   */
-  boolean columnMatch(int i) throws IOException {
-    return columnMatch(keys[i]);
+  
+  public KeyValue peek() {
+    return cur;
   }
-
-  /**
-   * Get the next set of values for this scanner.
-   * 
-   * @param key The key that matched
-   * @param results All the results for <code>key</code>
-   * @return true if a match was found
-   * @throws IOException
-   * 
-   * @see org.apache.hadoop.hbase.regionserver.InternalScanner#next(org.apache.hadoop.hbase.HStoreKey, java.util.SortedMap)
-   */
-  @Override
-  public boolean next(List<KeyValue> results)
-  throws IOException {
-    if (this.scannerClosed) {
-      return false;
-    }
-    this.lock.readLock().lock();
+  
+  public KeyValue next() {
+    KeyValue retKey = cur;
+    cur = hfs.getKeyValue();
     try {
-      // Find the next viable row label (and timestamp).
-      KeyValue viable = getNextViableRow();
-      if (viable == null) {
-        return false;
-      }
-
-      // Grab all the values that match this row/timestamp
-      boolean addedItem = false;
-      for (int i = 0; i < keys.length; i++) {
-        // Fetch the data
-        while ((keys[i] != null) &&
-            (this.store.comparator.compareRows(this.keys[i], viable) == 0)) {
-          // If we are doing a wild card match or there are multiple matchers
-          // per column, we need to scan all the older versions of this row
-          // to pick up the rest of the family members
-          if(!isWildcardScanner()
-              && !isMultipleMatchScanner()
-              && (keys[i].getTimestamp() != viable.getTimestamp())) {
-            break;
-          }
-          if (columnMatch(i)) {
-            // We only want the first result for any specific family member
-            // TODO: Do we have to keep a running list of column entries in
-            // the results across all of the StoreScanner?  Like we do
-            // doing getFull?
-            if (!results.contains(keys[i])) {
-              results.add(keys[i]);
-              addedItem = true;
-            }
-          }
-
-          if (!getNext(i)) {
-            closeSubScanner(i);
-          }
-        }
-        // Advance the current scanner beyond the chosen row, to
-        // a valid timestamp, so we're ready next time.
-        while ((keys[i] != null) &&
-            ((this.store.comparator.compareRows(this.keys[i], viable) <= 0) ||
-                (keys[i].getTimestamp() > this.timestamp) ||
-                !columnMatch(i))) {
-          getNext(i);
-        }
-      }
-      return addedItem;
-    } finally {
-      this.lock.readLock().unlock();
+      // only advance the underlying scanner if we aren't at the end; cur == null implies 'end'.
+      if (cur != null)
+        hfs.next();
+    } catch(IOException e) {
+      // Turn checked exception into runtime exception.
+      throw new RuntimeException(e);
     }
+    return retKey;
   }
-
-  /*
-   * @return An instance of <code>ViableRow</code>
-   * @throws IOException
-   */
-  private KeyValue getNextViableRow() throws IOException {
-    // Find the next viable row label (and timestamp).
-    KeyValue viable = null;
-    long viableTimestamp = -1;
-    long ttl = store.ttl;
-    for (int i = 0; i < keys.length; i++) {
-      // The first key that we find that matches may have a timestamp greater
-      // than the one we're looking for. We have to advance to see if there
-      // is an older version present, since timestamps are sorted descending
-      while (keys[i] != null &&
-          keys[i].getTimestamp() > this.timestamp &&
-          columnMatch(i) &&
-          getNext(i)) {
-        if (columnMatch(i)) {
-          break;
-        }
-      }
-      if((keys[i] != null)
-          // If we get here and keys[i] is not null, we already know that the
-          // column matches and the timestamp of the row is less than or equal
-          // to this.timestamp, so we do not need to test that here
-          && ((viable == null) ||
-            (this.store.comparator.compareRows(this.keys[i], viable) < 0) ||
-            ((this.store.comparator.compareRows(this.keys[i], viable) == 0) &&
-              (keys[i].getTimestamp() > viableTimestamp)))) {
-        if (ttl == HConstants.FOREVER || now < keys[i].getTimestamp() + ttl) {
-          viable = keys[i];
-        } else {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("getNextViableRow :" + keys[i] + ": expired, skipped");
-          }
-        }
+  
+  public boolean seek(KeyValue key) {
+    try {
+      if(!seekAtOrAfter(hfs, key)) {
+        close();
+        return false;
       }
+      cur = hfs.getKeyValue();
+      hfs.next();
+      return true;
+    } catch(IOException ioe) {
+      close();
+      return false;
     }
-    return viable;
   }
-
-  /*
-   * The user didn't want to start scanning at the first row. This method
-   * seeks to the requested row.
-   *
-   * @param i which iterator to advance
-   * @param firstRow seek to this row
-   * @return true if we found the first row and so the scanner is properly
-   * primed or true if the row was not found and this scanner is exhausted.
-   */
-  private boolean seekTo(int i, final KeyValue firstKey)
-  throws IOException {
-    if (firstKey == null) {
-      if (!this.scanners[i].seekTo()) {
-        closeSubScanner(i);
-        return true;
-      }
-    } else {
-      // TODO: sort columns and pass in column as part of key so we get closer.
-      if (!Store.getClosest(this.scanners[i], firstKey)) {
-        closeSubScanner(i);
-        return true;
-      }
-    }
-    this.keys[i] = this.scanners[i].getKeyValue();
-    return isGoodKey(this.keys[i]);
+  
+  public void close() {
+    // Nothing to close on HFileScanner?
+    cur = null;
   }
-
+  
   /**
-   * Get the next value from the specified reader.
    * 
-   * @param i which reader to fetch next value from
-   * @return true if there is more data available
-   */
-  private boolean getNext(int i) throws IOException {
-    boolean result = false;
-    while (true) {
-      if ((this.scanners[i].isSeeked() && !this.scanners[i].next()) ||
-          (!this.scanners[i].isSeeked() && !this.scanners[i].seekTo())) {
-        closeSubScanner(i);
-        break;
-      }
-      this.keys[i] = this.scanners[i].getKeyValue();
-      if (isGoodKey(this.keys[i])) {
-          result = true;
-          break;
-      }
-    }
-    return result;
-  }
-
-  /*
-   * @param kv
-   * @return True if good key candidate.
+   * @param s
+   * @param k
+   * @return
+   * @throws IOException
    */
-  private boolean isGoodKey(final KeyValue kv) {
-    return !Store.isExpired(kv, this.store.ttl, this.now);
-  }
-
-  /** Close down the indicated reader. */
-  private void closeSubScanner(int i) {
-    this.scanners[i] = null;
-    this.keys[i] = null;
-  }
-
-  /** Shut it down! */
-  public void close() {
-    if (!this.scannerClosed) {
-      this.store.deleteChangedReaderObserver(this);
-      try {
-        for(int i = 0; i < this.scanners.length; i++) {
-          closeSubScanner(i);
-        }
-      } finally {
-        this.scannerClosed = true;
-      }
-    }
-  }
-
-  // Implementation of ChangedReadersObserver
-  
-  public void updateReaders() throws IOException {
-    this.lock.writeLock().lock();
-    try {
-      // The keys are currently lined up at the next row to fetch.  Pass in
-      // the current row as 'first' row and readers will be opened and cue'd
-      // up so future call to next will start here.
-      KeyValue viable = getNextViableRow();
-      openScanner(viable.getRow());
-      LOG.debug("Replaced Scanner Readers at row " +
-        viable.getRow().toString());
-    } finally {
-      this.lock.writeLock().unlock();
+  public static boolean seekAtOrAfter(HFileScanner s, KeyValue k)
+  throws IOException {
+    int result = s.seekTo(k.getBuffer(), k.getKeyOffset(), k.getKeyLength());
+    if(result < 0) {
+      // Passed KV is smaller than first KV in file, work from start of file
+      return s.seekTo();
+    } else if(result > 0) {
+      // Passed KV is larger than current KV in file, if there is a next
+      // it is the "after", if not then this scanner is done.
+      return s.next();
     }
+    // Seeked to the exact key
+    return true;
   }
 }
\ No newline at end of file
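
The new StoreFileScanner reduces positioning to seekAtOrAfter(): the underlying seekTo() returns a negative value when the key sorts before the whole file (so scanning starts at the first entry), a positive value when it stopped on the last entry smaller than the key (so one next() lands at-or-after), and zero on an exact hit. A small self-contained sketch of that three-way decision over a sorted list (the Cursor class mimics, but is not, HFileScanner):

import java.util.Collections;
import java.util.List;

// Sketch of seekAtOrAfter() against a minimal cursor over a sorted list.
// Cursor.seekTo(key) follows the same convention assumed above: -1 if the key
// is before the first entry, 0 on an exact hit, 1 if it stopped on the last
// entry smaller than the key.
class SeekAtOrAfterSketch {

  static class Cursor {
    private final List<String> keys;
    private int pos = -1;
    Cursor(List<String> keys) { this.keys = keys; }

    int seekTo(String key) {
      int i = Collections.binarySearch(keys, key);
      if (i >= 0) { pos = i; return 0; }          // exact hit
      int insertion = -i - 1;
      if (insertion == 0) { return -1; }          // key before first entry
      pos = insertion - 1;                        // last entry smaller than key
      return 1;
    }
    boolean seekToFirst() { pos = 0; return !keys.isEmpty(); }
    boolean next() { return ++pos < keys.size(); }
    String current() { return keys.get(pos); }
  }

  /** Positions the cursor on the first entry >= key; false if no such entry exists. */
  static boolean seekAtOrAfter(Cursor c, String key) {
    int result = c.seekTo(key);
    if (result < 0) { return c.seekToFirst(); }   // key before the file: start at the top
    if (result > 0) { return c.next(); }          // stopped just before the key: step once
    return true;                                  // exact match
  }
}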

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java Tue Jun 16 04:33:56 2009
@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2009 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -25,288 +25,236 @@
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableSet;
-import java.util.TreeSet;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.filter.RowFilterInterface;
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 
 /**
- * Scanner scans both the memcache and the HStore
+ * Scanner scans both the memcache and the HStore. Coalesces the KeyValue
+ * stream into a List<KeyValue> for a single row.
  */
-class StoreScanner implements InternalScanner,  ChangedReadersObserver {
+class StoreScanner implements KeyValueScanner, InternalScanner,
+ChangedReadersObserver {
   static final Log LOG = LogFactory.getLog(StoreScanner.class);
 
-  private InternalScanner [] scanners;
-  private List<KeyValue> [] resultSets;
-  private boolean wildcardMatch = false;
-  private boolean multipleMatchers = false;
-  private RowFilterInterface dataFilter;
   private Store store;
-  private final long timestamp;
-  private final NavigableSet<byte []> columns;
-  
-  // Indices for memcache scanner and hstorefile scanner.
-  private static final int MEMS_INDEX = 0;
-  private static final int HSFS_INDEX = MEMS_INDEX + 1;
-  
+
+  private ScanQueryMatcher matcher;
+
+  private KeyValueHeap heap;
+
   // Used around transition from no storefile to the first.
   private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
 
   // Used to indicate that the scanner has closed (see HBASE-1107)
   private final AtomicBoolean closing = new AtomicBoolean(false);
 
-  /** Create an Scanner with a handle on the memcache and HStore files. */
-  @SuppressWarnings("unchecked")
-  StoreScanner(Store store, final NavigableSet<byte []> targetCols,
-    byte [] firstRow, long timestamp, RowFilterInterface filter) 
-  throws IOException {
+  /**
+   * Opens a scanner across memcache, snapshot, and all StoreFiles.
+   */
+  StoreScanner(Store store, Scan scan, final NavigableSet<byte[]> columns) {
     this.store = store;
-    this.dataFilter = filter;
-    if (null != dataFilter) {
-      dataFilter.reset();
-    }
-    this.scanners = new InternalScanner[2];
-    this.resultSets = new List[scanners.length];
-    // Save these args in case we need them later handling change in readers
-    // See updateReaders below.
-    this.timestamp = timestamp;
-    this.columns = targetCols;
-    try {
-      scanners[MEMS_INDEX] =
-        store.memcache.getScanner(timestamp, targetCols, firstRow);
-      scanners[HSFS_INDEX] =
-        new StoreFileScanner(store, timestamp, targetCols, firstRow);
-      for (int i = MEMS_INDEX; i < scanners.length; i++) {
-        checkScannerFlags(i);
-      }
-    } catch (IOException e) {
-      doClose();
-      throw e;
-    }
-    
-    // Advance to the first key in each scanner.
-    // All results will match the required column-set and scanTime.
-    for (int i = MEMS_INDEX; i < scanners.length; i++) {
-      setupScanner(i);
+    matcher = new ScanQueryMatcher(scan, store.getFamily().getName(),
+        columns, store.ttl, store.comparator.getRawComparator(),
+        store.versionsToReturn(scan.getMaxVersions()));
+
+    List<KeyValueScanner> scanners = getStoreFileScanners();
+    scanners.add(store.memcache.getScanner());
+
+    // Seek all scanners to the initial key
+    for(KeyValueScanner scanner : scanners) {
+      scanner.seek(matcher.getStartKey());
     }
+
+    // Combine all seeked scanners with a heap
+    heap = new KeyValueHeap(
+        scanners.toArray(new KeyValueScanner[scanners.size()]), store.comparator);
+
     this.store.addChangedReaderObserver(this);
   }
-  
-  /*
-   * @param i Index.
-   */
-  private void checkScannerFlags(final int i) {
-    if (this.scanners[i].isWildcardScanner()) {
-      this.wildcardMatch = true;
-    }
-    if (this.scanners[i].isMultipleMatchScanner()) {
-      this.multipleMatchers = true;
+
+  // Constructor for testing.
+  StoreScanner(Scan scan, byte [] colFamily,
+      long ttl, KeyValue.KVComparator comparator,
+      final NavigableSet<byte[]> columns,
+      KeyValueScanner [] scanners) {
+    this.store = null;
+    this.matcher = new ScanQueryMatcher(scan, colFamily, columns, ttl, 
+        comparator.getRawComparator(), scan.getMaxVersions());
+
+    // Seek all scanners to the initial key
+    for(KeyValueScanner scanner : scanners) {
+      scanner.seek(matcher.getStartKey());
     }
+
+    heap = new KeyValueHeap(
+        scanners, comparator);
   }
-  
-  /*
-   * Do scanner setup.
-   * @param i
-   * @throws IOException
-   */
-  private void setupScanner(final int i) throws IOException {
-    this.resultSets[i] = new ArrayList<KeyValue>();
-    if (this.scanners[i] != null && !this.scanners[i].next(this.resultSets[i])) {
-      closeScanner(i);
-    }
+
+  public KeyValue peek() {
+    return this.heap.peek();
   }
 
-  /** @return true if the scanner is a wild card scanner */
-  public boolean isWildcardScanner() {
-    return this.wildcardMatch;
+  public KeyValue next() {
+    // throw runtime exception perhaps?
+    throw new RuntimeException("Never call StoreScanner.next()");
   }
 
-  /** @return true if the scanner is a multiple match scanner */
-  public boolean isMultipleMatchScanner() {
-    return this.multipleMatchers;
+  public void close() {
+    this.closing.set(true);
+    // under test, we don't have a this.store
+    if (this.store != null)
+      this.store.deleteChangedReaderObserver(this);
+    this.heap.close();
   }
 
-  public boolean next(List<KeyValue> results)
-  throws IOException {
-    this.lock.readLock().lock();
-    try {
-    // Filtered flag is set by filters.  If a cell has been 'filtered out'
-    // -- i.e. it is not to be returned to the caller -- the flag is 'true'.
-    boolean filtered = true;
-    boolean moreToFollow = true;
-    while (filtered && moreToFollow) {
-      // Find the lowest-possible key.
-      KeyValue chosen = null;
-      long chosenTimestamp = -1;
-      for (int i = 0; i < this.scanners.length; i++) {
-        KeyValue kv = this.resultSets[i] == null || this.resultSets[i].isEmpty()?
-          null: this.resultSets[i].get(0);
-        if (kv == null) {
+  public boolean seek(KeyValue key) {
+
+    return this.heap.seek(key);
+  }
+
+  /**
+   * Get the next row of values from this Store.
+   * @param result
+   * @return true if there are more rows, false if scanner is done
+   */
+  public boolean next(List<KeyValue> result) throws IOException {
+    KeyValue peeked = this.heap.peek();
+    if (peeked == null) {
+      close();
+      return false;
+    }
+    matcher.setRow(peeked.getRow());
+    KeyValue kv;
+    while((kv = this.heap.peek()) != null) {
+      QueryMatcher.MatchCode mc = matcher.match(kv);
+      switch(mc) {
+        case INCLUDE:
+          KeyValue next = this.heap.next();
+          result.add(next);
           continue;
-        }
-        if (scanners[i] != null &&
-            (chosen == null ||
-              (this.store.comparator.compareRows(kv, chosen) < 0) ||
-              ((this.store.comparator.compareRows(kv, chosen) == 0) &&
-              (kv.getTimestamp() > chosenTimestamp)))) {
-          chosen = kv;
-          chosenTimestamp = chosen.getTimestamp();
-        }
-      }
+        case DONE:
+          // what happens if we have 0 results?
+          if (result.isEmpty()) {
+            // try the next one.
+            matcher.setRow(this.heap.peek().getRow());
+            continue;
+          }
+          if (matcher.filterEntireRow()) {
+            // wow, well, um, reset the result and continue.
+            result.clear();
+            matcher.setRow(heap.peek().getRow());
+            continue;
+          }
+
+          return true;
 
-      // Filter whole row by row key?
-      filtered = dataFilter == null || chosen == null? false:
-        dataFilter.filterRowKey(chosen.getBuffer(), chosen.getRowOffset(),
-          chosen.getRowLength());
-
-      // Store results for each sub-scanner.
-      if (chosenTimestamp >= 0 && !filtered) {
-        NavigableSet<KeyValue> deletes =
-          new TreeSet<KeyValue>(this.store.comparatorIgnoringType);
-        for (int i = 0; i < scanners.length && !filtered; i++) {
-          if ((scanners[i] != null && !filtered && moreToFollow &&
-              this.resultSets[i] != null && !this.resultSets[i].isEmpty())) {
-            // Test this resultset is for the 'chosen' row.
-            KeyValue firstkv = resultSets[i].get(0);
-            if (!this.store.comparator.matchingRows(firstkv, chosen)) {
-              continue;
-            }
-            // Its for the 'chosen' row, work it.
-            for (KeyValue kv: resultSets[i]) {
-              if (kv.isDeleteType()) {
-                deletes.add(kv);
-              } else if ((deletes.isEmpty() || !deletes.contains(kv)) &&
-                  !filtered && moreToFollow && !results.contains(kv)) {
-                if (this.dataFilter != null) {
-                  // Filter whole row by column data?
-                  int rowlength = kv.getRowLength();
-                  int columnoffset = kv.getColumnOffset(rowlength);
-                  filtered = dataFilter.filterColumn(kv.getBuffer(),
-                      kv.getRowOffset(), rowlength,
-                    kv.getBuffer(), columnoffset, kv.getColumnLength(columnoffset),
-                    kv.getBuffer(), kv.getValueOffset(), kv.getValueLength());
-                  if (filtered) {
-                    results.clear();
-                    break;
-                  }
-                }
-                results.add(kv);
-                /* REMOVING BECAUSE COULD BE BUNCH OF DELETES IN RESULTS
-                   AND WE WANT TO INCLUDE THEM -- below short-circuit is
-                   probably not wanted.
-                // If we are doing a wild card match or there are multiple
-                // matchers per column, we need to scan all the older versions of 
-                // this row to pick up the rest of the family members
-                if (!wildcardMatch && !multipleMatchers &&
-                    (kv.getTimestamp() != chosenTimestamp)) {
-                  break;
-                }
-                */
-              }
-            }
-            // Move on to next row.
-            resultSets[i].clear();
-            if (!scanners[i].next(resultSets[i])) {
-              closeScanner(i);
-            }
+        case DONE_SCAN:
+          close();
+          return false;
+
+        case SEEK_NEXT_ROW:
+          // TODO see comments in SEEK_NEXT_COL
+          /*
+          KeyValue rowToSeek =
+              new KeyValue(kv.getRow(),
+                  0,
+                  KeyValue.Type.Minimum);
+          heap.seek(rowToSeek);
+           */
+          heap.next();
+          break;
+
+        case SEEK_NEXT_COL:
+          // TODO hfile needs 'hinted' seeking to prevent it from
+          // reseeking from the start of the block on every dang seek.
+          // We need that API and expose it the scanner chain.
+          /*
+          ColumnCount hint = matcher.getSeekColumn();
+          KeyValue colToSeek;
+          if (hint == null) {
+            // seek to the 'last' key on this column, this is defined
+            // as the key with the same row, fam, qualifier,
+            // smallest timestamp, largest type.
+            colToSeek =
+                new KeyValue(kv.getRow(),
+                    kv.getFamily(),
+                    kv.getColumn(),
+                    Long.MIN_VALUE,
+                    KeyValue.Type.Minimum);
+          } else {
+            // This is ugmo.  Move into KeyValue convenience method.
+            // First key on a column is:
+            // same row, cf, qualifier, max_timestamp, max_type, no value.
+            colToSeek =
+                new KeyValue(kv.getRow(),
+                    0,
+                    kv.getRow().length,
+
+                    kv.getFamily(),
+                    0,
+                    kv.getFamily().length,
+
+                    hint.getBuffer(),
+                    hint.getOffset(),
+                    hint.getLength(),
+
+                    Long.MAX_VALUE,
+                    KeyValue.Type.Maximum,
+                    null,
+                    0,
+                    0);
           }
-        }
-      }
+          heap.seek(colToSeek);
+           */
 
-      moreToFollow = chosenTimestamp >= 0;
-      if (dataFilter != null) {
-        if (dataFilter.filterAllRemaining()) {
-          moreToFollow = false;
-        }
-      }
+          heap.next();
+          break;
 
-      if (results.isEmpty() && !filtered) {
-        // There were no results found for this row.  Marked it as 
-        // 'filtered'-out otherwise we will not move on to the next row.
-        filtered = true;
-      }
-    }
-    
-    // If we got no results, then there is no more to follow.
-    if (results == null || results.isEmpty()) {
-      moreToFollow = false;
-    }
-    
-    // Make sure scanners closed if no more results
-    if (!moreToFollow) {
-      for (int i = 0; i < scanners.length; i++) {
-        if (null != scanners[i]) {
-          closeScanner(i);
-        }
+        case SKIP:
+          this.heap.next();
+          break;
       }
     }
-    
-    return moreToFollow;
-    } finally {
-      this.lock.readLock().unlock();
+    if(result.size() > 0) {
+      return true;
     }
+    // No more keys
+    close();
+    return false;
   }
 
-  /** Shut down a single scanner */
-  void closeScanner(int i) {
-    try {
-      try {
-        scanners[i].close();
-      } catch (IOException e) {
-        LOG.warn(Bytes.toString(store.storeName) + " failed closing scanner " +
-          i, e);
-      }
-    } finally {
-      scanners[i] = null;
-      resultSets[i] = null;
+  private List<KeyValueScanner> getStoreFileScanners() {
+    List<HFileScanner> s =
+      new ArrayList<HFileScanner>(this.store.getStorefilesCount());
+    Map<Long, StoreFile> map = this.store.getStorefiles().descendingMap();
+    for(StoreFile sf : map.values()) {
+      s.add(sf.getReader().getScanner());
+    }
+    List<KeyValueScanner> scanners =
+      new ArrayList<KeyValueScanner>(s.size()+1);
+    for(HFileScanner hfs : s) {
+      scanners.add(new StoreFileScanner(hfs));
     }
+    return scanners;
   }
 
-  public void close() {
-    this.closing.set(true);
-    this.store.deleteChangedReaderObserver(this);
-    doClose();
-  }
-  
-  private void doClose() {
-    for (int i = MEMS_INDEX; i < scanners.length; i++) {
-      if (scanners[i] != null) {
-        closeScanner(i);
-      }
-    }
-  }
-  
   // Implementation of ChangedReadersObserver
-  
   public void updateReaders() throws IOException {
     if (this.closing.get()) {
       return;
     }
     this.lock.writeLock().lock();
     try {
-      Map<Long, StoreFile> map = this.store.getStorefiles();
-      if (this.scanners[HSFS_INDEX] == null && map != null && map.size() > 0) {
-        // Presume that we went from no readers to at least one -- need to put
-        // a HStoreScanner in place.
-        try {
-          // I think its safe getting key from mem at this stage -- it shouldn't have
-          // been flushed yet
-          // TODO: MAKE SURE WE UPDATE FROM TRUNNK.
-          this.scanners[HSFS_INDEX] = new StoreFileScanner(this.store,
-              this.timestamp, this. columns, this.resultSets[MEMS_INDEX].get(0).getRow());
-          checkScannerFlags(HSFS_INDEX);
-          setupScanner(HSFS_INDEX);
-          LOG.debug("Added a StoreFileScanner to outstanding HStoreScanner");
-        } catch (IOException e) {
-          doClose();
-          throw e;
-        }
-      }
+      // Could do this pretty nicely with KeyValueHeap, but the existing
+      // implementation of this method only updated if no existing storefiles?
+      // Let's discuss.
+      return;
     } finally {
       this.lock.writeLock().unlock();
     }
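
The rewritten StoreScanner seeds every KeyValueScanner (one per store file plus the memcache) at the matcher's start key and then merges them through a KeyValueHeap, so peek()/next() always yield the globally smallest KeyValue across all sources. A sketch of that merge idea using a standard PriorityQueue and stand-in types (the real heap orders KeyValueScanners by peek() with the store's KeyValue comparator):

import java.util.Comparator;
import java.util.Iterator;
import java.util.PriorityQueue;

// Sketch of the KeyValueHeap idea: several already-sorted sources are merged
// into one sorted stream by keeping them in a priority queue ordered by each
// source's next element. PeekingSource is a stand-in for KeyValueScanner.
class MergingHeapSketch<T> {

  static class PeekingSource<T> {
    private final Iterator<T> it;
    private T next;
    PeekingSource(Iterator<T> it) {
      this.it = it;
      this.next = it.hasNext() ? it.next() : null;
    }
    T peek() { return next; }
    T poll() { T r = next; next = it.hasNext() ? it.next() : null; return r; }
  }

  private final PriorityQueue<PeekingSource<T>> heap;

  MergingHeapSketch(final Comparator<T> cmp) {
    this.heap = new PriorityQueue<PeekingSource<T>>(11, new Comparator<PeekingSource<T>>() {
      public int compare(PeekingSource<T> a, PeekingSource<T> b) {
        return cmp.compare(a.peek(), b.peek());
      }
    });
  }

  /** Register one sorted source; empty sources are ignored. */
  void add(Iterator<T> sortedSource) {
    PeekingSource<T> p = new PeekingSource<T>(sortedSource);
    if (p.peek() != null) {
      heap.add(p);
    }
  }

  /** Smallest element across all sources, or null when everything is drained. */
  T peek() {
    return heap.isEmpty() ? null : heap.peek().peek();
  }

  /** Remove and return the smallest element, re-inserting its source if non-empty. */
  T next() {
    PeekingSource<T> top = heap.poll();
    if (top == null) {
      return null;
    }
    T result = top.poll();
    if (top.peek() != null) {
      heap.add(top);   // source still has data; restore heap ordering
    }
    return result;
  }
}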

Added: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java?rev=785076&view=auto
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java (added)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java Tue Jun 16 04:33:56 2009
@@ -0,0 +1,314 @@
+/**
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hbase.regionserver.QueryMatcher.MatchCode;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * This class is used for the tracking and enforcement of columns and numbers 
+ * of versions during the course of a Get or Scan operation, when all available
+ * column qualifiers have been asked for in the query.
+ * <p>
+ * This class is utilized by {@link QueryMatcher} through two methods:
+ * <ul><li>{@link #checkColumn} is called when a Put satisfies all other
+ * conditions of the query.  This method returns a {@link MatchCode} to define
+ * what action should be taken.
+ * <li>{@link #update} is called at the end of every StoreFile or Memcache.
+ * <p>
+ * This class is NOT thread-safe as queries are never multi-threaded 
+ */
+public class WildcardColumnTracker implements ColumnTracker {
+  
+  private int maxVersions;
+  
+  protected List<ColumnCount> columns;
+  private int index;
+  private ColumnCount column;
+  
+  private List<ColumnCount> newColumns; 
+  private int newIndex;
+  private ColumnCount newColumn;
+  
+  /**
+   * Default constructor.
+   * @param maxVersions maximum versions to return per columns
+   */
+  public WildcardColumnTracker(int maxVersions) {
+    this.maxVersions = maxVersions;
+    reset();
+  }
+  
+  public void reset() {
+    this.index = 0;
+    this.column = null;
+    this.columns = null;
+    this.newColumns = new ArrayList<ColumnCount>();
+    this.newIndex = 0;
+    this.newColumn = null;
+  }
+  
+  /**
+   * Can never early-out from reading more storefiles in Wildcard case.
+   */
+  public boolean done() {
+    return false;
+  }
+
+  // wildcard scanners never have column hints.
+  public ColumnCount getColumnHint() {
+    return null;
+  }
+
+  /**
+   * Checks against the parameters of the query and the columns which have
+   * already been processed by this query.
+   * @param bytes KeyValue buffer
+   * @param offset offset to the start of the qualifier
+   * @param length length of the qualifier
+   * @return MatchCode telling QueryMatcher what action to take
+   */
+  public MatchCode checkColumn(byte [] bytes, int offset, int length) {
+
+    // Nothing to match against, add to new and include
+    if(this.column == null && this.newColumn == null) {
+      newColumns.add(new ColumnCount(bytes, offset, length, 1));
+      this.newColumn = newColumns.get(newIndex);
+      return MatchCode.INCLUDE;
+    }
+    
+    // Nothing old, compare against new
+    if(this.column == null && this.newColumn != null) {
+      int ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
+          newColumn.getLength(), bytes, offset, length);
+      
+      // Same column
+      if(ret == 0) {
+        if(newColumn.increment() > this.maxVersions) {
+          return MatchCode.SKIP;
+        }
+        return MatchCode.INCLUDE;
+      }
+      
+      // Specified column is bigger than current column
+      // Move down current column and check again
+      if(ret <= -1) {
+        if(++newIndex == newColumns.size()) {
+          // No more, add to end and include
+          newColumns.add(new ColumnCount(bytes, offset, length, 1));
+          this.newColumn = newColumns.get(newIndex);
+          return MatchCode.INCLUDE;
+        }
+        this.newColumn = newColumns.get(newIndex);
+        return checkColumn(bytes, offset, length);
+      }
+      
+      // ret >= 1
+      // Specified column is smaller than current column
+      // Nothing to match against, add to new and include
+      newColumns.add(new ColumnCount(bytes, offset, length, 1));
+      this.newColumn = newColumns.get(++newIndex);
+      return MatchCode.INCLUDE;
+    }
+    
+    // Nothing new, compare against old
+    if(this.newColumn == null && this.column != null) {
+      int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
+          column.getLength(), bytes, offset, length);
+      
+      // Same column
+      if(ret == 0) {
+        if(column.increment() > this.maxVersions) {
+          return MatchCode.SKIP;
+        }
+        return MatchCode.INCLUDE;
+      }
+      
+      // Specified column is bigger than current column
+      // Move down current column and check again
+      if(ret <= -1) {
+        if(++index == columns.size()) {
+          // No more, add to new and include (new was empty prior to this)
+          newColumns.add(new ColumnCount(bytes, offset, length, 1));
+          this.newColumn = newColumns.get(newIndex);
+          this.column = null;
+          return MatchCode.INCLUDE;
+        }
+        this.column = columns.get(index);
+        return checkColumn(bytes, offset, length);
+      }
+      
+      // ret >= 1
+      // Specified column is smaller than current column
+      // Nothing to match against, add to new and include
+      newColumns.add(new ColumnCount(bytes, offset, length, 1));
+      this.newColumn = newColumns.get(newIndex);
+      return MatchCode.INCLUDE;
+    }
+    
+    
+    // There are new and old, figure which to check first
+    int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
+        column.getLength(), newColumn.getBuffer(), newColumn.getOffset(), 
+        newColumn.getLength());
+        
+    // Old is smaller than new, compare against old
+    if(ret <= -1) {
+      ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
+          column.getLength(), bytes, offset, length);
+      
+      // Same column
+      if(ret == 0) {
+        if(column.increment() > this.maxVersions) {
+          return MatchCode.SKIP;
+        }
+        return MatchCode.INCLUDE;
+      }
+      
+      // Specified column is bigger than current column
+      // Move down current column and check again
+      if(ret <= -1) {
+        if(++index == columns.size()) {
+          this.column = null;
+        } else {
+          this.column = columns.get(index);
+        }
+        return checkColumn(bytes, offset, length);
+      }
+      
+      // ret >= 1
+      // Specified column is smaller than current column
+      // Nothing to match against, add to new and include
+      newColumns.add(new ColumnCount(bytes, offset, length, 1));
+      return MatchCode.INCLUDE;
+    }
+    
+    // Cannot be equal, so ret >= 1
+    // New is smaller than old, compare against new
+    
+    ret = Bytes.compareTo(newColumn.getBuffer(), newColumn.getOffset(), 
+        newColumn.getLength(), bytes, offset, length);
+    
+    // Same column
+    if(ret == 0) {
+      if(newColumn.increment() > this.maxVersions) {
+        return MatchCode.SKIP;
+      }
+      return MatchCode.INCLUDE;
+    }
+    
+    // Specified column is bigger than current column
+    // Move down current column and check again
+    if(ret <= -1) {
+      if(++newIndex == newColumns.size()) {
+        this.newColumn = null;
+      } else {
+        this.newColumn = newColumns.get(newIndex);
+      }
+      return checkColumn(bytes, offset, length);
+    }
+    
+    // ret >= 1
+    // Specified column is smaller than current column
+    // Nothing to match against, add to new and include
+    newColumns.add(new ColumnCount(bytes, offset, length, 1));
+    return MatchCode.INCLUDE;
+  }
+  
+  /**
+   * Called at the end of every StoreFile or Memcache.
+   */
+  public void update() {
+    // If no previous columns, use new columns and return
+    if(this.columns == null || this.columns.size() == 0) {
+      if(this.newColumns.size() > 0){
+        finalize(newColumns);
+      }
+      return;
+    }
+    
+    // If no new columns, retain previous columns and return
+    if(this.newColumns.size() == 0) {
+      this.index = 0;
+      this.column = this.columns.get(index);
+      return;
+    }
+    
+    // Merge previous columns with new columns
+    // There will be no overlapping
+    List<ColumnCount> mergeColumns = new ArrayList<ColumnCount>(
+        columns.size() + newColumns.size());
+    index = 0;
+    newIndex = 0;
+    column = columns.get(0);
+    newColumn = newColumns.get(0);
+    while(true) {
+      int ret = Bytes.compareTo(
+          column.getBuffer(), column.getOffset(),column.getLength(), 
+          newColumn.getBuffer(), newColumn.getOffset(), newColumn.getLength());
+      
+      // Existing is smaller than new, add existing and iterate it
+      if(ret <= -1) {
+        mergeColumns.add(column);
+        if(++index == columns.size()) {
+          // No more existing left, merge down rest of new and return 
+          mergeDown(mergeColumns, newColumns, newIndex);
+          finalize(mergeColumns);
+          return;
+        }
+        column = columns.get(index);
+        continue;
+      }
+      
+      // New is smaller than existing, add new and iterate it
+      mergeColumns.add(newColumn);
+      if(++newIndex == newColumns.size()) {
+        // No more new left, merge down rest of existing and return
+        mergeDown(mergeColumns, columns, index);
+        finalize(mergeColumns);
+        return;
+      }
+      newColumn = newColumns.get(newIndex);
+      continue;
+    }
+  }
+  
+  private void mergeDown(List<ColumnCount> mergeColumns, 
+      List<ColumnCount> srcColumns, int srcIndex) {
+    int index = srcIndex;
+    while(index < srcColumns.size()) {
+      mergeColumns.add(srcColumns.get(index++));
+    }
+  }
+  
+  private void finalize(List<ColumnCount> mergeColumns) {
+    this.columns = mergeColumns;
+    this.index = 0;
+    this.column = this.columns.size() > 0? columns.get(index) : null;
+    
+    this.newColumns = new ArrayList<ColumnCount>();
+    this.newIndex = 0;
+    this.newColumn = null;
+  }
+  
+}
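
WildcardColumnTracker.update() above merges the columns collected from the source just finished (newColumns) into the columns already seen (columns); both lists are sorted and, per the comment, never overlap, so a plain two-pointer merge is sufficient. A generic sketch of that merge step with a stand-in element type instead of ColumnCount:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

// Sketch of the sorted, non-overlapping merge performed at the end of
// WildcardColumnTracker.update(). The real code merges ColumnCount entries
// and compares them with Bytes.compareTo on the qualifier bytes.
class SortedMergeSketch {
  static <T> List<T> merge(List<T> existing, List<T> fresh, Comparator<T> cmp) {
    List<T> merged = new ArrayList<T>(existing.size() + fresh.size());
    int i = 0;
    int j = 0;
    while (i < existing.size() && j < fresh.size()) {
      // The lists never share an element, so no equality case is needed.
      if (cmp.compare(existing.get(i), fresh.get(j)) < 0) {
        merged.add(existing.get(i++));
      } else {
        merged.add(fresh.get(j++));
      }
    }
    while (i < existing.size()) { merged.add(existing.get(i++)); }  // drain leftovers
    while (j < fresh.size())    { merged.add(fresh.get(j++)); }
    return merged;
  }
}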

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/RowController.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/RowController.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/RowController.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/RowController.java Tue Jun 16 04:33:56 2009
@@ -24,7 +24,9 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.rest.descriptors.RowUpdateDescriptor;
@@ -82,7 +84,6 @@
       throws HBaseRestException {
     RowModel innerModel = getModel();
 
-    BatchUpdate b;
     RowUpdateDescriptor rud = parser
         .getRowUpdateDescriptor(input, pathSegments);
 
@@ -92,14 +93,15 @@
       return;
     }
 
-    b = new BatchUpdate(rud.getRowName());
+    Put put = new Put(Bytes.toBytes(rud.getRowName()));
 
     for (byte[] key : rud.getColVals().keySet()) {
-      b.put(key, rud.getColVals().get(key));
+      byte [][] famAndQf = KeyValue.parseColumn(key);
+      put.add(famAndQf[0], famAndQf[1], rud.getColVals().get(key));
     }
 
     try {
-      innerModel.post(rud.getTableName().getBytes(), b);
+      innerModel.post(rud.getTableName().getBytes(), put);
       s.setOK();
     } catch (HBaseRestException e) {
       s.setUnsupportedMediaType(e.getMessage());
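
The RowController change swaps BatchUpdate for Put and splits each old-style column name into family and qualifier with KeyValue.parseColumn before calling put.add(family, qualifier, value). A stand-alone illustration of that "family:qualifier" split (this mirrors what parseColumn is used for here, not the actual HBase implementation):

// Sketch of splitting an old-style column name of the form "family:qualifier"
// into its two halves at the first ':' byte. Hypothetical helper for
// illustration only.
class ColumnSplitSketch {
  static byte[][] splitColumn(byte[] column) {
    for (int i = 0; i < column.length; i++) {
      if (column[i] == ':') {
        byte[] family = new byte[i];
        byte[] qualifier = new byte[column.length - i - 1];
        System.arraycopy(column, 0, family, 0, i);
        System.arraycopy(column, i + 1, qualifier, 0, qualifier.length);
        return new byte[][] { family, qualifier };
      }
    }
    // No delimiter: treat the whole name as the family with an empty qualifier.
    return new byte[][] { column.clone(), new byte[0] };
  }
}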

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/RowModel.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/RowModel.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/RowModel.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/RowModel.java Tue Jun 16 04:33:56 2009
@@ -25,8 +25,13 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.rest.descriptors.TimestampsDescriptor;
@@ -41,44 +46,51 @@
     super.initialize(conf, admin);
   }
 
+  @Deprecated
   public RowResult get(byte[] tableName, byte[] rowName)
       throws HBaseRestException {
+    return get(tableName, new Get(rowName)).getRowResult();
+  }
+
+  public Result get(byte[] tableName, Get get)
+  throws HBaseRestException {
     try {
       HTable table = new HTable(tableName);
-      return table.getRow(rowName);
+      return table.get(get);
     } catch (IOException e) {
       throw new HBaseRestException(e);
     }
   }
-
+  
+  @Deprecated
   public RowResult get(byte[] tableName, byte[] rowName, byte[][] columns)
       throws HBaseRestException {
-    try {
-      HTable table = new HTable(tableName);
-      return table.getRow(rowName, columns);
-    } catch (IOException e) {
-      throw new HBaseRestException(e);
+    Get get = new Get(rowName);
+    for(byte [] column : columns) {
+      byte [][] famAndQf = KeyValue.parseColumn(column);
+      get.addColumn(famAndQf[0], famAndQf[1]);
     }
+    return get(tableName, get).getRowResult();
   }
 
+  @Deprecated
   public RowResult get(byte[] tableName, byte[] rowName, byte[][] columns,
       long timestamp) throws HBaseRestException {
-    try {
-      HTable table = new HTable(tableName);
-      return table.getRow(rowName, columns, timestamp);
-    } catch (IOException e) {
-      throw new HBaseRestException(e);
+    Get get = new Get(rowName);
+    for(byte [] column : columns) {
+      byte [][] famAndQf = KeyValue.parseColumn(column);
+      get.addColumn(famAndQf[0], famAndQf[1]);
     }
+    get.setTimeStamp(timestamp);
+    return get(tableName, get).getRowResult();
   }
-
+  
+  @Deprecated
   public RowResult get(byte[] tableName, byte[] rowName, long timestamp)
       throws HBaseRestException {
-    try {
-      HTable table = new HTable(tableName);
-      return table.getRow(rowName, timestamp);
-    } catch (IOException e) {
-      throw new HBaseRestException(e);
-    }
+    Get get = new Get(rowName);
+    get.setTimeStamp(timestamp);
+    return get(tableName, get).getRowResult();
   }
 
   public TimestampsDescriptor getTimestamps(
@@ -98,41 +110,48 @@
 
   }
 
-  public void post(byte[] tableName, BatchUpdate b) throws HBaseRestException {
+  public void post(byte[] tableName, Put put) throws HBaseRestException {
     try {
       HTable table = new HTable(tableName);
-      table.commit(b);
+      table.put(put);
     } catch (IOException e) {
       throw new HBaseRestException(e);
     }
   }
 
-  public void post(byte[] tableName, List<BatchUpdate> b)
+  public void post(byte[] tableName, List<Put> puts)
       throws HBaseRestException {
     try {
       HTable table = new HTable(tableName);
-      table.commit(b);
+      table.put(puts);
     } catch (IOException e) {
       throw new HBaseRestException(e);
     }
   }
-
+  
+  @Deprecated
   public void delete(byte[] tableName, byte[] rowName)
       throws HBaseRestException {
-    try {
-      HTable table = new HTable(tableName);
-      table.deleteAll(rowName);
-    } catch (IOException e) {
-      throw new HBaseRestException(e);
-    }
+    Delete delete = new Delete(rowName);
+    delete(tableName, delete);
   }
 
-  public void delete(byte[] tableName, byte[] rowName, byte[][] columns) throws HBaseRestException {
+  @Deprecated
+  public void delete(byte[] tableName, byte[] rowName, byte[][] columns)
+  throws HBaseRestException {
+    Delete delete = new Delete(rowName);
+    for(byte [] column : columns) {
+      byte [][] famAndQf = KeyValue.parseColumn(column);
+      delete.deleteColumn(famAndQf[0], famAndQf[1]);
+    }
+    delete(tableName, delete);
+  }
+  
+  public void delete(byte[] tableName, Delete delete)
+  throws HBaseRestException {
     try {
       HTable table = new HTable(tableName);
-      for (byte[] column : columns) {
-        table.deleteAll(rowName, column);
-      }
+      table.delete(delete);
     } catch (IOException e) {
       throw new HBaseRestException(e);
     }
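
RowModel now funnels the deprecated getRow()/deleteAll() style methods through Get and Delete objects backed by HTable.get() and HTable.delete(). A small client-side sketch of that usage is below; the names are made up, and Result.getValue(family, qualifier) is assumed to be the value accessor available in this tree.

import java.io.IOException;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class RowAccessSketch {
  public static void main(String[] args) throws IOException {
    HTable table = new HTable(Bytes.toBytes("mytable"));
    byte[] row = Bytes.toBytes("row1");
    byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytes("info:name"));

    // Read: Get replaces the old getRow(row, columns) overloads.
    Get get = new Get(row);
    get.addColumn(famAndQf[0], famAndQf[1]);
    Result result = table.get(get);
    byte[] value = result.getValue(famAndQf[0], famAndQf[1]);
    System.out.println(value == null ? "<no cell>" : Bytes.toString(value));

    // Delete: Delete replaces the old deleteAll(row, column) calls.
    Delete delete = new Delete(row);
    delete.deleteColumn(famAndQf[0], famAndQf[1]);
    table.delete(delete);
  }
}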

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/ScannerModel.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/ScannerModel.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/ScannerModel.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/ScannerModel.java Tue Jun 16 04:33:56 2009
@@ -1,5 +1,5 @@
 /**
- * Copyright 2007 The Apache Software Foundation
+ * Copyright 2009 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -29,9 +29,10 @@
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Scanner;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.filter.RowFilterInterface;
-import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.rest.descriptors.ScannerIdentifier;
 import org.apache.hadoop.hbase.rest.exception.HBaseRestException;
 
@@ -49,20 +50,20 @@
   //
   protected static class ScannerMaster {
 
-    protected static final Map<Integer, Scanner> scannerMap = new ConcurrentHashMap<Integer, Scanner>();
+    protected static final Map<Integer, ResultScanner> scannerMap = new ConcurrentHashMap<Integer, ResultScanner>();
     protected static final AtomicInteger nextScannerId = new AtomicInteger(1);
 
-    public Integer addScanner(Scanner scanner) {
+    public Integer addScanner(ResultScanner scanner) {
       Integer i = Integer.valueOf(nextScannerId.getAndIncrement());
       scannerMap.put(i, scanner);
       return i;
     }
 
-    public Scanner getScanner(Integer id) {
+    public ResultScanner getScanner(Integer id) {
       return scannerMap.get(id);
     }
 
-    public Scanner removeScanner(Integer id) {
+    public ResultScanner removeScanner(Integer id) {
       return scannerMap.remove(id);
     }
 
@@ -71,7 +72,7 @@
      *          id of scanner to close
      */
     public void scannerClose(Integer id) {
-      Scanner s = scannerMap.remove(id);
+      ResultScanner s = scannerMap.remove(id);
       s.close();
     }
   }
@@ -79,7 +80,7 @@
   protected static final ScannerMaster scannerMaster = new ScannerMaster();
 
   /**
-   * returns the next numResults RowResults from the Scaner mapped to Integer
+   * returns the next numResults Results from the Scanner mapped to Integer
    * id. If the end of the table is reached, the scanner is closed and all
    * succesfully retrieved rows are returned.
    * 
@@ -90,14 +91,14 @@
    * @return all successfully retrieved rows.
    * @throws org.apache.hadoop.hbase.rest.exception.HBaseRestException
    */
-  public RowResult[] scannerGet(Integer id, Long numRows)
+  public Result[] scannerGet(Integer id, Long numRows)
       throws HBaseRestException {
     try {
-      ArrayList<RowResult> a;
-      Scanner s;
-      RowResult r;
+      ArrayList<Result> a;
+      ResultScanner s;
+      Result r;
 
-      a = new ArrayList<RowResult>();
+      a = new ArrayList<Result>();
       s = scannerMaster.getScanner(id);
 
       if (s == null) {
@@ -114,7 +115,7 @@
         }
       }
 
-      return a.toArray(new RowResult[0]);
+      return a.toArray(new Result[0]);
     } catch (IOException e) {
       throw new HBaseRestException(e);
     }
@@ -129,13 +130,13 @@
    * @return all rows till end of table
    * @throws org.apache.hadoop.hbase.rest.exception.HBaseRestException
    */
-  public RowResult[] scannerGet(Integer id) throws HBaseRestException {
+  public Result[] scannerGet(Integer id) throws HBaseRestException {
     try {
-      ArrayList<RowResult> a;
-      Scanner s;
-      RowResult r;
+      ArrayList<Result> a;
+      ResultScanner s;
+      Result r;
 
-      a = new ArrayList<RowResult>();
+      a = new ArrayList<Result>();
       s = scannerMaster.getScanner(id);
 
       while ((r = s.next()) != null) {
@@ -144,14 +145,14 @@
 
       scannerMaster.scannerClose(id);
 
-      return a.toArray(new RowResult[0]);
+      return a.toArray(new Result[0]);
     } catch (IOException e) {
       throw new HBaseRestException(e);
     }
   }
 
   public boolean scannerClose(Integer id) throws HBaseRestException {
-    Scanner s = scannerMaster.removeScanner(id);
+    ResultScanner s = scannerMaster.removeScanner(id);
 
     if (s == null) {
       throw new HBaseRestException("Scanner id: " + id + " does not exist");
@@ -208,8 +209,11 @@
     try {
       HTable table;
       table = new HTable(tableName);
+      Scan scan = new Scan();
+      scan.addColumns(columns);
+      scan.setTimeRange(0, timestamp);
       return new ScannerIdentifier(scannerMaster.addScanner(table.getScanner(
-          columns, HConstants.EMPTY_START_ROW, timestamp)));
+          scan)));
     } catch (IOException e) {
       throw new HBaseRestException(e);
     }
@@ -225,8 +229,11 @@
     try {
       HTable table;
       table = new HTable(tableName);
+      Scan scan = new Scan(startRow);
+      scan.addColumns(columns);
+      scan.setTimeRange(0, timestamp);
       return new ScannerIdentifier(scannerMaster.addScanner(table.getScanner(
-          columns, startRow, timestamp)));
+          scan)));
     } catch (IOException e) {
       throw new HBaseRestException(e);
     }
@@ -243,8 +250,12 @@
     try {
       HTable table;
       table = new HTable(tableName);
+      Scan scan = new Scan();
+      scan.addColumns(columns);
+      scan.setTimeRange(0, timestamp);
+//      scan.setFilter(filter);
       return new ScannerIdentifier(scannerMaster.addScanner(table.getScanner(
-          columns, HConstants.EMPTY_START_ROW, timestamp, filter)));
+          scan)));
     } catch (IOException e) {
       throw new HBaseRestException(e);
     }
@@ -261,8 +272,12 @@
     try {
       HTable table;
       table = new HTable(tableName);
+      Scan scan = new Scan(startRow);
+      scan.addColumns(columns);
+      scan.setTimeRange(0, timestamp);
+//      scan.setFilter(filter);
       return new ScannerIdentifier(scannerMaster.addScanner(table.getScanner(
-          columns, startRow, timestamp, filter)));
+          scan)));
     } catch (IOException e) {
       throw new HBaseRestException(e);
     }
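
ScannerModel replaces Scanner/RowResult with Scan, ResultScanner and Result: the scan options (columns, start row, time range) are set on a Scan object and handed to HTable.getScanner(), while the row filter is only carried as a commented-out scan.setFilter(filter) for now. A minimal sketch of the new scanning pattern follows; the table and column names are invented for illustration.

import java.io.IOException;

import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanSketch {
  public static void main(String[] args) throws IOException {
    HTable table = new HTable(Bytes.toBytes("mytable"));

    // Scan bundles what getScanner(columns, startRow, timestamp) used to take as arguments.
    Scan scan = new Scan(Bytes.toBytes("row1"));                   // start row
    scan.addColumns(new byte[][] { Bytes.toBytes("info:name") });  // old-style column names
    scan.setTimeRange(0, Long.MAX_VALUE);                          // widest possible time window

    ResultScanner scanner = table.getScanner(scan);
    try {
      Result r;
      while ((r = scanner.next()) != null) {                       // null signals end of table
        System.out.println(Bytes.toString(r.getRow()));
      }
    } finally {
      scanner.close();                                             // always release the scanner
    }
  }
}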

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/TableModel.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/TableModel.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/TableModel.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/TableModel.java Tue Jun 16 04:33:56 2009
@@ -29,8 +29,9 @@
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Scanner;
-import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.rest.exception.HBaseRestException;
 import org.apache.hadoop.hbase.rest.serializer.IRestSerializer;
 import org.apache.hadoop.hbase.rest.serializer.ISerializable;
@@ -48,7 +49,7 @@
   }
 
   // Get Methods
-  public RowResult[] get(byte[] tableName) throws HBaseRestException {
+  public Result[] get(byte [] tableName) throws HBaseRestException {
     return get(tableName, getColumns(tableName));
   }
 
@@ -63,26 +64,28 @@
    * @return resultant rows
    * @throws org.apache.hadoop.hbase.rest.exception.HBaseRestException
    */
-  public RowResult[] get(byte[] tableName, byte[][] columnNames)
+  public Result[] get(byte [] tableName, byte[][] columnNames)
       throws HBaseRestException {
     try {
-      ArrayList<RowResult> a = new ArrayList<RowResult>();
+      ArrayList<Result> a = new ArrayList<Result>();
       HTable table = new HTable(tableName);
 
-      Scanner s = table.getScanner(columnNames);
-      RowResult r;
+      Scan scan = new Scan();
+      scan.addColumns(columnNames);
+      ResultScanner s = table.getScanner(scan);
+      Result r;
 
       while ((r = s.next()) != null) {
         a.add(r);
       }
 
-      return a.toArray(new RowResult[0]);
+      return a.toArray(new Result[0]);
     } catch (Exception e) {
       throw new HBaseRestException(e);
     }
   }
 
-  protected boolean doesTableExist(byte[] tableName) throws HBaseRestException {
+  protected boolean doesTableExist(byte [] tableName) throws HBaseRestException {
     try {
       return this.admin.tableExists(tableName);
     } catch (IOException e) {
@@ -90,7 +93,7 @@
     }
   }
   
-  protected void disableTable(byte[] tableName) throws HBaseRestException {
+  protected void disableTable(byte [] tableName) throws HBaseRestException {
     try {
       this.admin.disableTable(tableName);
     } catch (IOException e) {
@@ -98,7 +101,7 @@
     }
   }
   
-  protected void enableTable(byte[] tableName) throws HBaseRestException {
+  protected void enableTable(byte [] tableName) throws HBaseRestException {
     try {
       this.admin.enableTable(tableName);
     } catch (IOException e) {
@@ -110,7 +113,7 @@
       ArrayList<HColumnDescriptor> columns) throws HBaseRestException {
     HTableDescriptor htc = null;
     try {
-      htc = this.admin.getTableDescriptor(tableName);
+      htc = this.admin.getTableDescriptor(Bytes.toBytes(tableName));
     } catch (IOException e) {
       throw new HBaseRestException("Table does not exist");
     }
@@ -204,7 +207,7 @@
    *         tableName not existing.
    * @throws org.apache.hadoop.hbase.rest.exception.HBaseRestException
    */
-  public boolean post(byte[] tableName, HTableDescriptor htd)
+  public boolean post(byte [] tableName, HTableDescriptor htd)
       throws HBaseRestException {
     try {
       if (!this.admin.tableExists(tableName)) {
@@ -225,7 +228,7 @@
    * @return true if table exists and deleted, false if table does not exist.
    * @throws org.apache.hadoop.hbase.rest.exception.HBaseRestException
    */
-  public boolean delete(byte[] tableName) throws HBaseRestException {
+  public boolean delete(byte [] tableName) throws HBaseRestException {
     try {
       if (this.admin.tableExists(tableName)) {
         this.admin.disableTable(tableName);
@@ -241,7 +244,7 @@
   public static class Regions implements ISerializable {
     byte[][] regionKey;
 
-    public Regions(byte[][] bs) {
+    public Regions(byte [][] bs) {
       super();
       this.regionKey = bs;
     }
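
TableModel's admin paths now pass table names as byte[] throughout, including HBaseAdmin.getTableDescriptor(Bytes.toBytes(tableName)). A short sketch of the disable-then-delete flow used by delete() above follows; the HBaseAdmin constructor and deleteTable() call are assumed from the surrounding client API and are not shown in this diff.

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

public class TableAdminSketch {
  public static void main(String[] args) throws IOException {
    HBaseAdmin admin = new HBaseAdmin(new HBaseConfiguration());
    byte[] tableName = Bytes.toBytes("mytable");

    if (admin.tableExists(tableName)) {
      // The descriptor lookup now takes the name as bytes.
      HTableDescriptor htd = admin.getTableDescriptor(tableName);
      System.out.println(htd.getNameAsString());

      // delete() above disables the table before dropping it.
      admin.disableTable(tableName);
      admin.deleteTable(tableName);
    }
  }
}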

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/TimestampModel.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/TimestampModel.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/TimestampModel.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/TimestampModel.java Tue Jun 16 04:33:56 2009
@@ -20,11 +20,19 @@
 package org.apache.hadoop.hbase.rest;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.RowResult;
@@ -39,56 +47,80 @@
     super.initialize(conf, admin);
   }
 
-  public void delete(byte[] tableName, byte[] rowName, long timestamp)
-      throws HBaseRestException {
+  public void delete(byte [] tableName, Delete delete)
+  throws HBaseRestException {
     try {
       HTable table = new HTable(tableName);
-      table.deleteAll(rowName, timestamp);
+      table.delete(delete);
     } catch (IOException e) {
       throw new HBaseRestException(e);
     }
   }
-
+  
+  @Deprecated
+  public void delete(byte[] tableName, byte[] rowName, long timestamp)
+      throws HBaseRestException {
+    Delete delete = new Delete(rowName, timestamp, null);
+    delete(tableName, delete);
+  }
+  
+  @Deprecated
   public void delete(byte[] tableName, byte[] rowName, byte[][] columns,
       long timestamp) throws HBaseRestException {
-    try {
-      HTable table = new HTable(tableName);
-      for (byte[] column : columns) {
-        table.deleteAll(rowName, column, timestamp);
-      }
-    } catch (IOException e) {
-      throw new HBaseRestException(e);
+    Delete delete  = new Delete(rowName, timestamp, null);
+    for(byte [] column : columns) {
+      byte [][] famAndQf = KeyValue.parseColumn(column);
+      delete.deleteColumn(famAndQf[0], famAndQf[1]);
     }
+    delete(tableName, delete);
   }
 
-  public Cell get(byte[] tableName, byte[] rowName, byte[] columnName,
-      long timestamp) throws HBaseRestException {
+  public Result get(final byte [] tableName, final Get get)
+  throws HBaseRestException {
     try {
       HTable table = new HTable(tableName);
-      return table.get(rowName, columnName, timestamp, 1)[0];
+      return table.get(get);
     } catch (IOException e) {
       throw new HBaseRestException(e);
     }
   }
+  
+  @Deprecated
+  public Cell get(byte[] tableName, byte[] rowName, byte[] columnName,
+      long timestamp) throws HBaseRestException {
+    Get get = new Get(rowName);
+    byte [][] famAndQf = KeyValue.parseColumn(columnName); 
+    get.addColumn(famAndQf[0], famAndQf[1]);
+    get.setTimeStamp(timestamp);
+    return get(tableName, get).getCellValue(famAndQf[0], famAndQf[1]);
+  }
 
+  @Deprecated
   public Cell[] get(byte[] tableName, byte[] rowName, byte[] columnName,
-      long timestamp, int numVersions) throws HBaseRestException {
-    try {
-      HTable table = new HTable(tableName);
-      return table.get(rowName, columnName, timestamp, numVersions);
-    } catch (IOException e) {
-      throw new HBaseRestException(e);
+      long timestamp, int numVersions) throws IOException, HBaseRestException {
+    Get get = new Get(rowName);
+    byte [][] famAndQf = KeyValue.parseColumn(columnName); 
+    get.addColumn(famAndQf[0], famAndQf[1]);
+    get.setTimeStamp(timestamp);
+    get.setMaxVersions(numVersions);
+    Result result = get(tableName, get);
+    List<Cell> cells = new ArrayList<Cell>();
+    for(KeyValue kv : result.sorted()) {
+      cells.add(new Cell(kv.getValue(), kv.getTimestamp()));
     }
+    return cells.toArray(new Cell [0]);
   }
 
+  @Deprecated
   public RowResult get(byte[] tableName, byte[] rowName, byte[][] columns,
       long timestamp) throws HBaseRestException {
-    try {
-      HTable table = new HTable(tableName);
-      return table.getRow(rowName, columns, timestamp);
-    } catch (IOException e) {
-      throw new HBaseRestException(e);
+    Get get = new Get(rowName);
+    for(byte [] column : columns) {
+      byte [][] famAndQf = KeyValue.parseColumn(column);
+      get.addColumn(famAndQf[0], famAndQf[1]);
     }
+    get.setTimeStamp(timestamp);
+    return get(tableName, get).getRowResult();
   }
 
   /**
@@ -100,25 +132,20 @@
    */
   public RowResult get(byte[] tableName, byte[] rowName, long timestamp)
       throws HBaseRestException {
-    try {
-      HTable table = new HTable(tableName);
-      return table.getRow(rowName, timestamp);
-    } catch (IOException e) {
-      throw new HBaseRestException(e);
-    }
+    Get get = new Get(rowName);
+    get.setTimeStamp(timestamp);
+    return get(tableName, get).getRowResult();
   }
 
   public void post(byte[] tableName, byte[] rowName, byte[] columnName,
       long timestamp, byte[] value) throws HBaseRestException {
     try {
-      HTable table;
-      BatchUpdate b;
-
-      table = new HTable(tableName);
-      b = new BatchUpdate(rowName, timestamp);
-
-      b.put(columnName, value);
-      table.commit(b);
+      HTable table = new HTable(tableName);
+      Put put = new Put(rowName);
+      put.setTimeStamp(timestamp);
+      byte [][] famAndQf = KeyValue.parseColumn(columnName);
+      put.add(famAndQf[0], famAndQf[1], value);
+      table.put(put);
     } catch (IOException e) {
       throw new HBaseRestException(e);
     }
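
TimestampModel maps its timestamped operations onto Put.setTimeStamp(), Get.setTimeStamp()/setMaxVersions() and the Delete(row, timestamp, lock) constructor. A caller-side sketch of those explicit-timestamp operations is below; the table, row and column names are invented and the timestamp is an arbitrary example value.

import java.io.IOException;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class TimestampSketch {
  public static void main(String[] args) throws IOException {
    HTable table = new HTable(Bytes.toBytes("mytable"));
    byte[] row = Bytes.toBytes("row1");
    byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytes("info:name"));
    long ts = 1245126836000L;                 // example write timestamp

    // Write a cell at an explicit timestamp.
    Put put = new Put(row);
    put.setTimeStamp(ts);
    put.add(famAndQf[0], famAndQf[1], Bytes.toBytes("value1"));
    table.put(put);

    // Read back the cell pinned to that timestamp, as the deprecated
    // get(tableName, rowName, columnName, timestamp, numVersions) now does.
    Get get = new Get(row);
    get.addColumn(famAndQf[0], famAndQf[1]);
    get.setTimeStamp(ts);
    get.setMaxVersions(3);
    Result result = table.get(get);
    for (KeyValue kv : result.sorted()) {
      System.out.println(kv.getTimestamp() + " -> " + Bytes.toString(kv.getValue()));
    }

    // Delete everything in the row at or before the timestamp.
    Delete delete = new Delete(row, ts, null);
    table.delete(delete);
  }
}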

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/parser/XMLRestParser.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/parser/XMLRestParser.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/parser/XMLRestParser.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/parser/XMLRestParser.java Tue Jun 16 04:33:56 2009
@@ -96,7 +96,6 @@
     String compression = HColumnDescriptor.DEFAULT_COMPRESSION;
     boolean in_memory = HColumnDescriptor.DEFAULT_IN_MEMORY;
     boolean block_cache = HColumnDescriptor.DEFAULT_BLOCKCACHE;
-    int max_cell_size = HColumnDescriptor.DEFAULT_LENGTH;
     int ttl = HColumnDescriptor.DEFAULT_TTL;
     boolean bloomfilter = HColumnDescriptor.DEFAULT_BLOOMFILTER;
 
@@ -108,7 +107,6 @@
         // compression = currentCDesp.getCompression();
         in_memory = currentCDesp.isInMemory();
         block_cache = currentCDesp.isBlockCacheEnabled();
-        max_cell_size = currentCDesp.getMaxValueLength();
         ttl = currentCDesp.getTimeToLive();
         bloomfilter = currentCDesp.isBloomfilter();
       }
@@ -141,13 +139,6 @@
           .getNodeValue());
     }
 
-    NodeList max_cell_size_list = columnfamily
-        .getElementsByTagName("max-cell-size");
-    if (max_cell_size_list.getLength() > 0) {
-      max_cell_size = Integer.valueOf(max_cell_size_list.item(0)
-          .getFirstChild().getNodeValue());
-    }
-
     NodeList ttl_list = columnfamily.getElementsByTagName("time-to-live");
     if (ttl_list.getLength() > 0) {
       ttl = Integer.valueOf(ttl_list.item(0).getFirstChild().getNodeValue());
@@ -162,7 +153,7 @@
 
     HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes(colname),
         max_versions, compression, in_memory, block_cache,
-        max_cell_size, ttl, bloomfilter);
+        ttl, bloomfilter);
 
     NodeList metadataList = columnfamily.getElementsByTagName("metadata");
     for (int i = 0; i < metadataList.getLength(); i++) {
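
With the max-cell-size element dropped, XMLRestParser builds HColumnDescriptor with the shorter constructor: name, max versions, compression, in-memory, block-cache, ttl and bloom filter, in that order. A minimal sketch of that constructor using the class defaults follows; the family name and version count are example values, and getNameAsString() is assumed available on HColumnDescriptor.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public class ColumnDescriptorSketch {
  public static void main(String[] args) {
    // The old max-cell-size argument is gone from the constructor.
    HColumnDescriptor hcd = new HColumnDescriptor(
        Bytes.toBytes("info"),                       // example family name
        3,                                           // example max versions
        HColumnDescriptor.DEFAULT_COMPRESSION,
        HColumnDescriptor.DEFAULT_IN_MEMORY,
        HColumnDescriptor.DEFAULT_BLOCKCACHE,
        HColumnDescriptor.DEFAULT_TTL,
        HColumnDescriptor.DEFAULT_BLOOMFILTER);
    System.out.println(hcd.getNameAsString());
  }
}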

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/serializer/SimpleXMLSerializer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/serializer/SimpleXMLSerializer.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/serializer/SimpleXMLSerializer.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/rest/serializer/SimpleXMLSerializer.java Tue Jun 16 04:33:56 2009
@@ -163,10 +163,6 @@
     printer.print("<max-versions>");
     printer.print(column.getMaxVersions());
     printer.print("</max-versions>");
-    // max-length
-    printer.print("<max-length>");
-    printer.print(column.getMaxValueLength());
-    printer.print("</max-length>");
     printer.print("</columnfamily>");
     printer.flush();
   }


