hbase-commits mailing list archives

From: raw...@apache.org
Subject: svn commit: r782178 [5/16] - in /hadoop/hbase/trunk: bin/ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/ src/java/org/apache/hadoop/hbase/client/tableindexed/ src/java/org/apache/hadoop/hbase/client/transactional/ src/java/o...
Date: Sat, 06 Jun 2009 01:26:27 GMT
Added: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/TimeRange.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/TimeRange.java?rev=782178&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/TimeRange.java (added)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/TimeRange.java Sat Jun  6 01:26:21 2009
@@ -0,0 +1,172 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.io;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.io.Writable;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Represents an interval of version timestamps.
+ * <p>
+ * Evaluated according to minStamp <= timestamp < maxStamp
+ * or [minStamp,maxStamp) in interval notation.
+ * <p>
+ * Only used internally; should not be accessed directly by clients.
+ */
+public class TimeRange implements Writable {
+  private long minStamp = 0L;
+  private long maxStamp = Long.MAX_VALUE;
+  private boolean allTime = false;
+
+  /**
+   * Default constructor.
+   * Represents interval [0, Long.MAX_VALUE) (allTime)
+   */
+  public TimeRange() {
+    allTime = true;
+  }
+  
+  /**
+   * Represents interval [minStamp, Long.MAX_VALUE)
+   * @param minStamp the minimum timestamp value, inclusive
+   */
+  public TimeRange(long minStamp) {
+    this.minStamp = minStamp;
+  }
+  
+  /**
+   * Represents interval [minStamp, Long.MAX_VALUE)
+   * @param minStamp the minimum timestamp value, inclusive
+   */
+  public TimeRange(byte [] minStamp) {
+  	this.minStamp = Bytes.toLong(minStamp);
+  }
+  
+  /**
+   * Represents interval [minStamp, maxStamp) 
+   * @param minStamp the minimum timestamp, inclusive
+   * @param maxStamp the maximum timestamp, exclusive
+   * @throws IOException
+   */
+  public TimeRange(long minStamp, long maxStamp)
+  throws IOException {
+    if(maxStamp < minStamp) {
+      throw new IOException("maxStamp is smaller than minStamp");
+    }
+    this.minStamp = minStamp;
+    this.maxStamp = maxStamp;
+  }
+
+  /**
+   * Represents interval [minStamp, maxStamp) 
+   * @param minStamp the minimum timestamp, inclusive
+   * @param maxStamp the maximum timestamp, exclusive
+   * @throws IOException
+   */
+  public TimeRange(byte [] minStamp, byte [] maxStamp)
+  throws IOException {
+    this(Bytes.toLong(minStamp), Bytes.toLong(maxStamp));
+  }
+  
+  /**
+   * @return the smallest timestamp that should be considered
+   */
+  public long getMin() {
+    return minStamp;
+  }
+
+  /**
+   * @return the biggest timestamp that should be considered
+   */
+  public long getMax() {
+    return maxStamp;
+  }
+  
+  /**
+   * Check if the specified timestamp is within this TimeRange.
+   * <p>
+   * Returns true if within interval [minStamp, maxStamp), false 
+   * if not.
+   * @param bytes timestamp to check
+   * @param offset offset into the bytes
+   * @return true if within TimeRange, false if not
+   */
+  public boolean withinTimeRange(byte [] bytes, int offset) {
+  	if(allTime) return true;
+  	return withinTimeRange(Bytes.toLong(bytes, offset));
+  }
+  
+  /**
+   * Check if the specified timestamp is within this TimeRange.
+   * <p>
+   * Returns true if within interval [minStamp, maxStamp), false 
+   * if not.
+   * @param timestamp timestamp to check
+   * @return true if within TimeRange, false if not
+   */
+  public boolean withinTimeRange(long timestamp) {
+  	if(allTime) return true;
+    // check if in [minStamp, maxStamp)
+  	return (minStamp <= timestamp && timestamp < maxStamp);
+  }
+  
+  /**
+   * Check if the specified timestamp is within or after this TimeRange.
+   * <p>
+   * Returns true if the timestamp is greater than or equal to minStamp,
+   * i.e. within interval [minStamp, Long.MAX_VALUE), false if not.
+   * @param timestamp timestamp to check
+   * @return true if within or after TimeRange, false if not
+   */
+  public boolean withinOrAfterTimeRange(long timestamp) {
+    if(allTime) return true;
+    // check if >= minStamp
+    return (timestamp >= minStamp);
+  }
+  
+  @Override
+  public String toString() {
+    StringBuffer sb = new StringBuffer();
+    sb.append("maxStamp=");
+    sb.append(this.maxStamp);
+    sb.append(", minStamp=");
+    sb.append(this.minStamp);
+    return sb.toString();
+  }
+  
+  //Writable
+  public void readFields(final DataInput in) throws IOException {
+    this.minStamp = in.readLong();
+    this.maxStamp = in.readLong();
+    this.allTime = in.readBoolean();
+  }
+  
+  public void write(final DataOutput out) throws IOException {
+    out.writeLong(minStamp);
+    out.writeLong(maxStamp);
+    out.writeBoolean(this.allTime);
+  }
+}
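
A minimal usage sketch of the TimeRange class added above. The timestamps and the
standalone main() wrapper are illustrative only; nothing beyond the constructors,
withinTimeRange/withinOrAfterTimeRange and the Writable methods in the file itself
is assumed.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hbase.io.TimeRange;

public class TimeRangeExample {
  public static void main(String [] args) throws IOException {
    // [minStamp, maxStamp): 100 is included, 200 is not
    TimeRange tr = new TimeRange(100L, 200L);
    System.out.println(tr.withinTimeRange(100L));         // true
    System.out.println(tr.withinTimeRange(200L));         // false, maxStamp is exclusive
    System.out.println(tr.withinOrAfterTimeRange(500L));  // true, only the lower bound is checked

    // Writable round trip, the way the range would travel over RPC
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    tr.write(new DataOutputStream(bos));
    TimeRange copy = new TimeRange();
    copy.readFields(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
    System.out.println(copy.getMin() + ".." + copy.getMax()); // 100..200
  }
}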

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java Sat Jun  6 01:26:21 2009
@@ -104,11 +104,6 @@
  * <pre>&lt;fileinfo>&lt;trailer></pre>.  That is, there are not data nor meta
  * blocks present.
  * <p>
- * TODO: Bloomfilters.  Need to add hadoop 0.20. first since it has bug fixes
- * on the hadoop bf package.
- *  * TODO: USE memcmp by default?  Write the keys out in an order that allows
- * my using this -- reverse the timestamp.
- * TODO: Add support for fast-gzip and for lzo.
  * TODO: Do scanners need to be able to take a start and end row?
  * TODO: Should BlockIndex know the name of its file?  Should it have a Path
  * that points at its file say for the case where an index lives apart from
@@ -465,8 +460,12 @@
      * Add key/value to file.
      * Keys must be added in an order that agrees with the Comparator passed
      * on construction.
-     * @param key Key to add.  Cannot be empty nor null.
-     * @param value Value to add.  Cannot be empty nor null.
+     * @param key Key buffer.
+     * @param koffset Offset of the key in the buffer.
+     * @param klength Length of the key.
+     * @param value Value buffer.
+     * @param voffset Offset of the value in the buffer.
+     * @param vlength Length of the value.
      * @throws IOException
      */
     public void append(final byte [] key, final int koffset, final int klength,
@@ -1039,6 +1038,9 @@
       }
       
       public KeyValue getKeyValue() {
+        if(this.block == null) {
+          return null;
+        }
         return new KeyValue(this.block.array(),
             this.block.arrayOffset() + this.block.position() - 8);
       }

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java Sat Jun  6 01:26:21 2009
@@ -24,6 +24,9 @@
   private ReferenceQueue q = new ReferenceQueue();
   public int dumps = 0;
   
+  /**
+   * Constructor
+   */
   public SimpleBlockCache() {
     super();
   }
@@ -36,6 +39,9 @@
     }
   }
 
+  /**
+   * @return the size
+   */
   public synchronized int size() {
     processQueue();
     return cache.size();

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java Sat Jun  6 01:26:21 2009
@@ -390,6 +390,7 @@
    * @param addr
    * @param conf
    * @param maxAttempts
+   * @param timeout
    * @return proxy
    * @throws IOException
    */

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HBaseServer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HBaseServer.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HBaseServer.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HBaseServer.java Sat Jun  6 01:26:21 2009
@@ -86,7 +86,8 @@
   public static final Log LOG =
     LogFactory.getLog("org.apache.hadoop.ipc.HBaseServer");
 
-  protected static final ThreadLocal<HBaseServer> SERVER = new ThreadLocal<HBaseServer>();
+  protected static final ThreadLocal<HBaseServer> SERVER =
+    new ThreadLocal<HBaseServer>();
 
   /** Returns the server instance called under or null.  May be called under
    * {@link #call(Writable, long)} implementations, and under {@link Writable}
@@ -128,10 +129,11 @@
   private int handlerCount;                       // number of handler threads
   protected Class<? extends Writable> paramClass; // class of call parameters
   protected int maxIdleTime;                      // the maximum idle time after 
-                                                  // which a client may be disconnected
-  protected int thresholdIdleConnections;         // the number of idle connections
-                                                  // after which we will start
-                                                  // cleaning up idle 
+                                                  // which a client may be
+                                                  // disconnected
+  protected int thresholdIdleConnections;         // the number of idle
+                                                  // connections after which we 
+                                                  // will start cleaning up idle 
                                                   // connections
   int maxConnectionsToNuke;                       // the max number of 
                                                   // connections to nuke
@@ -173,8 +175,9 @@
     try {
       socket.bind(address, backlog);
     } catch (BindException e) {
-      BindException bindException = new BindException("Problem binding to " + address
-                                                      + " : " + e.getMessage());
+      BindException bindException =
+        new BindException("Problem binding to " + address + " : " + 
+            e.getMessage());
       bindException.initCause(e);
       throw bindException;
     } catch (SocketException e) {
@@ -297,7 +300,6 @@
     public void run() {
       LOG.info(getName() + ": starting");
       SERVER.set(HBaseServer.this);
-      long lastPurgeTime = 0;   // last check for old calls.
 
       while (running) {
         SelectionKey key = null;

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java Sat Jun  6 01:26:21 2009
@@ -23,6 +23,7 @@
 
 import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.io.Writable;
 
@@ -109,7 +110,7 @@
    * @param args
    * @throws IOException
    */
-  public void modifyTable(byte[] tableName, int op, Writable[] args)
+  public void modifyTable(byte[] tableName, HConstants.Modify op, Writable[] args)
     throws IOException;
 
   /**

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java Sat Jun  6 01:26:21 2009
@@ -1,5 +1,5 @@
 /**
- * Copyright 2007 The Apache Software Foundation
+ * Copyright 2009 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -21,15 +21,15 @@
 
 import java.io.IOException;
 
-import org.apache.hadoop.hbase.filter.RowFilterInterface;
-import org.apache.hadoop.hbase.io.BatchUpdate;
-import org.apache.hadoop.hbase.io.Cell;
-import org.apache.hadoop.hbase.io.RowResult;
-import org.apache.hadoop.hbase.io.HbaseMapWritable;
-
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 
 /**
  * Clients interact with HRegionServers using a handle to the HRegionInterface.
@@ -49,21 +49,6 @@
   public HRegionInfo getRegionInfo(final byte [] regionName)
   throws NotServingRegionException;
   
-  /**
-   * Get the specified number of versions of the specified row and column with
-   * the specified timestamp.
-   *
-   * @param regionName region name
-   * @param row row key
-   * @param column column key
-   * @param timestamp timestamp
-   * @param numVersions number of versions to return
-   * @return array of values
-   * @throws IOException
-   */
-  public Cell[] get(final byte [] regionName, final byte [] row,
-    final byte [] column, final long timestamp, final int numVersions)
-  throws IOException;
 
   /**
    * Return all the data for the row that matches <i>row</i> exactly, 
@@ -71,159 +56,104 @@
    * 
    * @param regionName region name
    * @param row row key
-   * @param columnFamily Column family to look for row in.
+   * @param family Column family to look for row in.
    * @return map of values
    * @throws IOException
    */
-  public RowResult getClosestRowBefore(final byte [] regionName,
-    final byte [] row, final byte [] columnFamily)
+  public Result getClosestRowBefore(final byte [] regionName,
+    final byte [] row, final byte [] family)
   throws IOException;
 
   /**
-   * Get selected columns for the specified row at a given timestamp.
    * 
-   * @param regionName region name
-   * @param row row key
-   * @param columns columns to get
-   * @param ts time stamp
-   * @param numVersions number of versions
-   * @param lockId lock id
-   * @return map of values
-   * @throws IOException
+   * @return the regions served by this regionserver
    */
-  public RowResult getRow(final byte [] regionName, final byte [] row, 
-    final byte[][] columns, final long ts,
-    final int numVersions, final long lockId)
-  throws IOException;
-
-  /**
-   * Applies a batch of updates via one RPC
-   * 
-   * @param regionName name of the region to update
-   * @param b BatchUpdate
-   * @param lockId lock id
-   * @throws IOException
-   */
-  public void batchUpdate(final byte [] regionName, final BatchUpdate b,
-      final long lockId)
-  throws IOException;
-  
-  /**
-   * Applies a batch of updates via one RPC for many rows
-   * 
-   * @param regionName name of the region to update
-   * @param b BatchUpdate[]
-   * @throws IOException
-   * @return number of updates applied
-   */
-  public int batchUpdates(final byte[] regionName, final BatchUpdate[] b)
-  throws IOException;
+  public HRegion [] getOnlineRegionsAsArray();
   
   /**
-   * Applies a batch of updates to one row atomically via one RPC
-   * if the columns specified in expectedValues match
-   * the given values in expectedValues
-   * 
-   * @param regionName name of the region to update
-   * @param b BatchUpdate
-   * @param expectedValues map of column names to expected data values.
-   * @return true if update was applied
+   * Perform Get operation.
+   * @param regionName name of region to get from
+   * @param get Get operation
+   * @return Result
    * @throws IOException
    */
-  public boolean checkAndSave(final byte [] regionName, final BatchUpdate b,
-      final HbaseMapWritable<byte[],byte[]> expectedValues)
-  throws IOException;
-  
+  public Result get(byte [] regionName, Get get) throws IOException;
 
   /**
-   * Delete all cells that match the passed row and column and whose timestamp
-   * is equal-to or older than the passed timestamp.
-   * 
-   * @param regionName region name
-   * @param row row key
-   * @param column column key
-   * @param timestamp Delete all entries that have this timestamp or older
-   * @param lockId lock id
+   * Perform exists operation.
+   * @param regionName name of region to get from
+   * @param get Get operation describing cell to test
+   * @return true if exists
    * @throws IOException
    */
-  public void deleteAll(byte [] regionName, byte [] row, byte [] column,
-    long timestamp, long lockId)
-  throws IOException;
+  public boolean exists(byte [] regionName, Get get) throws IOException;
 
   /**
-   * Delete all cells that match the passed row and whose
-   * timestamp is equal-to or older than the passed timestamp.
-   *
-   * @param regionName region name
-   * @param row row key
-   * @param timestamp Delete all entries that have this timestamp or older
-   * @param lockId lock id
+   * Put data into the specified region 
+   * @param regionName
+   * @param put the data to be put
    * @throws IOException
    */
-  public void deleteAll(byte [] regionName, byte [] row, long timestamp,
-      long lockId)
+  public void put(final byte [] regionName, final Put put)
   throws IOException;
   
   /**
-   * Delete all cells that match the passed row & the column regex and whose
-   * timestamp is equal-to or older than the passed timestamp.
-   * 
+   * Put an array of puts into the specified region
    * @param regionName
-   * @param row
-   * @param colRegex
-   * @param timestamp
-   * @param lockId
+   * @param puts
+   * @return
    * @throws IOException
    */
-  public void deleteAllByRegex(byte [] regionName, byte [] row, String colRegex, 
-      long timestamp, long lockId)
+  public int put(final byte[] regionName, final Put [] puts)
   throws IOException;
-
+  
+  
   /**
-   * Delete all cells for a row with matching column family with timestamps
-   * less than or equal to <i>timestamp</i>.
-   *
-   * @param regionName The name of the region to operate on
-   * @param row The row to operate on
-   * @param family The column family to match
-   * @param timestamp Timestamp to match
-   * @param lockId lock id
+   * Deletes all the KeyValues that match those found in the Delete object,
+   * if their timestamp is less than or equal to the one in the Delete. A
+   * delete with a specific timestamp deletes only that exact KeyValue.
+   * @param regionName
+   * @param delete
    * @throws IOException
    */
-  public void deleteFamily(byte [] regionName, byte [] row, byte [] family, 
-    long timestamp, long lockId)
+  public void delete(final byte[] regionName, final Delete delete)
   throws IOException;
   
   /**
-   * Delete all cells for a row with matching column family regex with 
-   * timestamps less than or equal to <i>timestamp</i>.
+   * Atomically checks whether a row/family/qualifier value matches the
+   * expected value. If it does, it applies the put.
    * 
-   * @param regionName The name of the region to operate on
-   * @param row The row to operate on
-   * @param familyRegex column family regex
-   * @param timestamp Timestamp to match
-   * @param lockId lock id
+   * @param regionName
+   * @param row
+   * @param family
+   * @param qualifier
+   * @param value the expected value
+   * @param put
    * @throws IOException
+   * @return true if the new put was executed, false otherwise
    */
-  public void deleteFamilyByRegex(byte [] regionName, byte [] row, String familyRegex, 
-    long timestamp, long lockId) 
+  public boolean checkAndPut(final byte[] regionName, final byte [] row, 
+      final byte [] family, final byte [] qualifier, final byte [] value,
+      final Put put)
   throws IOException;
-
+  
   /**
-   * Returns true if any cells exist for the given coordinate.
+   * Atomically increments a column value. If the column value isn't long-like,
+   * this could throw an exception.
    * 
-   * @param regionName The name of the region
-   * @param row The row
-   * @param column The column, or null for any
-   * @param timestamp The timestamp, or LATEST_TIMESTAMP for any
-   * @param lockID lock id
-   * @return true if the row exists, false otherwise
+   * @param regionName
+   * @param row
+   * @param family
+   * @param qualifier
+   * @param amount
+   * @return new incremented column value
    * @throws IOException
    */
-  public boolean exists(byte [] regionName, byte [] row, byte [] column, 
-    long timestamp, long lockID)
+  public long incrementColumnValue(byte [] regionName, byte [] row, 
+      byte [] family, byte [] qualifier, long amount)
   throws IOException;
-
+  
+  
   //
   // remote scanner interface
   //
@@ -232,20 +162,11 @@
    * Opens a remote scanner with a RowFilter.
    * 
    * @param regionName name of region to scan
-   * @param columns columns to scan. If column name is a column family, all
-   * columns of the specified column family are returned.  Its also possible
-   * to pass a regex for column family name. A column name is judged to be
-   * regex if it contains at least one of the following characters:
-   * <code>\+|^&*$[]]}{)(</code>.
-   * @param startRow starting row to scan
-   * @param timestamp only return values whose timestamp is <= this value
-   * @param filter RowFilter for filtering results at the row-level.
-   *
+   * @param scan configured scan object
    * @return scannerId scanner identifier used in other calls
    * @throws IOException
    */
-  public long openScanner(final byte [] regionName, final byte [][] columns,
-      final byte [] startRow, long timestamp, RowFilterInterface filter)
+  public long openScanner(final byte [] regionName, final Scan scan)
   throws IOException;
   
   /**
@@ -254,7 +175,7 @@
    * @return map of values
    * @throws IOException
    */
-  public RowResult next(long scannerId) throws IOException;
+  public Result next(long scannerId) throws IOException;
   
   /**
    * Get the next set of values
@@ -263,7 +184,7 @@
    * @return map of values
    * @throws IOException
    */
-  public RowResult[] next(long scannerId, int numberOfRows) throws IOException;
+  public Result [] next(long scannerId, int numberOfRows) throws IOException;
   
   /**
    * Close a scanner
@@ -272,7 +193,7 @@
    * @throws IOException
    */
   public void close(long scannerId) throws IOException;
-  
+
   /**
    * Opens a remote row lock.
    *
@@ -294,19 +215,6 @@
   public void unlockRow(final byte [] regionName, final long lockId)
   throws IOException;
   
-  /**
-   * Atomically increments a column value. If the column value isn't long-like, this could
-   * throw an exception.
-   * 
-   * @param regionName
-   * @param row
-   * @param column
-   * @param amount
-   * @return new incremented column value
-   * @throws IOException
-   */
-  public long incrementColumnValue(byte [] regionName, byte [] row,
-      byte [] column, long amount) throws IOException;
   
   /**
    * Method used when a master is taking the place of another failed one.
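
The interface above replaces the old Cell/RowResult/BatchUpdate RPC calls with
Get, Put, Delete and Scan based methods. A hedged sketch of how a caller might
drive the new methods follows; the "server" proxy, region name, row, family and
qualifier values are placeholders (obtaining the proxy is out of scope here),
and only methods visible in the diff above are used.

import java.io.IOException;

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionInterfaceSketch {
  static void demo(HRegionInterface server, byte [] regionName) throws IOException {
    byte [] row = Bytes.toBytes("row1");
    byte [] family = Bytes.toBytes("info");
    byte [] qualifier = Bytes.toBytes("count");

    // Write a cell, then read it back with a Get
    Put put = new Put(row);
    put.add(family, qualifier, Bytes.toBytes(1L));
    server.put(regionName, put);

    Get get = new Get(row);
    get.addColumn(family, qualifier);
    Result result = server.get(regionName, get);
    byte [] current = result.getValue(family, qualifier);

    // checkAndPut only applies the update if the stored value still matches
    Put update = new Put(row);
    update.add(family, qualifier, Bytes.toBytes(2L));
    boolean applied = server.checkAndPut(regionName, row, family, qualifier, current, update);

    // Atomic increment of a long-encoded column
    long incremented = server.incrementColumnValue(regionName, row, family, qualifier, 1L);

    // Scan-based iteration replaces the old columns/startRow/timestamp arguments
    long scannerId = server.openScanner(regionName, new Scan(row).addFamily(family));
    try {
      for (Result r = server.next(scannerId); r != null && r.size() > 0;
          r = server.next(scannerId)) {
        // process r ...
      }
    } finally {
      server.close(scannerId);
    }

    // Remove the cells written above
    Delete delete = new Delete(row);
    delete.deleteColumns(family, qualifier);
    server.delete(regionName, delete);
  }
}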

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/TransactionalRegionInterface.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/TransactionalRegionInterface.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/TransactionalRegionInterface.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/TransactionalRegionInterface.java Sat Jun  6 01:26:21 2009
@@ -19,7 +19,8 @@
 
 import java.io.IOException;
 
-import org.apache.hadoop.hbase.filter.RowFilterInterface;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.RowResult;
@@ -136,33 +137,24 @@
    * 
    * @param transactionId
    * @param regionName region name
-   * @param row row key
+   * @param delete
    * @param timestamp Delete all entries that have this timestamp or older
    * @throws IOException
    */
-  public void deleteAll(long transactionId, byte[] regionName, byte[] row,
-      long timestamp) throws IOException;
-
+  public void delete(long transactionId, byte [] regionName, Delete delete)
+  throws IOException;
+  
   /**
    * Opens a remote scanner with a RowFilter.
    * 
    * @param transactionId
    * @param regionName name of region to scan
-   * @param columns columns to scan. If column name is a column family, all
-   * columns of the specified column family are returned. Its also possible to
-   * pass a regex for column family name. A column name is judged to be regex if
-   * it contains at least one of the following characters:
-   * <code>\+|^&*$[]]}{)(</code>.
-   * @param startRow starting row to scan
-   * @param timestamp only return values whose timestamp is <= this value
-   * @param filter RowFilter for filtering results at the row-level.
-   * 
+   * @param scan
    * @return scannerId scanner identifier used in other calls
    * @throws IOException
    */
   public long openScanner(final long transactionId, final byte[] regionName,
-      final byte[][] columns, final byte[] startRow, long timestamp,
-      RowFilterInterface filter) throws IOException;
+      Scan scan) throws IOException;
 
   /**
    * Applies a batch of updates via one RPC

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java Sat Jun  6 01:26:21 2009
@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2009 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -28,7 +28,9 @@
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.UnknownScannerException;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Scanner;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.filter.RowFilterInterface;
 import org.apache.hadoop.hbase.filter.RowFilterSet;
 import org.apache.hadoop.hbase.filter.StopRowFilter;
@@ -73,7 +75,7 @@
  * </pre>
  */
 public abstract class TableInputFormatBase
-implements InputFormat<ImmutableBytesWritable, RowResult> {
+implements InputFormat<ImmutableBytesWritable, Result> {
   final Log LOG = LogFactory.getLog(TableInputFormatBase.class);
   private byte [][] inputColumns;
   private HTable table;
@@ -84,12 +86,12 @@
    * Iterate over an HBase table data, return (Text, RowResult) pairs
    */
   protected class TableRecordReader
-  implements RecordReader<ImmutableBytesWritable, RowResult> {
+  implements RecordReader<ImmutableBytesWritable, Result> {
     private byte [] startRow;
     private byte [] endRow;
     private byte [] lastRow;
     private RowFilterInterface trrRowFilter;
-    private Scanner scanner;
+    private ResultScanner scanner;
     private HTable htable;
     private byte [][] trrInputColumns;
 
@@ -106,16 +108,21 @@
             new HashSet<RowFilterInterface>();
           rowFiltersSet.add(new WhileMatchRowFilter(new StopRowFilter(endRow)));
           rowFiltersSet.add(trrRowFilter);
-          this.scanner = this.htable.getScanner(trrInputColumns, startRow,
-            new RowFilterSet(RowFilterSet.Operator.MUST_PASS_ALL,
-              rowFiltersSet));
+          Scan scan = new Scan(startRow);
+          scan.addColumns(trrInputColumns);
+//          scan.setFilter(new RowFilterSet(RowFilterSet.Operator.MUST_PASS_ALL,
+//              rowFiltersSet));
+          this.scanner = this.htable.getScanner(scan);
         } else {
-          this.scanner =
-            this.htable.getScanner(trrInputColumns, firstRow, endRow);
+          Scan scan = new Scan(firstRow, endRow);
+          scan.addColumns(trrInputColumns);
+          this.scanner = this.htable.getScanner(scan);
         }
       } else {
-        this.scanner =
-          this.htable.getScanner(trrInputColumns, firstRow, trrRowFilter);
+        Scan scan = new Scan(firstRow);
+        scan.addColumns(trrInputColumns);
+//        scan.setFilter(trrRowFilter);
+        this.scanner = this.htable.getScanner(scan);
       }
     }
 
@@ -182,8 +189,8 @@
      *
      * @see org.apache.hadoop.mapred.RecordReader#createValue()
      */
-    public RowResult createValue() {
-      return new RowResult();
+    public Result createValue() {
+      return new Result();
     }
 
     public long getPos() {
@@ -203,9 +210,9 @@
      * @return true if there was more data
      * @throws IOException
      */
-    public boolean next(ImmutableBytesWritable key, RowResult value)
+    public boolean next(ImmutableBytesWritable key, Result value)
     throws IOException {
-      RowResult result;
+      Result result;
       try {
         result = this.scanner.next();
       } catch (UnknownScannerException e) {
@@ -232,7 +239,7 @@
    * @see org.apache.hadoop.mapred.InputFormat#getRecordReader(InputSplit,
    *      JobConf, Reporter)
    */
-  public RecordReader<ImmutableBytesWritable, RowResult> getRecordReader(
+  public RecordReader<ImmutableBytesWritable, Result> getRecordReader(
       InputSplit split, JobConf job, Reporter reporter)
   throws IOException {
     TableSplit tSplit = (TableSplit) split;
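
For reference, a small sketch of the client-side pattern the record reader now
follows: build a Scan, obtain a ResultScanner from the HTable, and iterate
Results until next() returns null. The table name, family and row keys are
placeholders, not values from this commit.

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanSketch {
  public static void main(String [] args) throws IOException {
    HTable table = new HTable(new HBaseConfiguration(), "mytable");
    Scan scan = new Scan(Bytes.toBytes("startrow"), Bytes.toBytes("stoprow"));
    scan.addFamily(Bytes.toBytes("info"));
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result result = scanner.next(); result != null; result = scanner.next()) {
        // each Result replaces the old RowResult
        System.out.println(Bytes.toString(result.getRow()));
      }
    } finally {
      scanner.close();
    }
  }
}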

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java Sat Jun  6 01:26:21 2009
@@ -1,5 +1,5 @@
 /**
- * Copyright 2007 The Apache Software Foundation
+ * Copyright 2009 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -26,6 +26,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.mapred.FileAlreadyExistsException;
@@ -40,7 +41,7 @@
  * Convert Map/Reduce output and write it to an HBase table
  */
 public class TableOutputFormat extends
-FileOutputFormat<ImmutableBytesWritable, BatchUpdate> {
+FileOutputFormat<ImmutableBytesWritable, Put> {
 
   /** JobConf parameter that specifies the output table */
   public static final String OUTPUT_TABLE = "hbase.mapred.outputtable";
@@ -51,7 +52,7 @@
    * and write to an HBase table
    */
   protected static class TableRecordWriter
-    implements RecordWriter<ImmutableBytesWritable, BatchUpdate> {
+    implements RecordWriter<ImmutableBytesWritable, Put> {
     private HTable m_table;
 
     /**
@@ -69,8 +70,8 @@
     }
 
     public void write(ImmutableBytesWritable key,
-        BatchUpdate value) throws IOException {
-      m_table.commit(new BatchUpdate(value));
+        Put value) throws IOException {
+      m_table.put(new Put(value));
     }
   }
   

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java Sat Jun  6 01:26:21 2009
@@ -38,8 +38,9 @@
 import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.UnknownScannerException;
-import org.apache.hadoop.hbase.io.BatchUpdate;
-import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.regionserver.HLog;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -147,16 +148,18 @@
 
     // Array to hold list of split parents found.  Scan adds to list.  After
     // scan we go check if parents can be removed.
-    Map<HRegionInfo, RowResult> splitParents =
-      new HashMap<HRegionInfo, RowResult>();
+    Map<HRegionInfo, Result> splitParents =
+      new HashMap<HRegionInfo, Result>();
     List<byte []> emptyRows = new ArrayList<byte []>();
     int rows = 0;
     try {
       regionServer = master.connection.getHRegionConnection(region.getServer());
+      
       scannerId = regionServer.openScanner(region.getRegionName(),
-        COLUMN_FAMILY_ARRAY, EMPTY_START_ROW, HConstants.LATEST_TIMESTAMP, null);
+          new Scan().addFamily(HConstants.CATALOG_FAMILY));
       while (true) {
-        RowResult values = regionServer.next(scannerId);
+        Result values = regionServer.next(scannerId);
+        
         if (values == null || values.size() == 0) {
           break;
         }
@@ -165,8 +168,16 @@
           emptyRows.add(values.getRow());
           continue;
         }
-        String serverName = Writables.cellToString(values.get(COL_SERVER));
-        long startCode = Writables.cellToLong(values.get(COL_STARTCODE));
+        String serverName = "";
+        byte [] val = values.getValue(CATALOG_FAMILY, SERVER_QUALIFIER);
+        if( val != null) {
+          serverName = Bytes.toString(val);
+        }
+        long startCode = 0L;
+        val = values.getValue(CATALOG_FAMILY, STARTCODE_QUALIFIER);
+        if(val != null) {
+          startCode = Bytes.toLong(val);
+        }
 
         // Note Region has been assigned.
         checkAssigned(info, serverName, startCode);
@@ -213,7 +224,7 @@
     // Take a look at split parents to see if any we can clean up.
     
     if (splitParents.size() > 0) {
-      for (Map.Entry<HRegionInfo, RowResult> e : splitParents.entrySet()) {
+      for (Map.Entry<HRegionInfo, Result> e : splitParents.entrySet()) {
         HRegionInfo hri = e.getKey();
         cleanupSplits(region.getRegionName(), regionServer, hri, e.getValue());
       }
@@ -250,13 +261,13 @@
    */
   private boolean cleanupSplits(final byte [] metaRegionName, 
     final HRegionInterface srvr, final HRegionInfo parent,
-    RowResult rowContent)
+    Result rowContent)
   throws IOException {
     boolean result = false;
     boolean hasReferencesA = hasReferences(metaRegionName, srvr,
-        parent.getRegionName(), rowContent, COL_SPLITA);
+        parent.getRegionName(), rowContent, CATALOG_FAMILY, SPLITA_QUALIFIER);
     boolean hasReferencesB = hasReferences(metaRegionName, srvr,
-        parent.getRegionName(), rowContent, COL_SPLITB);
+        parent.getRegionName(), rowContent, CATALOG_FAMILY, SPLITB_QUALIFIER);
     if (!hasReferencesA && !hasReferencesB) {
       LOG.info("Deleting region " + parent.getRegionNameAsString() +
         " (encoded=" + parent.getEncodedName() +
@@ -283,15 +294,16 @@
    */
   private boolean hasReferences(final byte [] metaRegionName, 
     final HRegionInterface srvr, final byte [] parent,
-    RowResult rowContent, final byte [] splitColumn)
+    Result rowContent, final byte [] splitFamily, byte [] splitQualifier)
   throws IOException {
     boolean result = false;
     HRegionInfo split =
-      Writables.getHRegionInfo(rowContent.get(splitColumn));
+      Writables.getHRegionInfo(rowContent.getValue(splitFamily, splitQualifier));
     if (split == null) {
       return result;
     }
-    Path tabledir = new Path(this.master.rootdir, split.getTableDesc().getNameAsString());
+    Path tabledir =
+      new Path(this.master.rootdir, split.getTableDesc().getNameAsString());
     for (HColumnDescriptor family: split.getTableDesc().getFamilies()) {
       Path p = Store.getStoreHomedir(tabledir, split.getEncodedName(),
         family.getName());
@@ -320,10 +332,10 @@
         " no longer has references to " + Bytes.toString(parent));
     }
     
-    BatchUpdate b = new BatchUpdate(parent);
-    b.delete(splitColumn);
-    srvr.batchUpdate(metaRegionName, b, -1L);
-      
+    Delete delete = new Delete(parent);
+    delete.deleteColumns(splitFamily, splitQualifier);
+    srvr.delete(metaRegionName, delete);
+    
     return result;
   }
 

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java Sat Jun  6 01:26:21 2009
@@ -27,6 +27,8 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.util.Writables;
@@ -78,11 +80,13 @@
       }
 
       // Update meta table
-      BatchUpdate b = new BatchUpdate(i.getRegionName());
-      updateRegionInfo(b, i);
-      b.delete(COL_SERVER);
-      b.delete(COL_STARTCODE);
-      server.batchUpdate(m.getRegionName(), b, -1L);
+      Put put = updateRegionInfo(i);
+      server.put(m.getRegionName(), put);
+      
+      Delete delete = new Delete(i.getRegionName());
+      delete.deleteColumns(CATALOG_FAMILY, SERVER_QUALIFIER);
+      delete.deleteColumns(CATALOG_FAMILY, STARTCODE_QUALIFIER);
+      server.delete(m.getRegionName(), delete);
       if (LOG.isDebugEnabled()) {
         LOG.debug("Updated columns in row: " + i.getRegionNameAsString());
       }
@@ -125,9 +129,11 @@
     servedRegions.clear();
   }
 
-  protected void updateRegionInfo(final BatchUpdate b, final HRegionInfo i)
+  protected Put updateRegionInfo(final HRegionInfo i)
   throws IOException {
     i.setOffline(!online);
-    b.put(COL_REGIONINFO, Writables.getBytes(i));
+    Put put = new Put(i.getRegionName());
+    put.add(CATALOG_FAMILY, REGIONINFO_QUALIFIER, Writables.getBytes(i));
+    return put;
   }
 }

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ColumnOperation.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ColumnOperation.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ColumnOperation.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ColumnOperation.java Sat Jun  6 01:26:21 2009
@@ -25,7 +25,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableNotDisabledException;
-import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.util.Writables;
 
@@ -47,9 +47,9 @@
 
   protected void updateRegionInfo(HRegionInterface server, byte [] regionName,
     HRegionInfo i) throws IOException {
-    BatchUpdate b = new BatchUpdate(i.getRegionName());
-    b.put(COL_REGIONINFO, Writables.getBytes(i));
-    server.batchUpdate(regionName, b, -1L);
+    Put put = new Put(i.getRegionName());
+    put.add(CATALOG_FAMILY, REGIONINFO_QUALIFIER, Writables.getBytes(i));
+    server.put(regionName, put);
     if (LOG.isDebugEnabled()) {
       LOG.debug("updated columns in row: " + i.getRegionNameAsString());
     }

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java Sat Jun  6 01:26:21 2009
@@ -1,5 +1,5 @@
 /**
- * Copyright 2007 The Apache Software Foundation
+ * Copyright 2009 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -23,16 +23,15 @@
 import java.lang.management.ManagementFactory;
 import java.lang.management.RuntimeMXBean;
 import java.lang.reflect.Constructor;
-import java.net.InetAddress;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.NavigableMap;
 import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.DelayQueue;
-import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.PriorityBlockingQueue;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -51,19 +50,20 @@
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.HServerLoad;
-import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.LocalHBaseCluster;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.RegionHistorian;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.ServerConnection;
 import org.apache.hadoop.hbase.client.ServerConnectionManager;
-import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.ipc.HBaseRPC;
 import org.apache.hadoop.hbase.ipc.HBaseRPCProtocolVersion;
 import org.apache.hadoop.hbase.ipc.HBaseServer;
@@ -714,12 +714,14 @@
     byte [] metaRegionName = m.getRegionName();
     HRegionInterface srvr = connection.getHRegionConnection(m.getServer());
     byte[] firstRowInTable = Bytes.toBytes(tableName + ",,");
-    long scannerid = srvr.openScanner(metaRegionName, COL_REGIONINFO_ARRAY,
-        firstRowInTable, LATEST_TIMESTAMP, null);
+    Scan scan = new Scan(firstRowInTable);
+    scan.addColumn(CATALOG_FAMILY, REGIONINFO_QUALIFIER);
+    long scannerid = srvr.openScanner(metaRegionName, scan);
     try {
-      RowResult data = srvr.next(scannerid);
+      Result data = srvr.next(scannerid);
       if (data != null && data.size() > 0) {
-        HRegionInfo info = Writables.getHRegionInfo(data.get(COL_REGIONINFO));
+        HRegionInfo info = Writables.getHRegionInfo(
+            data.getValue(CATALOG_FAMILY, REGIONINFO_QUALIFIER));
         if (info.getTableDesc().getNameAsString().equals(tableName)) {
           // A region for this table already exists. Ergo table exists.
           throw new TableExistsException(tableName);
@@ -752,7 +754,7 @@
 
   public void deleteColumn(final byte [] tableName, final byte [] c)
   throws IOException {
-    new DeleteColumn(this, tableName, HStoreKey.getFamily(c)).process();
+    new DeleteColumn(this, tableName, KeyValue.parseColumn(c)[0]).process();
   }
 
   public void enableTable(final byte [] tableName) throws IOException {
@@ -778,23 +780,23 @@
     for (MetaRegion m: regions) {
       byte [] metaRegionName = m.getRegionName();
       HRegionInterface srvr = connection.getHRegionConnection(m.getServer());
+      Scan scan = new Scan(firstRowInTable);
+      scan.addColumn(CATALOG_FAMILY, REGIONINFO_QUALIFIER);
+      scan.addColumn(CATALOG_FAMILY, SERVER_QUALIFIER);
       long scannerid = 
-        srvr.openScanner(metaRegionName, 
-          new byte[][] {COL_REGIONINFO, COL_SERVER},
-          firstRowInTable, 
-          LATEST_TIMESTAMP, 
-          null);
+        srvr.openScanner(metaRegionName, scan);
       try {
         while (true) {
-          RowResult data = srvr.next(scannerid);
+          Result data = srvr.next(scannerid);
           if (data == null || data.size() <= 0)
             break;
-          HRegionInfo info = Writables.getHRegionInfo(data.get(COL_REGIONINFO));
+          HRegionInfo info = Writables.getHRegionInfo(
+              data.getValue(CATALOG_FAMILY, REGIONINFO_QUALIFIER));
           if (Bytes.compareTo(info.getTableDesc().getName(), tableName) == 0) {
-            Cell cell = data.get(COL_SERVER);
-            if (cell != null) {
+            byte [] value = data.getValue(CATALOG_FAMILY, SERVER_QUALIFIER);
+            if (value != null) {
               HServerAddress server =
-                new HServerAddress(Bytes.toString(cell.getValue()));
+                new HServerAddress(Bytes.toString(value));
               result.add(new Pair<HRegionInfo,HServerAddress>(info, server));
             }
           } else {
@@ -816,25 +818,25 @@
       byte [] firstRowInTable = Bytes.toBytes(Bytes.toString(tableName) + ",,");
       byte [] metaRegionName = m.getRegionName();
       HRegionInterface srvr = connection.getHRegionConnection(m.getServer());
+      Scan scan = new Scan(firstRowInTable);
+      scan.addColumn(CATALOG_FAMILY, REGIONINFO_QUALIFIER);
+      scan.addColumn(CATALOG_FAMILY, SERVER_QUALIFIER);
       long scannerid = 
-          srvr.openScanner(metaRegionName, 
-            new byte[][] {COL_REGIONINFO, COL_SERVER},
-            firstRowInTable, 
-            LATEST_TIMESTAMP, 
-            null);
+        srvr.openScanner(metaRegionName, scan);
       try {
         while (true) {
-          RowResult data = srvr.next(scannerid);
+          Result data = srvr.next(scannerid);
           if (data == null || data.size() <= 0)
             break;
-          HRegionInfo info = Writables.getHRegionInfo(data.get(COL_REGIONINFO));
+          HRegionInfo info = Writables.getHRegionInfo(
+              data.getValue(CATALOG_FAMILY, REGIONINFO_QUALIFIER));
           if (Bytes.compareTo(info.getTableDesc().getName(), tableName) == 0) {
             if ((Bytes.compareTo(info.getStartKey(), rowKey) >= 0) &&
                 (Bytes.compareTo(info.getEndKey(), rowKey) < 0)) {
-                Cell cell = data.get(COL_SERVER);
-                if (cell != null) {
+                byte [] value = data.getValue(CATALOG_FAMILY, SERVER_QUALIFIER);
+                if (value != null) {
                   HServerAddress server =
-                    new HServerAddress(Bytes.toString(cell.getValue()));
+                    new HServerAddress(Bytes.toString(value));
                   return new Pair<HRegionInfo,HServerAddress>(info, server);
                 }
             }
@@ -857,15 +859,17 @@
     for (MetaRegion m: regions) {
       byte [] metaRegionName = m.getRegionName();
       HRegionInterface srvr = connection.getHRegionConnection(m.getServer());
-      RowResult data = srvr.getRow(metaRegionName, regionName, 
-        new byte[][] {COL_REGIONINFO, COL_SERVER},
-        HConstants.LATEST_TIMESTAMP, 1, -1L);
+      Get get = new Get(regionName);
+      get.addColumn(CATALOG_FAMILY, REGIONINFO_QUALIFIER);
+      get.addColumn(CATALOG_FAMILY, SERVER_QUALIFIER);
+      Result data = srvr.get(metaRegionName, get);
       if(data == null || data.size() <= 0) continue;
-      HRegionInfo info = Writables.getHRegionInfo(data.get(COL_REGIONINFO));
-      Cell cell = data.get(COL_SERVER);
-      if(cell != null) {
+      HRegionInfo info = Writables.getHRegionInfo(
+          data.getValue(CATALOG_FAMILY, REGIONINFO_QUALIFIER));
+      byte [] value = data.getValue(CATALOG_FAMILY, SERVER_QUALIFIER);
+      if(value != null) {
         HServerAddress server =
-          new HServerAddress(Bytes.toString(cell.getValue()));
+          new HServerAddress(Bytes.toString(value));
         return new Pair<HRegionInfo,HServerAddress>(info, server);
       }
     }
@@ -876,15 +880,18 @@
    * Get row from meta table.
    * @param row
    * @param columns
-   * @return RowResult
+   * @return Result
    * @throws IOException
    */
-  protected RowResult getFromMETA(final byte [] row, final byte [][] columns)
+  protected Result getFromMETA(final byte [] row, final byte [] family)
   throws IOException {
     MetaRegion meta = this.regionManager.getMetaRegionForRow(row);
     HRegionInterface srvr = getMETAServer(meta);
-    return srvr.getRow(meta.getRegionName(), row, columns,
-      HConstants.LATEST_TIMESTAMP, 1, -1);
+
+    Get get = new Get(row);
+    get.addFamily(family);
+    
+    return srvr.get(meta.getRegionName(), get);
   }
   
   /*
@@ -897,10 +904,11 @@
     return this.connection.getHRegionConnection(meta.getServer());
   }
 
-  public void modifyTable(final byte[] tableName, int op, Writable[] args)
+  public void modifyTable(final byte[] tableName, HConstants.Modify op, 
+      Writable[] args)
     throws IOException {
     switch (op) {
-    case MODIFY_TABLE_SET_HTD:
+    case TABLE_SET_HTD:
       if (args == null || args.length < 1 || 
           !(args[0] instanceof HTableDescriptor))
         throw new IOException("SET_HTD request requires an HTableDescriptor");
@@ -909,10 +917,10 @@
       new ModifyTableMeta(this, tableName, htd).process();
       break;
 
-    case MODIFY_TABLE_SPLIT:
-    case MODIFY_TABLE_COMPACT:
-    case MODIFY_TABLE_MAJOR_COMPACT:
-    case MODIFY_TABLE_FLUSH:
+    case TABLE_SPLIT:
+    case TABLE_COMPACT:
+    case TABLE_MAJOR_COMPACT:
+    case TABLE_FLUSH:
       if (args != null && args.length > 0) {
         if (!(args[0] instanceof ImmutableBytesWritable))
           throw new IOException(
@@ -936,7 +944,7 @@
       }
       break;
 
-    case MODIFY_CLOSE_REGION:
+    case CLOSE_REGION:
       if (args == null || args.length < 1 || args.length > 2) {
         throw new IOException("Requires at least a region name; " +
           "or cannot have more than region name and servername");
@@ -947,12 +955,13 @@
       if (args.length == 2) {
         servername = Bytes.toString(((ImmutableBytesWritable)args[1]).get());
       }
-      // Need hri
-      RowResult rr = getFromMETA(regionname, HConstants.COLUMN_FAMILY_ARRAY);
+      // Need hri 
+      Result rr = getFromMETA(regionname, HConstants.CATALOG_FAMILY);
       HRegionInfo hri = getHRegionInfo(rr.getRow(), rr);
       if (servername == null) {
         // Get server from the .META. if it wasn't passed as argument
-        servername = Writables.cellToString(rr.get(COL_SERVER));
+        servername = 
+          Bytes.toString(rr.getValue(CATALOG_FAMILY, SERVER_QUALIFIER));
       }
       LOG.info("Marking " + hri.getRegionNameAsString() +
         " as closed on " + servername + "; cleaning SERVER + STARTCODE; " +
@@ -995,7 +1004,8 @@
   public HBaseConfiguration getConfiguration() {
     return this.conf;
   }
-    
+  
+  // TODO ryan rework this function
   /*
    * Get HRegionInfo from passed META map of row values.
    * Returns null if none found (and logs fact that expected COL_REGIONINFO
@@ -1005,22 +1015,24 @@
    * @return Null or found HRegionInfo.
    * @throws IOException
    */
-  HRegionInfo getHRegionInfo(final byte [] row, final Map<byte [], Cell> map)
+  HRegionInfo getHRegionInfo(final byte [] row, final Result res)
   throws IOException {
-    Cell regioninfo = map.get(COL_REGIONINFO);
+    byte [] regioninfo = res.getValue(CATALOG_FAMILY, REGIONINFO_QUALIFIER);
     if (regioninfo == null) {
       StringBuilder sb =  new StringBuilder();
-      for (byte [] e: map.keySet()) {
+      NavigableMap<byte[], byte[]> infoMap = res.getFamilyMap(CATALOG_FAMILY);
+      for (byte [] e: infoMap.keySet()) {
         if (sb.length() > 0) {
           sb.append(", ");
         }
-        sb.append(Bytes.toString(e));
+        sb.append(Bytes.toString(CATALOG_FAMILY) + ":" + Bytes.toString(e));
       }
-      LOG.warn(Bytes.toString(COL_REGIONINFO) + " is empty for row: " +
+      LOG.warn(Bytes.toString(CATALOG_FAMILY) + ":" +
+          Bytes.toString(REGIONINFO_QUALIFIER) + " is empty for row: " +
          Bytes.toString(row) + "; has keys: " + sb.toString());
       return null;
     }
-    return Writables.getHRegionInfo(regioninfo.getValue());
+    return Writables.getHRegionInfo(regioninfo);
   }
 
   /*
@@ -1065,7 +1077,6 @@
     System.exit(0);
   }
 
-  @SuppressWarnings("null")
   protected static void doMain(String [] args,
       Class<? extends HMaster> masterClass) {
 

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java Sat Jun  6 01:26:21 2009
@@ -26,6 +26,7 @@
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableNotDisabledException;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -50,9 +51,9 @@
   protected void updateRegionInfo(HRegionInterface server, byte [] regionName,
     HRegionInfo i)
   throws IOException {
-    BatchUpdate b = new BatchUpdate(i.getRegionName());
-    b.put(COL_REGIONINFO, Writables.getBytes(i));
-    server.batchUpdate(regionName, b, -1L);
+    Put put = new Put(i.getRegionName());
+    put.add(CATALOG_FAMILY, REGIONINFO_QUALIFIER, Writables.getBytes(i));
+    server.put(regionName, put);
     LOG.debug("updated HTableDescriptor for region " + i.getRegionNameAsString());
   }
 

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java Sat Jun  6 01:26:21 2009
@@ -25,6 +25,9 @@
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.RegionHistorian;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -78,11 +81,12 @@
         " in region " + Bytes.toString(metaRegionName) +
         " with startcode " + serverInfo.getStartCode() + " and server " +
         serverInfo.getServerAddress());
-    BatchUpdate b = new BatchUpdate(regionInfo.getRegionName());
-    b.put(COL_SERVER,
+    Put p = new Put(regionInfo.getRegionName());
+    p.add(CATALOG_FAMILY, SERVER_QUALIFIER,
         Bytes.toBytes(serverInfo.getServerAddress().toString()));
-    b.put(COL_STARTCODE, Bytes.toBytes(serverInfo.getStartCode()));
-    server.batchUpdate(metaRegionName, b, -1L);
+    p.add(CATALOG_FAMILY, STARTCODE_QUALIFIER,
+        Bytes.toBytes(serverInfo.getStartCode()));
+    server.put(metaRegionName, p);
     if (!historian.isOnline()) {
       // This is safest place to do the onlining of the historian in
       // the master.  When we get to here, we know there is a .META.
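
For readers following the API migration in these master hunks: the old BatchUpdate-based
writes to .META. become a Put keyed on the region name, with each value added under
CATALOG_FAMILY and an explicit qualifier, then handed to HRegionInterface.put(). A minimal
sketch of that pattern follows; the wrapper class and method are hypothetical, and the
constants are assumed to live in HConstants as in the code above.

import java.io.IOException;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.util.Bytes;

/** Illustrative only; not part of this commit. */
public class CatalogWriteSketch {
  static void writeRegionLocation(HRegionInterface server, byte [] metaRegionName,
      HRegionInfo regionInfo, HServerInfo serverInfo) throws IOException {
    // One Put per catalog row, keyed on the region name.
    Put p = new Put(regionInfo.getRegionName());
    // Server address and start code each go under their own qualifier.
    p.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
        Bytes.toBytes(serverInfo.getServerAddress().toString()));
    p.add(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
        Bytes.toBytes(serverInfo.getStartCode()));
    // A single RPC replaces the old batchUpdate(regionName, b, -1L) call.
    server.put(metaRegionName, p);
  }
}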

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java Sat Jun  6 01:26:21 2009
@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2009 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -31,6 +31,8 @@
 import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.regionserver.HLog;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -113,7 +115,7 @@
     List<byte []> emptyRows = new ArrayList<byte []>();
     try {
       while (true) {
-        RowResult values = null;
+        Result values = null;
         try {
           values = server.next(scannerId);
         } catch (IOException e) {
@@ -129,8 +131,10 @@
         // shutdown server but that would mean that we'd reassign regions that
         // were already out being assigned, ones that were product of a split
         // that happened while the shutdown was being processed.
-        String serverAddress = Writables.cellToString(values.get(COL_SERVER));
-        long startCode = Writables.cellToLong(values.get(COL_STARTCODE)); 
+        String serverAddress = 
+          Bytes.toString(values.getValue(CATALOG_FAMILY, SERVER_QUALIFIER));
+        long startCode =
+          Bytes.toLong(values.getValue(CATALOG_FAMILY, STARTCODE_QUALIFIER));
         String serverName = null;
         if (serverAddress != null && serverAddress.length() > 0) {
           serverName = HServerInfo.getServerName(serverAddress, startCode);
@@ -145,6 +149,7 @@
             Bytes.toString(row));
         }
 
+//        HRegionInfo info = master.getHRegionInfo(row, values.rowResult());
         HRegionInfo info = master.getHRegionInfo(row, values);
         if (info == null) {
           emptyRows.add(row);
@@ -221,9 +226,10 @@
         LOG.debug("process server shutdown scanning root region on " +
             master.getRootRegionLocation().getBindAddress());
       }
+      Scan scan = new Scan();
+      scan.addFamily(CATALOG_FAMILY);
       long scannerId = server.openScanner(
-          HRegionInfo.ROOT_REGIONINFO.getRegionName(), COLUMN_FAMILY_ARRAY,
-          EMPTY_START_ROW, HConstants.LATEST_TIMESTAMP, null);
+          HRegionInfo.ROOT_REGIONINFO.getRegionName(), scan);
       scanMetaRegion(server, scannerId,
           HRegionInfo.ROOT_REGIONINFO.getRegionName());
       return true;
@@ -240,9 +246,10 @@
         LOG.debug("process server shutdown scanning " +
           Bytes.toString(m.getRegionName()) + " on " + m.getServer());
       }
-      long scannerId =
-        server.openScanner(m.getRegionName(), COLUMN_FAMILY_ARRAY,
-        EMPTY_START_ROW, HConstants.LATEST_TIMESTAMP, null);
+      Scan scan = new Scan();
+      scan.addFamily(CATALOG_FAMILY);
+      long scannerId = server.openScanner(
+          m.getRegionName(), scan);
       scanMetaRegion(server, scannerId, m.getRegionName());
       return true;
     }

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/RegionManager.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/RegionManager.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/RegionManager.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/RegionManager.java Sat Jun  6 01:26:21 2009
@@ -49,12 +49,12 @@
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RegionHistorian;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.HMsg;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;
 
@@ -723,9 +723,12 @@
     // 3. Insert into meta
     HRegionInfo info = region.getRegionInfo();
     byte [] regionName = region.getRegionName();
-    BatchUpdate b = new BatchUpdate(regionName);
-    b.put(COL_REGIONINFO, Writables.getBytes(info));
-    server.batchUpdate(metaRegionName, b, -1L);
+    
+    Put put = new Put(regionName);
+    // Serialize the region info once and add it under the catalog family.
+    byte [] infoBytes = Writables.getBytes(info);
+    put.add(CATALOG_FAMILY, REGIONINFO_QUALIFIER, infoBytes);
+    server.put(metaRegionName, put);
     
     // 4. Close the new region to flush it to disk.  Close its log file too.
     region.close();
@@ -1204,18 +1207,21 @@
    * @param op
    */
   public void startAction(byte[] regionName, HRegionInfo info,
-      HServerAddress server, int op) {
+      HServerAddress server, HConstants.Modify op) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Adding operation " + op + " from tasklist");
+    }
     switch (op) {
-      case HConstants.MODIFY_TABLE_SPLIT:
+      case TABLE_SPLIT:
         startAction(regionName, info, server, this.regionsToSplit);
         break;
-      case HConstants.MODIFY_TABLE_COMPACT:
+      case TABLE_COMPACT:
         startAction(regionName, info, server, this.regionsToCompact);
         break;
-      case HConstants.MODIFY_TABLE_MAJOR_COMPACT:
+      case TABLE_MAJOR_COMPACT:
         startAction(regionName, info, server, this.regionsToMajorCompact);
         break;
-      case HConstants.MODIFY_TABLE_FLUSH:
+      case TABLE_FLUSH:
         startAction(regionName, info, server, this.regionsToFlush);
         break;
       default:
@@ -1233,18 +1239,21 @@
    * @param regionName
    * @param op
    */
-  public void endAction(byte[] regionName, int op) {
+  public void endAction(byte[] regionName, HConstants.Modify op) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Removing operation " + op + " from tasklist");
+    }
     switch (op) {
-    case HConstants.MODIFY_TABLE_SPLIT:
+    case TABLE_SPLIT:
       this.regionsToSplit.remove(regionName);
       break;
-    case HConstants.MODIFY_TABLE_COMPACT:
+    case TABLE_COMPACT:
       this.regionsToCompact.remove(regionName);
       break;
-    case HConstants.MODIFY_TABLE_MAJOR_COMPACT:
+    case TABLE_MAJOR_COMPACT:
       this.regionsToMajorCompact.remove(regionName);
       break;
-    case HConstants.MODIFY_TABLE_FLUSH:
+    case TABLE_FLUSH:
       this.regionsToFlush.remove(regionName);
       break;
     default:
@@ -1267,6 +1276,9 @@
    * @param returnMsgs
    */
   public void applyActions(HServerInfo serverInfo, ArrayList<HMsg> returnMsgs) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Applying operation in tasklists to region");
+    }
     applyActions(serverInfo, returnMsgs, this.regionsToCompact,
         HMsg.Type.MSG_REGION_COMPACT);
     applyActions(serverInfo, returnMsgs, this.regionsToSplit,

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableOperation.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableOperation.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableOperation.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableOperation.java Sat Jun  6 01:26:21 2009
@@ -31,10 +31,10 @@
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Writables;
 
 /**
  * Abstract base class for operations that need to examine all HRegionInfo 
@@ -80,26 +80,28 @@
       // Open a scanner on the meta region
       byte [] tableNameMetaStart =
           Bytes.toBytes(Bytes.toString(tableName) + ",,");
-
-      long scannerId = server.openScanner(m.getRegionName(),
-          COLUMN_FAMILY_ARRAY, tableNameMetaStart, HConstants.LATEST_TIMESTAMP, null);
+      Scan scan = new Scan(tableNameMetaStart).addFamily(CATALOG_FAMILY);
+      long scannerId = server.openScanner(m.getRegionName(), scan);
 
       List<byte []> emptyRows = new ArrayList<byte []>();
       try {
         while (true) {
-          RowResult values = server.next(scannerId);
-          if(values == null || values.size() == 0) {
+          Result values = server.next(scannerId);
+          if(values == null || values.isEmpty()) {
             break;
           }
           HRegionInfo info = this.master.getHRegionInfo(values.getRow(), values);
           if (info == null) {
             emptyRows.add(values.getRow());
-            LOG.error(Bytes.toString(COL_REGIONINFO) + " not found on " +
+            LOG.error(Bytes.toString(CATALOG_FAMILY) + ":" +
+                Bytes.toString(REGIONINFO_QUALIFIER) + " not found on " +
                       Bytes.toString(values.getRow()));
             continue;
           }
-          String serverAddress = Writables.cellToString(values.get(COL_SERVER));
-          long startCode = Writables.cellToLong(values.get(COL_STARTCODE)); 
+          String serverAddress = 
+            Bytes.toString(values.getValue(CATALOG_FAMILY, SERVER_QUALIFIER));
+          long startCode = 
+            Bytes.toLong(values.getValue(CATALOG_FAMILY, STARTCODE_QUALIFIER)); 
           String serverName = null;
           if (serverAddress != null && serverAddress.length() > 0) {
             serverName = HServerInfo.getServerName(serverAddress, startCode);
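
The read side changes the same way across ProcessServerShutdown and TableOperation: a Scan
restricted to CATALOG_FAMILY replaces the old column-array/timestamp openScanner signature,
and values come out of each Result with getValue(family, qualifier) instead of cells. A
hedged sketch of that loop, with the class and method names invented for illustration and
scanner cleanup assumed to work as it does elsewhere in the master code:

import java.io.IOException;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.util.Bytes;

/** Illustrative only; not part of this commit. */
public class CatalogScanSketch {
  static void scanCatalogRegion(HRegionInterface server, byte [] regionName)
      throws IOException {
    // Scan only the catalog family, as the new master code does.
    Scan scan = new Scan().addFamily(HConstants.CATALOG_FAMILY);
    long scannerId = server.openScanner(regionName, scan);
    try {
      while (true) {
        Result values = server.next(scannerId);
        if (values == null || values.isEmpty()) {
          break;                               // end of region
        }
        // Family + qualifier lookup replaces Writables.cellToString(values.get(COL_SERVER)).
        String serverAddress = Bytes.toString(
            values.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER));
        // ... decide what to do with the row here ...
      }
    } finally {
      server.close(scannerId);                 // assumed close call, as used elsewhere in the master
    }
  }
}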

Added: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java?rev=782178&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java (added)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java Sat Jun  6 01:26:21 2009
@@ -0,0 +1,112 @@
+/**
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+/**
+ * Simple wrapper for a byte buffer and a counter.  Does not copy.
+ * <p>
+ * NOT thread-safe because it is not used in a multi-threaded context, yet.
+ */
+public class ColumnCount {
+  private byte [] bytes;
+  private int offset;
+  private int length;
+  private int count;
+  
+  /**
+   * Constructor
+   * @param column the qualifier to count the versions for
+   */
+  public ColumnCount(byte [] column) {
+    this(column, 0);
+  }
+  
+  /**
+   * Constructor
+   * @param column the qualifier to count the versions for
+   * @param count initial count
+   */
+  public ColumnCount(byte [] column, int count) {
+    this(column, 0, column.length, count);
+  }
+  
+  /**
+   * Constructor
+   * @param column the qualifier to count the versions for
+   * @param offset in the passed buffer where to start the qualifier from
+   * @param length of the qualifier
+   * @param count initial count
+   */
+  public ColumnCount(byte [] column, int offset, int length, int count) {
+    this.bytes = column;
+    this.offset = offset;
+    this.length = length;
+    this.count = count;
+  }
+  
+  /**
+   * @return the buffer
+   */
+  public byte [] getBuffer(){
+    return this.bytes;
+  }
+  
+  /**
+   * @return the offset
+   */
+  public int getOffset(){
+    return this.offset;
+  }
+  
+  /**
+   * @return the length
+   */
+  public int getLength(){
+    return this.length;
+  }  
+  
+  /**
+   * Decrement the current version count
+   * @return current count
+   */
+  public int decrement() {
+    return --count;
+  }
+
+  /**
+   * Increment the current version count
+   * @return current count
+   */
+  public int increment() {
+    return ++count;
+  }
+  
+  /**
+   * Check to see if more versions of this column need to be fetched
+   * @param max the maximum number of versions to fetch
+   * @return true if more versions are needed, false otherwise
+   */
+  public boolean needMore(int max) {
+    if(this.count < max) {
+      return true;
+    }
+    return false;
+  }
+}
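
A short, hedged example of the intended use of ColumnCount: a tracker keeps one instance per
qualifier and stops returning versions once the configured maximum is reached. The driver
class below is invented for illustration.

import org.apache.hadoop.hbase.regionserver.ColumnCount;
import org.apache.hadoop.hbase.util.Bytes;

/** Illustrative only; not part of this commit. */
public class ColumnCountSketch {
  public static void main(String [] args) {
    int maxVersions = 3;
    ColumnCount col = new ColumnCount(Bytes.toBytes("qual"));
    int kept = 0;
    // Pretend five versions of "qual" stream past, newest first.
    for (int version = 0; version < 5; version++) {
      if (!col.needMore(maxVersions)) {
        break;                    // already kept maxVersions of this qualifier
      }
      col.increment();            // accept this version
      kept++;
    }
    System.out.println("kept " + kept + " of 5 versions");   // prints: kept 3 of 5 versions
  }
}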

Added: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java?rev=782178&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java (added)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java Sat Jun  6 01:26:21 2009
@@ -0,0 +1,78 @@
+/**
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.regionserver.QueryMatcher.MatchCode;
+
+/**
+ * Implementing classes of this interface will be used for the tracking
+ * and enforcement of columns and numbers of versions during the course of a 
+ * Get or Scan operation.
+ * <p>
+ * Currently there are two different types of Store/Family-level queries.
+ * <ul><li>{@link ExplicitColumnTracker} is used when the query specifies
+ * one or more column qualifiers to return in the family.
+ * <li>{@link WildcardColumnTracker} is used when the query asks for all
+ * qualifiers within the family.
+ * <p>
+ * This class is utilized by {@link QueryMatcher} through two methods:
+ * <ul><li>{@link checkColumn} is called when a Put satisfies all other
+ * conditions of the query.  This method returns a {@link MatchCode} to define
+ * what action should be taken.
+ * <li>{@link update} is called at the end of every StoreFile or Memcache.
+ * <p>
+ * This class is NOT thread-safe as queries are never multi-threaded 
+ */
+public interface ColumnTracker {
+  /**
+   * Keeps track of the number of versions for the columns asked for
+   * @param bytes buffer containing the column qualifier
+   * @param offset offset of the qualifier in the buffer
+   * @param length length of the qualifier
+   * @return the MatchCode telling the QueryMatcher what to do with this column
+   */
+  public MatchCode checkColumn(byte [] bytes, int offset, int length);
+  /**
+   * Updates internal variables in between files
+   */
+  public void update();
+  /**
+   * Resets the Matcher
+   */
+  public void reset();
+  /**
+   * Checks if the tracker is done with the requested columns.
+   * @return true when there are no more columns to track, false otherwise
+   */
+  public boolean done();
+
+  /**
+   * Used by matcher and scan/get to get a hint of the next column
+   * to seek to after checkColumn() returns SKIP.  Returns the next interesting
+   * column we want, or NULL if there is none (wildcard scanner).
+   *
+   * Implementations aren't required to return anything useful unless the most recent
+   * call was to checkColumn() and the return code was SKIP.  This is pretty implementation
+   * detail-y, but optimizations are like that.
+   *
+   * @return null, or a ColumnCount that we should seek to
+   */
+  public ColumnCount getColumnHint();
+}
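
To make the contract above concrete, here is a minimal, hedged sketch of an implementing
class: it includes every qualifier it sees, up to a fixed number of versions each, and
carries nothing between StoreFiles. The class is hypothetical, and MatchCode.INCLUDE and
MatchCode.SKIP are assumed to be among the QueryMatcher return codes referred to in the
javadoc above.

package org.apache.hadoop.hbase.regionserver;

import org.apache.hadoop.hbase.regionserver.QueryMatcher.MatchCode;
import org.apache.hadoop.hbase.util.Bytes;

/** Illustrative only; not part of this commit. */
public class IncludeAllColumnTracker implements ColumnTracker {
  private final int maxVersions;
  private ColumnCount current;    // qualifier currently being counted

  public IncludeAllColumnTracker(int maxVersions) {
    this.maxVersions = maxVersions;
  }

  public MatchCode checkColumn(byte [] bytes, int offset, int length) {
    if (current == null || !sameColumn(bytes, offset, length)) {
      current = new ColumnCount(bytes, offset, length, 0);   // new qualifier, restart count
    }
    if (!current.needMore(maxVersions)) {
      return MatchCode.SKIP;      // enough versions of this qualifier already returned
    }
    current.increment();
    return MatchCode.INCLUDE;
  }

  private boolean sameColumn(byte [] bytes, int offset, int length) {
    return Bytes.compareTo(current.getBuffer(), current.getOffset(),
        current.getLength(), bytes, offset, length) == 0;
  }

  public void update() {
    // Nothing carried between StoreFiles in this toy tracker.
  }

  public void reset() {
    current = null;
  }

  public boolean done() {
    return false;                 // wildcard-style: never finishes early
  }

  public ColumnCount getColumnHint() {
    return null;                  // no seek hint to offer
  }
}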

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java Sat Jun  6 01:26:21 2009
@@ -30,11 +30,11 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Writables;
 
@@ -202,18 +202,21 @@
     // Inform the HRegionServer that the parent HRegion is no-longer online.
     this.server.removeFromOnlineRegions(oldRegionInfo);
     
-    BatchUpdate update = new BatchUpdate(oldRegionInfo.getRegionName());
-    update.put(COL_REGIONINFO, Writables.getBytes(oldRegionInfo));
-    update.put(COL_SPLITA, Writables.getBytes(newRegions[0].getRegionInfo()));
-    update.put(COL_SPLITB, Writables.getBytes(newRegions[1].getRegionInfo()));
-    t.commit(update);
+    Put put = new Put(oldRegionInfo.getRegionName());
+    put.add(CATALOG_FAMILY, REGIONINFO_QUALIFIER, 
+        Writables.getBytes(oldRegionInfo));
+    put.add(CATALOG_FAMILY, SPLITA_QUALIFIER,
+        Writables.getBytes(newRegions[0].getRegionInfo()));
+    put.add(CATALOG_FAMILY, SPLITB_QUALIFIER,
+        Writables.getBytes(newRegions[1].getRegionInfo()));
+    t.put(put);
     
     // Add new regions to META
     for (int i = 0; i < newRegions.length; i++) {
-      update = new BatchUpdate(newRegions[i].getRegionName());
-      update.put(COL_REGIONINFO, Writables.getBytes(
+      put = new Put(newRegions[i].getRegionName());
+      put.add(CATALOG_FAMILY, REGIONINFO_QUALIFIER, Writables.getBytes(
         newRegions[i].getRegionInfo()));
-      t.commit(update);
+      t.put(put);
     }
         
     // Now tell the master about the new regions

Added: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java?rev=782178&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java (added)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java Sat Jun  6 01:26:21 2009
@@ -0,0 +1,120 @@
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.util.Bytes;
+
+
+/**
+ * Class that provides the static method needed when putting deletes into memcache.
+ */
+public class DeleteCompare {
+  
+  /**
+   * Return codes from deleteCompare.
+   */
+  enum DeleteCode {
+    /**
+     * Do nothing.  Move to next KV in Memcache
+     */
+    SKIP,
+    
+    /**
+     * Add to the list of deletes.
+     */
+    DELETE,
+    
+    /**
+     * Stop looking at KVs in Memcache.  Finalize.
+     */
+    DONE
+  }
+
+  /**
+   * Method used when putting deletes into memcache to remove all the previous
+   * entries that are affected by this Delete
+   * @param mem
+   * @param deleteBuffer
+   * @param deleteRowOffset
+   * @param deleteRowLength
+   * @param deleteQualifierOffset
+   * @param deleteQualifierLength
+   * @param deleteTimeOffset
+   * @param deleteType
+   * @param comparator
+   * @return SKIP if current KeyValue should not be deleted, DELETE if
+   * current KeyValue should be deleted and DONE when the current KeyValue is
+   * out of the Deletes range
+   */
+  public static DeleteCode deleteCompare(KeyValue mem, byte [] deleteBuffer,
+      int deleteRowOffset, short deleteRowLength, int deleteQualifierOffset,
+      int deleteQualifierLength, int deleteTimeOffset, byte deleteType,
+      KeyValue.KeyComparator comparator) {
+
+    //Parsing new KeyValue
+    byte [] memBuffer = mem.getBuffer();
+    int memOffset = mem.getOffset();
+
+    //Getting key lengths
+    int memKeyLen = Bytes.toInt(memBuffer, memOffset);
+    memOffset += Bytes.SIZEOF_INT;
+
+    //Skipping value lengths
+    memOffset += Bytes.SIZEOF_INT;
+
+    //Getting row lengths
+    short memRowLen = Bytes.toShort(memBuffer, memOffset);
+    memOffset += Bytes.SIZEOF_SHORT;
+    int res = comparator.compareRows(memBuffer, memOffset, memRowLen,
+        deleteBuffer, deleteRowOffset, deleteRowLength);
+    if(res > 0) {
+      return DeleteCode.DONE;
+    } else if(res < 0){
+      System.out.println("SKIPPING ROW");
+      return DeleteCode.SKIP;
+    }
+
+    memOffset += memRowLen;
+
+    //Getting family lengths
+    byte memFamLen = memBuffer[memOffset];
+    memOffset += Bytes.SIZEOF_BYTE + memFamLen;
+
+    //Get column lengths
+    int memQualifierLen = memKeyLen - memRowLen - memFamLen -
+      Bytes.SIZEOF_SHORT - Bytes.SIZEOF_BYTE - Bytes.SIZEOF_LONG -
+      Bytes.SIZEOF_BYTE;
+
+    //Compare timestamp
+    int tsOffset = memOffset + memQualifierLen;
+    int timeRes = Bytes.compareTo(memBuffer, tsOffset, Bytes.SIZEOF_LONG,
+        deleteBuffer, deleteTimeOffset, Bytes.SIZEOF_LONG);
+
+    if(deleteType == KeyValue.Type.DeleteFamily.getCode()) {
+      if(timeRes <= 0){
+        return DeleteCode.DELETE;
+      }
+      return DeleteCode.SKIP;
+    }
+
+    //Compare columns
+    res = Bytes.compareTo(memBuffer, memOffset, memQualifierLen,
+        deleteBuffer, deleteQualifierOffset, deleteQualifierLength);
+    if(res < 0) {
+      return DeleteCode.SKIP;
+    } else if(res > 0) {
+      return DeleteCode.DONE;
+    }
+    // same column, compare the time.
+    if(timeRes == 0) {
+      return DeleteCode.DELETE;
+    } else if (timeRes < 0) {
+      if(deleteType == KeyValue.Type.DeleteColumn.getCode()) {
+        return DeleteCode.DELETE;
+      }
+      return DeleteCode.DONE;
+    } else {
+      System.out.println("SKIPPING TS");
+      return DeleteCode.SKIP;
+    }
+  } 
+}
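
A hedged sketch of how a memcache delete pass might drive deleteCompare(): walk the sorted
KeyValues from the delete's position, collect everything flagged DELETE, and stop at DONE.
The helper class is invented for illustration, and the way the delete KeyValue's offsets,
type byte and comparator are obtained is assumed rather than taken from this diff.

package org.apache.hadoop.hbase.regionserver;

import java.util.ArrayList;
import java.util.List;
import java.util.SortedSet;

import org.apache.hadoop.hbase.KeyValue;

/** Illustrative only; not part of this commit. */
public class DeleteCompareSketch {
  static List<KeyValue> collectDeleted(SortedSet<KeyValue> memcache,
      KeyValue delete, KeyValue.KeyComparator comparator) {
    List<KeyValue> deleted = new ArrayList<KeyValue>();
    // Start at the delete and walk forward through the sorted memcache.
    for (KeyValue mem : memcache.tailSet(delete)) {
      DeleteCompare.DeleteCode code = DeleteCompare.deleteCompare(mem,
          delete.getBuffer(), delete.getRowOffset(), delete.getRowLength(),
          delete.getQualifierOffset(), delete.getQualifierLength(),
          delete.getTimestampOffset(), delete.getType(), comparator);
      if (code == DeleteCompare.DeleteCode.DELETE) {
        deleted.add(mem);          // affected by the delete; caller removes it
      } else if (code == DeleteCompare.DeleteCode.DONE) {
        break;                     // past the delete's range; stop scanning
      }
      // SKIP: untouched by this delete, keep going.
    }
    return deleted;
  }
}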

Added: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java?rev=782178&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java (added)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java Sat Jun  6 01:26:21 2009
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+/**
+ * This interface is used for the tracking and enforcement of Deletes
+ * during the course of a Get or Scan operation.
+ * <p>
+ * This class is utilized through three methods:
+ * <ul><li>{@link add} when encountering a Delete
+ * <li>{@link isDeleted} when checking if a Put KeyValue has been deleted
+ * <li>{@link update} when reaching the end of a StoreFile 
+ */
+public interface DeleteTracker {
+  
+  /**
+   * Add the specified KeyValue to the list of deletes to check against for
+   * this row operation.
+   * <p>
+   * This is called when a Delete is encountered in a StoreFile.
+   * @param buffer KeyValue buffer
+   * @param qualifierOffset column qualifier offset
+   * @param qualifierLength column qualifier length
+   * @param timestamp timestamp
+   * @param type delete type as byte
+   */
+  public void add(byte [] buffer, int qualifierOffset, int qualifierLength,
+      long timestamp, byte type);
+  
+  /**
+   * Check if the specified KeyValue buffer has been deleted by a previously
+   * seen delete.
+   * @param buffer KeyValue buffer
+   * @param qualifierOffset column qualifier offset
+   * @param qualifierLength column qualifier length
+   * @param timestamp timestamp
+   * @return true if the specified KeyValue is deleted, false if not
+   */
+  public boolean isDeleted(byte [] buffer, int qualifierOffset,
+      int qualifierLength, long timestamp);
+  
+  /**
+   * @return true if there are no current deletes, false otherwise
+   */
+  public boolean isEmpty();
+  
+  /**
+   * Called at the end of every StoreFile.
+   * <p>
+   * Many optimized implementations of Trackers will require an update when
+   * the end of each StoreFile is reached.
+   */
+  public void update();
+  
+  /**
+   * Called between rows.
+   * <p>
+   * This clears everything as if a new DeleteTracker was instantiated.
+   */
+  public void reset();
+  
+
+  /**
+   * Return codes for comparison of two Deletes.
+   * <p>
+   * The codes tell the merging function what to do.
+   * <p>
+   * INCLUDE means add the specified Delete to the merged list.
+   * NEXT means move to the next element in the specified list(s).
+   */
+  enum DeleteCompare { 
+    INCLUDE_OLD_NEXT_OLD,
+    INCLUDE_OLD_NEXT_BOTH,
+    INCLUDE_NEW_NEXT_NEW,
+    INCLUDE_NEW_NEXT_BOTH,
+    NEXT_OLD,
+    NEXT_NEW
+  }
+  
+}
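
And a hedged sketch of the add / isDeleted side of the DeleteTracker contract while matching
a single row: delete markers are remembered, puts are checked against them, and the matcher
calls update() between StoreFiles and reset() between rows. The helper class and the
isDeleteMarker flag are invented for illustration.

package org.apache.hadoop.hbase.regionserver;

import org.apache.hadoop.hbase.KeyValue;

/** Illustrative only; not part of this commit. */
public class DeleteTrackerSketch {
  /**
   * @param isDeleteMarker whether kv is a delete marker (determined by the caller)
   * @return true if a Put KeyValue survives the deletes seen so far in this row
   */
  static boolean survivesDeletes(DeleteTracker deletes, KeyValue kv,
      boolean isDeleteMarker) {
    if (isDeleteMarker) {
      // Remember the delete for the rest of this row.
      deletes.add(kv.getBuffer(), kv.getQualifierOffset(),
          kv.getQualifierLength(), kv.getTimestamp(), kv.getType());
      return false;
    }
    // A Put: check it against every delete seen so far.
    return deletes.isEmpty() || !deletes.isDeleted(kv.getBuffer(),
        kv.getQualifierOffset(), kv.getQualifierLength(), kv.getTimestamp());
  }
  // Between StoreFiles the matcher would call deletes.update(); between rows, deletes.reset().
}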


