hbase-commits mailing list archives

From apurt...@apache.org
Subject svn commit: r785076 [7/18] - in /hadoop/hbase/trunk_on_hadoop-0.18.3: ./ bin/ conf/ src/java/ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/ src/java/org/apache/hadoop/hbase/filter/ src/java/org/apache/hadoop/hbase/io/ src/j...
Date Tue, 16 Jun 2009 04:34:02 GMT
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ColumnOperation.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ColumnOperation.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ColumnOperation.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ColumnOperation.java Tue Jun 16 04:33:56 2009
@@ -25,7 +25,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableNotDisabledException;
-import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.util.Writables;
 
@@ -47,9 +47,9 @@
 
   protected void updateRegionInfo(HRegionInterface server, byte [] regionName,
     HRegionInfo i) throws IOException {
-    BatchUpdate b = new BatchUpdate(i.getRegionName());
-    b.put(COL_REGIONINFO, Writables.getBytes(i));
-    server.batchUpdate(regionName, b, -1L);
+    Put put = new Put(i.getRegionName());
+    put.add(CATALOG_FAMILY, REGIONINFO_QUALIFIER, Writables.getBytes(i));
+    server.put(regionName, put);
     if (LOG.isDebugEnabled()) {
       LOG.debug("updated columns in row: " + i.getRegionNameAsString());
     }

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/HMaster.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/HMaster.java Tue Jun 16 04:33:56 2009
@@ -1,5 +1,5 @@
 /**
- * Copyright 2007 The Apache Software Foundation
+ * Copyright 2009 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -23,16 +23,15 @@
 import java.lang.management.ManagementFactory;
 import java.lang.management.RuntimeMXBean;
 import java.lang.reflect.Constructor;
-import java.net.InetAddress;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.NavigableMap;
 import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.DelayQueue;
-import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.PriorityBlockingQueue;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -53,19 +52,20 @@
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.HServerLoad;
-import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.LocalHBaseCluster;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.RegionHistorian;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.ServerConnection;
 import org.apache.hadoop.hbase.client.ServerConnectionManager;
-import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.ipc.HBaseRPC;
 import org.apache.hadoop.hbase.ipc.HBaseRPCProtocolVersion;
 import org.apache.hadoop.hbase.ipc.HMasterInterface;
@@ -424,7 +424,9 @@
     }
     server.stop();                      // Stop server
     regionManager.stop();
-    
+
+    zooKeeperWrapper.close();
+
     // Join up with all threads
     LOG.info("HMaster main thread exiting");
   }
@@ -714,12 +716,14 @@
     byte [] metaRegionName = m.getRegionName();
     HRegionInterface srvr = connection.getHRegionConnection(m.getServer());
     byte[] firstRowInTable = Bytes.toBytes(tableName + ",,");
-    long scannerid = srvr.openScanner(metaRegionName, COL_REGIONINFO_ARRAY,
-        firstRowInTable, LATEST_TIMESTAMP, null);
+    Scan scan = new Scan(firstRowInTable);
+    scan.addColumn(CATALOG_FAMILY, REGIONINFO_QUALIFIER);
+    long scannerid = srvr.openScanner(metaRegionName, scan);
     try {
-      RowResult data = srvr.next(scannerid);
+      Result data = srvr.next(scannerid);
       if (data != null && data.size() > 0) {
-        HRegionInfo info = Writables.getHRegionInfo(data.get(COL_REGIONINFO));
+        HRegionInfo info = Writables.getHRegionInfo(
+            data.getValue(CATALOG_FAMILY, REGIONINFO_QUALIFIER));
         if (info.getTableDesc().getNameAsString().equals(tableName)) {
           // A region for this table already exists. Ergo table exists.
           throw new TableExistsException(tableName);
@@ -752,7 +756,7 @@
 
   public void deleteColumn(final byte [] tableName, final byte [] c)
   throws IOException {
-    new DeleteColumn(this, tableName, HStoreKey.getFamily(c)).process();
+    new DeleteColumn(this, tableName, KeyValue.parseColumn(c)[0]).process();
   }
 
   public void enableTable(final byte [] tableName) throws IOException {
@@ -778,23 +782,23 @@
     for (MetaRegion m: regions) {
       byte [] metaRegionName = m.getRegionName();
       HRegionInterface srvr = connection.getHRegionConnection(m.getServer());
+      Scan scan = new Scan(firstRowInTable);
+      scan.addColumn(CATALOG_FAMILY, REGIONINFO_QUALIFIER);
+      scan.addColumn(CATALOG_FAMILY, SERVER_QUALIFIER);
       long scannerid = 
-        srvr.openScanner(metaRegionName, 
-          new byte[][] {COL_REGIONINFO, COL_SERVER},
-          firstRowInTable, 
-          LATEST_TIMESTAMP, 
-          null);
+        srvr.openScanner(metaRegionName, scan);
       try {
         while (true) {
-          RowResult data = srvr.next(scannerid);
+          Result data = srvr.next(scannerid);
           if (data == null || data.size() <= 0)
             break;
-          HRegionInfo info = Writables.getHRegionInfo(data.get(COL_REGIONINFO));
+          HRegionInfo info = Writables.getHRegionInfo(
+              data.getValue(CATALOG_FAMILY, REGIONINFO_QUALIFIER));
           if (Bytes.compareTo(info.getTableDesc().getName(), tableName) == 0) {
-            Cell cell = data.get(COL_SERVER);
-            if (cell != null) {
+            byte [] value = data.getValue(CATALOG_FAMILY, SERVER_QUALIFIER);
+            if (value != null) {
               HServerAddress server =
-                new HServerAddress(Bytes.toString(cell.getValue()));
+                new HServerAddress(Bytes.toString(value));
               result.add(new Pair<HRegionInfo,HServerAddress>(info, server));
             }
           } else {
@@ -816,25 +820,25 @@
       byte [] firstRowInTable = Bytes.toBytes(Bytes.toString(tableName) + ",,");
       byte [] metaRegionName = m.getRegionName();
       HRegionInterface srvr = connection.getHRegionConnection(m.getServer());
+      Scan scan = new Scan(firstRowInTable);
+      scan.addColumn(CATALOG_FAMILY, REGIONINFO_QUALIFIER);
+      scan.addColumn(CATALOG_FAMILY, SERVER_QUALIFIER);
       long scannerid = 
-          srvr.openScanner(metaRegionName, 
-            new byte[][] {COL_REGIONINFO, COL_SERVER},
-            firstRowInTable, 
-            LATEST_TIMESTAMP, 
-            null);
+        srvr.openScanner(metaRegionName, scan);
       try {
         while (true) {
-          RowResult data = srvr.next(scannerid);
+          Result data = srvr.next(scannerid);
           if (data == null || data.size() <= 0)
             break;
-          HRegionInfo info = Writables.getHRegionInfo(data.get(COL_REGIONINFO));
+          HRegionInfo info = Writables.getHRegionInfo(
+              data.getValue(CATALOG_FAMILY, REGIONINFO_QUALIFIER));
           if (Bytes.compareTo(info.getTableDesc().getName(), tableName) == 0) {
             if ((Bytes.compareTo(info.getStartKey(), rowKey) >= 0) &&
                 (Bytes.compareTo(info.getEndKey(), rowKey) < 0)) {
-                Cell cell = data.get(COL_SERVER);
-                if (cell != null) {
+                byte [] value = data.getValue(CATALOG_FAMILY, SERVER_QUALIFIER);
+                if (value != null) {
                   HServerAddress server =
-                    new HServerAddress(Bytes.toString(cell.getValue()));
+                    new HServerAddress(Bytes.toString(value));
                   return new Pair<HRegionInfo,HServerAddress>(info, server);
                 }
             }
@@ -857,15 +861,17 @@
     for (MetaRegion m: regions) {
       byte [] metaRegionName = m.getRegionName();
       HRegionInterface srvr = connection.getHRegionConnection(m.getServer());
-      RowResult data = srvr.getRow(metaRegionName, regionName, 
-        new byte[][] {COL_REGIONINFO, COL_SERVER},
-        HConstants.LATEST_TIMESTAMP, 1, -1L);
+      Get get = new Get(regionName);
+      get.addColumn(CATALOG_FAMILY, REGIONINFO_QUALIFIER);
+      get.addColumn(CATALOG_FAMILY, SERVER_QUALIFIER);
+      Result data = srvr.get(metaRegionName, get);
       if(data == null || data.size() <= 0) continue;
-      HRegionInfo info = Writables.getHRegionInfo(data.get(COL_REGIONINFO));
-      Cell cell = data.get(COL_SERVER);
-      if(cell != null) {
+      HRegionInfo info = Writables.getHRegionInfo(
+          data.getValue(CATALOG_FAMILY, REGIONINFO_QUALIFIER));
+      byte [] value = data.getValue(CATALOG_FAMILY, SERVER_QUALIFIER);
+      if(value != null) {
         HServerAddress server =
-          new HServerAddress(Bytes.toString(cell.getValue()));
+          new HServerAddress(Bytes.toString(value));
         return new Pair<HRegionInfo,HServerAddress>(info, server);
       }
     }
@@ -875,16 +881,19 @@
   /**
    * Get row from meta table.
    * @param row
-   * @param columns
-   * @return RowResult
+   * @param family
+   * @return Result
    * @throws IOException
    */
-  protected RowResult getFromMETA(final byte [] row, final byte [][] columns)
+  protected Result getFromMETA(final byte [] row, final byte [] family)
   throws IOException {
     MetaRegion meta = this.regionManager.getMetaRegionForRow(row);
     HRegionInterface srvr = getMETAServer(meta);
-    return srvr.getRow(meta.getRegionName(), row, columns,
-      HConstants.LATEST_TIMESTAMP, 1, -1);
+
+    Get get = new Get(row);
+    get.addFamily(family);
+    
+    return srvr.get(meta.getRegionName(), get);
   }
   
   /*
@@ -897,10 +906,11 @@
     return this.connection.getHRegionConnection(meta.getServer());
   }
 
-  public void modifyTable(final byte[] tableName, int op, Writable[] args)
+  public void modifyTable(final byte[] tableName, HConstants.Modify op, 
+      Writable[] args)
     throws IOException {
     switch (op) {
-    case MODIFY_TABLE_SET_HTD:
+    case TABLE_SET_HTD:
       if (args == null || args.length < 1 || 
           !(args[0] instanceof HTableDescriptor))
         throw new IOException("SET_HTD request requires an HTableDescriptor");
@@ -909,10 +919,10 @@
       new ModifyTableMeta(this, tableName, htd).process();
       break;
 
-    case MODIFY_TABLE_SPLIT:
-    case MODIFY_TABLE_COMPACT:
-    case MODIFY_TABLE_MAJOR_COMPACT:
-    case MODIFY_TABLE_FLUSH:
+    case TABLE_SPLIT:
+    case TABLE_COMPACT:
+    case TABLE_MAJOR_COMPACT:
+    case TABLE_FLUSH:
       if (args != null && args.length > 0) {
         if (!(args[0] instanceof ImmutableBytesWritable))
           throw new IOException(
@@ -936,23 +946,25 @@
       }
       break;
 
-    case MODIFY_CLOSE_REGION:
+    case CLOSE_REGION:
       if (args == null || args.length < 1 || args.length > 2) {
         throw new IOException("Requires at least a region name; " +
           "or cannot have more than region name and servername");
       }
       // Arguments are regionname and an optional server name.
       byte [] regionname = ((ImmutableBytesWritable)args[0]).get();
+      LOG.debug("Attempting to close region: " + Bytes.toStringBinary(regionname));
       String servername = null;
       if (args.length == 2) {
         servername = Bytes.toString(((ImmutableBytesWritable)args[1]).get());
       }
-      // Need hri
-      RowResult rr = getFromMETA(regionname, HConstants.COLUMN_FAMILY_ARRAY);
+      // Need hri 
+      Result rr = getFromMETA(regionname, HConstants.CATALOG_FAMILY);
       HRegionInfo hri = getHRegionInfo(rr.getRow(), rr);
       if (servername == null) {
         // Get server from the .META. if it wasn't passed as argument
-        servername = Writables.cellToString(rr.get(COL_SERVER));
+        servername = 
+          Bytes.toString(rr.getValue(CATALOG_FAMILY, SERVER_QUALIFIER));
       }
       LOG.info("Marking " + hri.getRegionNameAsString() +
         " as closed on " + servername + "; cleaning SERVER + STARTCODE; " +
@@ -995,7 +1007,8 @@
   public HBaseConfiguration getConfiguration() {
     return this.conf;
   }
-    
+  
+  // TODO ryan rework this function
   /*
    * Get HRegionInfo from passed META map of row values.
    * Returns null if none found (and logs fact that expected COL_REGIONINFO
@@ -1005,22 +1018,24 @@
    * @return Null or found HRegionInfo.
    * @throws IOException
    */
-  HRegionInfo getHRegionInfo(final byte [] row, final Map<byte [], Cell> map)
+  HRegionInfo getHRegionInfo(final byte [] row, final Result res)
   throws IOException {
-    Cell regioninfo = map.get(COL_REGIONINFO);
+    byte [] regioninfo = res.getValue(CATALOG_FAMILY, REGIONINFO_QUALIFIER);
     if (regioninfo == null) {
       StringBuilder sb =  new StringBuilder();
-      for (byte [] e: map.keySet()) {
+      NavigableMap<byte[], byte[]> infoMap = res.getFamilyMap(CATALOG_FAMILY);
+      for (byte [] e: infoMap.keySet()) {
         if (sb.length() > 0) {
           sb.append(", ");
         }
-        sb.append(Bytes.toString(e));
+        sb.append(Bytes.toString(CATALOG_FAMILY) + ":" + Bytes.toString(e));
       }
-      LOG.warn(Bytes.toString(COL_REGIONINFO) + " is empty for row: " +
+      LOG.warn(Bytes.toString(CATALOG_FAMILY) + ":" +
+          Bytes.toString(REGIONINFO_QUALIFIER) + " is empty for row: " +
          Bytes.toString(row) + "; has keys: " + sb.toString());
       return null;
     }
-    return Writables.getHRegionInfo(regioninfo.getValue());
+    return Writables.getHRegionInfo(regioninfo);
   }
 
   /*
@@ -1065,7 +1080,6 @@
     System.exit(0);
   }
 
-  @SuppressWarnings("null")
   protected static void doMain(String [] args,
       Class<? extends HMaster> masterClass) {
 

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java Tue Jun 16 04:33:56 2009
@@ -26,6 +26,7 @@
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableNotDisabledException;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -50,9 +51,9 @@
   protected void updateRegionInfo(HRegionInterface server, byte [] regionName,
     HRegionInfo i)
   throws IOException {
-    BatchUpdate b = new BatchUpdate(i.getRegionName());
-    b.put(COL_REGIONINFO, Writables.getBytes(i));
-    server.batchUpdate(regionName, b, -1L);
+    Put put = new Put(i.getRegionName());
+    put.add(CATALOG_FAMILY, REGIONINFO_QUALIFIER, Writables.getBytes(i));
+    server.put(regionName, put);
     LOG.debug("updated HTableDescriptor for region " + i.getRegionNameAsString());
   }
 

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java Tue Jun 16 04:33:56 2009
@@ -53,7 +53,7 @@
   @Override
   public String toString() {
     return "ProcessRegionClose of " + this.regionInfo.getRegionNameAsString() +
-      ", " + this.offlineRegion;
+      ", " + this.offlineRegion + ", reassign: " + this.reassignRegion;
   }
 
   @Override
@@ -83,8 +83,12 @@
         result = result == null ? true : result;
 
     } else if (reassignRegion) {
+      LOG.info("region set as unassigned: " + regionInfo.getRegionNameAsString());
       // we are reassigning the region eventually, so set it unassigned
       master.regionManager.setUnassigned(regionInfo, false);
+    } else {
+      LOG.info("Region was neither offlined, or asked to be reassigned, what gives: " +
+      regionInfo.getRegionNameAsString());
     }
 
     return result == null ? true : result;

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java Tue Jun 16 04:33:56 2009
@@ -25,6 +25,9 @@
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.RegionHistorian;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -78,11 +81,12 @@
         " in region " + Bytes.toString(metaRegionName) +
         " with startcode " + serverInfo.getStartCode() + " and server " +
         serverInfo.getServerAddress());
-    BatchUpdate b = new BatchUpdate(regionInfo.getRegionName());
-    b.put(COL_SERVER,
+    Put p = new Put(regionInfo.getRegionName());
+    p.add(CATALOG_FAMILY, SERVER_QUALIFIER,
         Bytes.toBytes(serverInfo.getServerAddress().toString()));
-    b.put(COL_STARTCODE, Bytes.toBytes(serverInfo.getStartCode()));
-    server.batchUpdate(metaRegionName, b, -1L);
+    p.add(CATALOG_FAMILY, STARTCODE_QUALIFIER,
+        Bytes.toBytes(serverInfo.getStartCode()));
+    server.put(metaRegionName, p);
     if (!historian.isOnline()) {
       // This is safest place to do the onlining of the historian in
       // the master.  When we get to here, we know there is a .META.

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java Tue Jun 16 04:33:56 2009
@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2009 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -31,6 +31,8 @@
 import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.regionserver.HLog;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -113,7 +115,7 @@
     List<byte []> emptyRows = new ArrayList<byte []>();
     try {
       while (true) {
-        RowResult values = null;
+        Result values = null;
         try {
           values = server.next(scannerId);
         } catch (IOException e) {
@@ -129,8 +131,10 @@
         // shutdown server but that would mean that we'd reassign regions that
         // were already out being assigned, ones that were product of a split
         // that happened while the shutdown was being processed.
-        String serverAddress = Writables.cellToString(values.get(COL_SERVER));
-        long startCode = Writables.cellToLong(values.get(COL_STARTCODE)); 
+        String serverAddress = 
+          Bytes.toString(values.getValue(CATALOG_FAMILY, SERVER_QUALIFIER));
+        long startCode =
+          Bytes.toLong(values.getValue(CATALOG_FAMILY, STARTCODE_QUALIFIER));
         String serverName = null;
         if (serverAddress != null && serverAddress.length() > 0) {
           serverName = HServerInfo.getServerName(serverAddress, startCode);
@@ -145,6 +149,7 @@
             Bytes.toString(row));
         }
 
+//        HRegionInfo info = master.getHRegionInfo(row, values.rowResult());
         HRegionInfo info = master.getHRegionInfo(row, values);
         if (info == null) {
           emptyRows.add(row);
@@ -221,9 +226,10 @@
         LOG.debug("process server shutdown scanning root region on " +
             master.getRootRegionLocation().getBindAddress());
       }
+      Scan scan = new Scan();
+      scan.addFamily(CATALOG_FAMILY);
       long scannerId = server.openScanner(
-          HRegionInfo.ROOT_REGIONINFO.getRegionName(), COLUMN_FAMILY_ARRAY,
-          EMPTY_START_ROW, HConstants.LATEST_TIMESTAMP, null);
+          HRegionInfo.ROOT_REGIONINFO.getRegionName(), scan);
       scanMetaRegion(server, scannerId,
           HRegionInfo.ROOT_REGIONINFO.getRegionName());
       return true;
@@ -240,9 +246,10 @@
         LOG.debug("process server shutdown scanning " +
           Bytes.toString(m.getRegionName()) + " on " + m.getServer());
       }
-      long scannerId =
-        server.openScanner(m.getRegionName(), COLUMN_FAMILY_ARRAY,
-        EMPTY_START_ROW, HConstants.LATEST_TIMESTAMP, null);
+      Scan scan = new Scan();
+      scan.addFamily(CATALOG_FAMILY);
+      long scannerId = server.openScanner(
+          m.getRegionName(), scan);
       scanMetaRegion(server, scannerId, m.getRegionName());
       return true;
     }

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/RegionManager.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/RegionManager.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/RegionManager.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/RegionManager.java Tue Jun 16 04:33:56 2009
@@ -49,12 +49,12 @@
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RegionHistorian;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.HMsg;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;
 
@@ -499,7 +499,7 @@
       if (currentRegion.isRootRegion() || currentRegion.isMetaTable()) {
         continue;
       }
-      String regionName = currentRegion.getRegionNameAsString();
+      final String regionName = currentRegion.getRegionNameAsString();
       if (regionIsInTransition(regionName)) {
         skipped++;
         continue;
@@ -723,9 +723,12 @@
     // 3. Insert into meta
     HRegionInfo info = region.getRegionInfo();
     byte [] regionName = region.getRegionName();
-    BatchUpdate b = new BatchUpdate(regionName);
-    b.put(COL_REGIONINFO, Writables.getBytes(info));
-    server.batchUpdate(metaRegionName, b, -1L);
+    
+    Put put = new Put(regionName);
+    byte [] infoBytes = Writables.getBytes(info);
+    String infoString = new String(infoBytes);
+    put.add(CATALOG_FAMILY, REGIONINFO_QUALIFIER, Writables.getBytes(info));
+    server.put(metaRegionName, put);
     
     // 4. Close the new region to flush it to disk.  Close its log file too.
     region.close();
@@ -1204,18 +1207,21 @@
    * @param op
    */
   public void startAction(byte[] regionName, HRegionInfo info,
-      HServerAddress server, int op) {
+      HServerAddress server, HConstants.Modify op) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Adding operation " + op + " from tasklist");
+    }
     switch (op) {
-      case HConstants.MODIFY_TABLE_SPLIT:
+      case TABLE_SPLIT:
         startAction(regionName, info, server, this.regionsToSplit);
         break;
-      case HConstants.MODIFY_TABLE_COMPACT:
+      case TABLE_COMPACT:
         startAction(regionName, info, server, this.regionsToCompact);
         break;
-      case HConstants.MODIFY_TABLE_MAJOR_COMPACT:
+      case TABLE_MAJOR_COMPACT:
         startAction(regionName, info, server, this.regionsToMajorCompact);
         break;
-      case HConstants.MODIFY_TABLE_FLUSH:
+      case TABLE_FLUSH:
         startAction(regionName, info, server, this.regionsToFlush);
         break;
       default:
@@ -1233,18 +1239,21 @@
    * @param regionName
    * @param op
    */
-  public void endAction(byte[] regionName, int op) {
+  public void endAction(byte[] regionName, HConstants.Modify op) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Removing operation " + op + " from tasklist");
+    }
     switch (op) {
-    case HConstants.MODIFY_TABLE_SPLIT:
+    case TABLE_SPLIT:
       this.regionsToSplit.remove(regionName);
       break;
-    case HConstants.MODIFY_TABLE_COMPACT:
+    case TABLE_COMPACT:
       this.regionsToCompact.remove(regionName);
       break;
-    case HConstants.MODIFY_TABLE_MAJOR_COMPACT:
+    case TABLE_MAJOR_COMPACT:
       this.regionsToMajorCompact.remove(regionName);
       break;
-    case HConstants.MODIFY_TABLE_FLUSH:
+    case TABLE_FLUSH:
       this.regionsToFlush.remove(regionName);
       break;
     default:

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ServerManager.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ServerManager.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ServerManager.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ServerManager.java Tue Jun 16 04:33:56 2009
@@ -475,8 +475,6 @@
       
       // Should we tell it close regions because its overloaded?  If its
       // currently opening regions, leave it alone till all are open.
-      LOG.debug("Process all wells: " + serverInfo + " openingCount: " + openingCount +
-          ", nobalancingCount: " + nobalancingCount);
       if ((openingCount < this.nobalancingCount)) {
         this.master.regionManager.assignRegions(serverInfo, mostLoadedRegions,
             returnMsgs);

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/TableDelete.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/TableDelete.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/TableDelete.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/TableDelete.java Tue Jun 16 04:33:56 2009
@@ -45,6 +45,7 @@
   protected void processScanItem(String serverName,
       final HRegionInfo info) throws IOException {
     if (isEnabled(info)) {
+      LOG.debug("Region still enabled: " + info.toString());
       throw new TableNotDisabledException(tableName);
     }
   }

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/TableOperation.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/TableOperation.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/TableOperation.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/TableOperation.java Tue Jun 16 04:33:56 2009
@@ -31,10 +31,10 @@
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Writables;
 
 /**
  * Abstract base class for operations that need to examine all HRegionInfo 
@@ -80,26 +80,28 @@
       // Open a scanner on the meta region
       byte [] tableNameMetaStart =
           Bytes.toBytes(Bytes.toString(tableName) + ",,");
-
-      long scannerId = server.openScanner(m.getRegionName(),
-          COLUMN_FAMILY_ARRAY, tableNameMetaStart, HConstants.LATEST_TIMESTAMP, null);
+      Scan scan = new Scan(tableNameMetaStart).addFamily(CATALOG_FAMILY);
+      long scannerId = server.openScanner(m.getRegionName(), scan);
 
       List<byte []> emptyRows = new ArrayList<byte []>();
       try {
         while (true) {
-          RowResult values = server.next(scannerId);
-          if(values == null || values.size() == 0) {
+          Result values = server.next(scannerId);
+          if(values == null || values.isEmpty()) {
             break;
           }
           HRegionInfo info = this.master.getHRegionInfo(values.getRow(), values);
           if (info == null) {
             emptyRows.add(values.getRow());
-            LOG.error(Bytes.toString(COL_REGIONINFO) + " not found on " +
-                      Bytes.toString(values.getRow()));
+            LOG.error(Bytes.toString(CATALOG_FAMILY) + ":" +
+                Bytes.toString(REGIONINFO_QUALIFIER) + " not found on " +
+                      Bytes.toStringBinary(values.getRow()));
             continue;
           }
-          String serverAddress = Writables.cellToString(values.get(COL_SERVER));
-          long startCode = Writables.cellToLong(values.get(COL_STARTCODE)); 
+          String serverAddress = 
+            Bytes.toString(values.getValue(CATALOG_FAMILY, SERVER_QUALIFIER));
+          long startCode = 
+            Bytes.toLong(values.getValue(CATALOG_FAMILY, STARTCODE_QUALIFIER)); 
           String serverName = null;
           if (serverAddress != null && serverAddress.length() > 0) {
             serverName = HServerInfo.getServerName(serverAddress, startCode);

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ZKMasterAddressWatcher.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ZKMasterAddressWatcher.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ZKMasterAddressWatcher.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ZKMasterAddressWatcher.java Tue Jun 16 04:33:56 2009
@@ -42,7 +42,7 @@
 
   /**
    * Create a watcher with a ZooKeeperWrapper instance.
-   * @param zooKeeper ZooKeeperWrapper to use to talk to ZooKeeper.
+   * @param master The master.
    */
   public ZKMasterAddressWatcher(HMaster master) {
     this.master = master;

Added: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java?rev=785076&view=auto
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java (added)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java Tue Jun 16 04:33:56 2009
@@ -0,0 +1,112 @@
+/**
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+/**
+ * Simple wrapper for a byte buffer and a counter.  Does not copy.
+ * <p>
+ * NOT thread-safe because it is not used in a multi-threaded context, yet.
+ */
+public class ColumnCount {
+  private byte [] bytes;
+  private int offset;
+  private int length;
+  private int count;
+  
+  /**
+   * Constructor
+   * @param column the qualifier to count the versions for
+   */
+  public ColumnCount(byte [] column) {
+    this(column, 0);
+  }
+  
+  /**
+   * Constructor
+   * @param column the qualifier to count the versions for
+   * @param count initial count
+   */
+  public ColumnCount(byte [] column, int count) {
+    this(column, 0, column.length, count);
+  }
+  
+  /**
+   * Constructor
+   * @param column the qualifier to count the versions for
+   * @param offset in the passed buffer where to start the qualifier from
+   * @param length of the qualifier
+   * @param count initial count
+   */
+  public ColumnCount(byte [] column, int offset, int length, int count) {
+    this.bytes = column;
+    this.offset = offset;
+    this.length = length;
+    this.count = count;
+  }
+  
+  /**
+   * @return the buffer
+   */
+  public byte [] getBuffer(){
+    return this.bytes;
+  }
+  
+  /**
+   * @return the offset
+   */
+  public int getOffset(){
+    return this.offset;
+  }
+  
+  /**
+   * @return the length
+   */
+  public int getLength(){
+    return this.length;
+  }  
+  
+  /**
+   * Decrement the current version count
+   * @return current count
+   */
+  public int decrement() {
+    return --count;
+  }
+
+  /**
+   * Increment the current version count
+   * @return current count
+   */
+  public int increment() {
+    return ++count;
+  }
+  
+  /**
+   * Check whether more versions need to be fetched
+   * @param max
+   * @return true if more versions are needed, false otherwise
+   */
+  public boolean needMore(int max) {
+    if(this.count < max) {
+      return true;
+    }
+    return false;
+  }
+}
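
For orientation, a minimal sketch of how a ColumnCount is meant to be driven (the qualifier and the version budget below are made up for illustration): initialize the counter with the maximum number of versions wanted, decrement once per matching KeyValue, and stop once the budget reaches zero, which is how the ExplicitColumnTracker added below uses it.

import org.apache.hadoop.hbase.regionserver.ColumnCount;
import org.apache.hadoop.hbase.util.Bytes;

public class ColumnCountSketch {
  public static void main(String[] args) {
    int maxVersions = 3;                                    // hypothetical version budget
    ColumnCount cc = new ColumnCount(Bytes.toBytes("q1"), maxVersions);
    for (int seen = 1; seen <= 5; seen++) {                 // pretend five versions of "q1" arrive
      System.out.println("include version " + seen);
      if (cc.decrement() == 0) {                            // budget for this qualifier exhausted
        System.out.println("done with q1 after " + seen + " versions");
        break;
      }
    }
  }
}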

Added: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java?rev=785076&view=auto
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java (added)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java Tue Jun 16 04:33:56 2009
@@ -0,0 +1,81 @@
+/**
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.regionserver.QueryMatcher.MatchCode;
+
+/**
+ * Implementing classes of this interface will be used for the tracking
+ * and enforcement of columns and numbers of versions during the course of a 
+ * Get or Scan operation.
+ * <p>
+ * Currently there are two different types of Store/Family-level queries.
+ * <ul><li>{@link ExplicitColumnTracker} is used when the query specifies
+ * one or more column qualifiers to return in the family.
+ * <li>{@link WildcardColumnTracker} is used when the query asks for all
+ * qualifiers within the family.
+ * <p>
+ * This class is utilized by {@link QueryMatcher} through two methods:
+ * <ul><li>{@link #checkColumn} is called when a Put satisfies all other
+ * conditions of the query.  This method returns a {@link MatchCode} to define
+ * what action should be taken.
+ * <li>{@link #update} is called at the end of every StoreFile or Memcache.
+ * <p>
+ * This class is NOT thread-safe as queries are never multi-threaded 
+ */
+public interface ColumnTracker {
+  /**
+   * Keeps track of the number of versions for the columns asked for
+   * @param bytes
+   * @param offset
+   * @param length
+   * @return The match code instance.
+   */
+  public MatchCode checkColumn(byte [] bytes, int offset, int length);
+  
+  /**
+   * Updates internal variables in between files
+   */
+  public void update();
+  
+  /**
+   * Resets the Matcher
+   */
+  public void reset();
+  
+  /**
+   * 
+   * @return <code>true</code> when done.
+   */
+  public boolean done();
+
+  /**
+   * Used by matcher and scan/get to get a hint of the next column
+   * to seek to after checkColumn() returns SKIP.  Returns the next interesting
+   * column we want, or NULL if there is none (wildcard scanner).
+   *
+   * Implementations aren't required to return anything useful unless the most recent
+   * call was to checkColumn() and the return code was SKIP.  This is pretty implementation
+   * detail-y, but optimizations are like that.
+   *
+   * @return null, or a ColumnCount that we should seek to
+   */
+  public ColumnCount getColumnHint();
+}
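
As a rough, hedged illustration of the contract above (not code from this commit; it assumes the MatchCode values INCLUDE, SKIP, NEXT and DONE that ExplicitColumnTracker below returns), a caller such as QueryMatcher would feed qualifiers in sorted order through checkColumn(), act on each MatchCode, and call update() at every StoreFile/Memcache boundary:

import org.apache.hadoop.hbase.regionserver.ColumnTracker;
import org.apache.hadoop.hbase.regionserver.QueryMatcher.MatchCode;

public class ColumnTrackerCallerSketch {
  // Feed one store file's qualifiers (already in sorted order) through a tracker.
  static void scanOneFile(ColumnTracker tracker, byte[][] sortedQualifiers) {
    for (byte[] q : sortedQualifiers) {
      MatchCode code = tracker.checkColumn(q, 0, q.length);
      if (code == MatchCode.INCLUDE) {
        // emit the matching KeyValue into the result
      } else if (code == MatchCode.NEXT || code == MatchCode.DONE) {
        break;          // nothing further wanted from this file (or this row)
      }
      // MatchCode.SKIP: ignore this qualifier and keep going
    }
    tracker.update();   // per-StoreFile/Memcache bookkeeping, as required above
  }
}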

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java Tue Jun 16 04:33:56 2009
@@ -30,11 +30,11 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Writables;
 
@@ -114,14 +114,14 @@
         continue;
       } catch (IOException ex) {
         LOG.error("Compaction/Split failed" +
-            (r != null ? (" for region " + Bytes.toString(r.getRegionName())) : ""),
+            (r != null ? (" for region " + r.getRegionNameAsString()) : ""),
             RemoteExceptionHandler.checkIOException(ex));
         if (!server.checkFileSystem()) {
           break;
         }
       } catch (Exception ex) {
         LOG.error("Compaction failed" +
-            (r != null ? (" for region " + Bytes.toString(r.getRegionName())) : ""),
+            (r != null ? (" for region " + r.getRegionNameAsString()) : ""),
             ex);
         if (!server.checkFileSystem()) {
           break;
@@ -155,7 +155,7 @@
     r.setForceMajorCompaction(force);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Compaction " + (force? "(major) ": "") +
-        "requested for region " + Bytes.toString(r.getRegionName()) +
+        "requested for region " + r.getRegionNameAsString() +
         "/" + r.getRegionInfo().getEncodedName() +
         (why != null && !why.isEmpty()? " because: " + why: ""));
     }
@@ -202,18 +202,21 @@
     // Inform the HRegionServer that the parent HRegion is no-longer online.
     this.server.removeFromOnlineRegions(oldRegionInfo);
     
-    BatchUpdate update = new BatchUpdate(oldRegionInfo.getRegionName());
-    update.put(COL_REGIONINFO, Writables.getBytes(oldRegionInfo));
-    update.put(COL_SPLITA, Writables.getBytes(newRegions[0].getRegionInfo()));
-    update.put(COL_SPLITB, Writables.getBytes(newRegions[1].getRegionInfo()));
-    t.commit(update);
+    Put put = new Put(oldRegionInfo.getRegionName());
+    put.add(CATALOG_FAMILY, REGIONINFO_QUALIFIER, 
+        Writables.getBytes(oldRegionInfo));
+    put.add(CATALOG_FAMILY, SPLITA_QUALIFIER,
+        Writables.getBytes(newRegions[0].getRegionInfo()));
+    put.add(CATALOG_FAMILY, SPLITB_QUALIFIER,
+        Writables.getBytes(newRegions[1].getRegionInfo()));
+    t.put(put);
     
     // Add new regions to META
     for (int i = 0; i < newRegions.length; i++) {
-      update = new BatchUpdate(newRegions[i].getRegionName());
-      update.put(COL_REGIONINFO, Writables.getBytes(
+      put = new Put(newRegions[i].getRegionName());
+      put.add(CATALOG_FAMILY, REGIONINFO_QUALIFIER, Writables.getBytes(
         newRegions[i].getRegionInfo()));
-      t.commit(update);
+      t.put(put);
     }
         
     // Now tell the master about the new regions

Added: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java?rev=785076&view=auto
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java (added)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java Tue Jun 16 04:33:56 2009
@@ -0,0 +1,120 @@
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.util.Bytes;
+
+
+/**
+ * Class that provides a static method needed when putting deletes into memcache
+ */
+public class DeleteCompare {
+  
+  /**
+   * Return codes from deleteCompare.
+   */
+  enum DeleteCode {
+    /**
+     * Do nothing.  Move to next KV in Memcache
+     */
+    SKIP,
+    
+    /**
+     * Add to the list of deletes.
+     */
+    DELETE,
+    
+    /**
+     * Stop looking at KVs in Memcache.  Finalize.
+     */
+    DONE
+  }
+
+  /**
+   * Method used when putting deletes into memcache to remove all the previous
+   * entries that are affected by this Delete
+   * @param mem
+   * @param deleteBuffer
+   * @param deleteRowOffset
+   * @param deleteRowLength
+   * @param deleteQualifierOffset
+   * @param deleteQualifierLength
+   * @param deleteTimeOffset
+   * @param deleteType
+   * @param comparator
+   * @return SKIP if current KeyValue should not be deleted, DELETE if
+   * current KeyValue should be deleted and DONE when the current KeyValue is
+   * out of the Deletes range
+   */
+  public static DeleteCode deleteCompare(KeyValue mem, byte [] deleteBuffer,
+      int deleteRowOffset, short deleteRowLength, int deleteQualifierOffset,
+      int deleteQualifierLength, int deleteTimeOffset, byte deleteType,
+      KeyValue.KeyComparator comparator) {
+
+    //Parsing new KeyValue
+    byte [] memBuffer = mem.getBuffer();
+    int memOffset = mem.getOffset();
+
+    //Getting key lengths
+    int memKeyLen = Bytes.toInt(memBuffer, memOffset);
+    memOffset += Bytes.SIZEOF_INT;
+
+    //Skipping value lengths
+    memOffset += Bytes.SIZEOF_INT;
+
+    //Getting row lengths
+    short memRowLen = Bytes.toShort(memBuffer, memOffset);
+    memOffset += Bytes.SIZEOF_SHORT;
+    int res = comparator.compareRows(memBuffer, memOffset, memRowLen,
+        deleteBuffer, deleteRowOffset, deleteRowLength);
+    if(res > 0) {
+      return DeleteCode.DONE;
+    } else if(res < 0){
+      System.out.println("SKIPPING ROW");
+      return DeleteCode.SKIP;
+    }
+
+    memOffset += memRowLen;
+
+    //Getting family lengths
+    byte memFamLen = memBuffer[memOffset];
+    memOffset += Bytes.SIZEOF_BYTE + memFamLen;
+
+    //Get column lengths
+    int memQualifierLen = memKeyLen - memRowLen - memFamLen -
+      Bytes.SIZEOF_SHORT - Bytes.SIZEOF_BYTE - Bytes.SIZEOF_LONG -
+      Bytes.SIZEOF_BYTE;
+
+    //Compare timestamp
+    int tsOffset = memOffset + memQualifierLen;
+    int timeRes = Bytes.compareTo(memBuffer, tsOffset, Bytes.SIZEOF_LONG,
+        deleteBuffer, deleteTimeOffset, Bytes.SIZEOF_LONG);
+
+    if(deleteType == KeyValue.Type.DeleteFamily.getCode()) {
+      if(timeRes <= 0){
+        return DeleteCode.DELETE;
+      }
+      return DeleteCode.SKIP;
+    }
+
+    //Compare columns
+    res = Bytes.compareTo(memBuffer, memOffset, memQualifierLen,
+        deleteBuffer, deleteQualifierOffset, deleteQualifierLength);
+    if(res < 0) {
+      return DeleteCode.SKIP;
+    } else if(res > 0) {
+      return DeleteCode.DONE;
+    }
+    // same column, compare the time.
+    if(timeRes == 0) {
+      return DeleteCode.DELETE;
+    } else if (timeRes < 0) {
+      if(deleteType == KeyValue.Type.DeleteColumn.getCode()) {
+        return DeleteCode.DELETE;
+      }
+      return DeleteCode.DONE;
+    } else {
+      System.out.println("SKIPPING TS");
+      return DeleteCode.SKIP;
+    }
+  } 
+}

Added: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java?rev=785076&view=auto
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java (added)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java Tue Jun 16 04:33:56 2009
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+/**
+ * This interface is used for the tracking and enforcement of Deletes
+ * during the course of a Get or Scan operation.
+ * <p>
+ * This class is utilized through three methods:
+ * <ul><li>{@link #add} when encountering a Delete
+ * <li>{@link #isDeleted} when checking if a Put KeyValue has been deleted
+ * <li>{@link #update} when reaching the end of a StoreFile 
+ */
+public interface DeleteTracker {
+  
+  /**
+   * Add the specified KeyValue to the list of deletes to check against for
+   * this row operation.
+   * <p>
+   * This is called when a Delete is encountered in a StoreFile.
+   * @param buffer KeyValue buffer
+   * @param qualifierOffset column qualifier offset
+   * @param qualifierLength column qualifier length
+   * @param timestamp timestamp
+   * @param type delete type as byte
+   */
+  public void add(byte [] buffer, int qualifierOffset, int qualifierLength,
+      long timestamp, byte type);
+  
+  /**
+   * Check if the specified KeyValue buffer has been deleted by a previously
+   * seen delete.
+   * @param buffer KeyValue buffer
+   * @param qualifierOffset column qualifier offset
+   * @param qualifierLength column qualifier length
+   * @param timestamp timestamp
+   * @return true if the specified KeyValue is deleted, false if not
+   */
+  public boolean isDeleted(byte [] buffer, int qualifierOffset,
+      int qualifierLength, long timestamp);
+  
+  /**
+   * @return true if there are no current deletes, false otherwise
+   */
+  public boolean isEmpty();
+  
+  /**
+   * Called at the end of every StoreFile.
+   * <p>
+   * Many optimized implementations of Trackers will require an update
+   * when the end of each StoreFile is reached.
+   */
+  public void update();
+  
+  /**
+   * Called between rows.
+   * <p>
+   * This clears everything as if a new DeleteTracker was instantiated.
+   */
+  public void reset();
+  
+
+  /**
+   * Return codes for comparison of two Deletes.
+   * <p>
+   * The codes tell the merging function what to do.
+   * <p>
+   * INCLUDE means add the specified Delete to the merged list.
+   * NEXT means move to the next element in the specified list(s).
+   */
+  enum DeleteCompare { 
+    INCLUDE_OLD_NEXT_OLD,
+    INCLUDE_OLD_NEXT_BOTH,
+    INCLUDE_NEW_NEXT_NEW,
+    INCLUDE_NEW_NEXT_BOTH,
+    NEXT_OLD,
+    NEXT_NEW
+  }
+  
+}
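
A small hedged sketch of the calling pattern the interface implies (the qualifier and timestamps are invented, and the tracker instance is assumed to be whatever implementation is plugged in, e.g. the GetDeleteTracker added later in this commit): deletes encountered in a StoreFile are registered with add(), and puts are then checked with isDeleted():

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.DeleteTracker;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteTrackerCallerSketch {
  // Register one delete for qualifier "q1", then ask whether a put at putTs is masked by it.
  static boolean putIsVisible(DeleteTracker deletes) {
    byte[] qualifier = Bytes.toBytes("q1");
    deletes.add(qualifier, 0, qualifier.length, 2000L,
        KeyValue.Type.Delete.getCode());                    // delete seen in a StoreFile
    long putTs = 1000L;                                     // timestamp of a put for the same qualifier
    return !deletes.isDeleted(qualifier, 0, qualifier.length, putTs);
  }
}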

Added: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java?rev=785076&view=auto
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java (added)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java Tue Jun 16 04:33:56 2009
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.NavigableSet;
+import org.apache.hadoop.hbase.regionserver.QueryMatcher.MatchCode;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * This class is used for the tracking and enforcement of columns and numbers 
+ * of versions during the course of a Get or Scan operation, when explicit
+ * column qualifiers have been asked for in the query.
+ *
+ * With a little magic (see {@link ScanQueryMatcher}), we can use this matcher
+ * for both scans and gets.  The main difference is 'next' and 'done' collapse
+ * for the scan case (since we see all columns in order), and we only reset
+ * between rows.
+ * 
+ * <p>
+ * This class is utilized by {@link QueryMatcher} through two methods:
+ * <ul><li>{@link #checkColumn} is called when a Put satisfies all other
+ * conditions of the query.  This method returns a {@link MatchCode} to define
+ * what action should be taken.
+ * <li>{@link #update} is called at the end of every StoreFile or Memcache.
+ * <p>
+ * This class is NOT thread-safe as queries are never multi-threaded 
+ */
+public class ExplicitColumnTracker implements ColumnTracker {
+
+  private int maxVersions;
+  private List<ColumnCount> columns;
+  private int index;
+  private ColumnCount column;
+  private NavigableSet<byte[]> origColumns;
+  
+  /**
+   * Default constructor.
+   * @param columns columns specified by the user in the query
+   * @param maxVersions maximum versions to return per column
+   */
+  public ExplicitColumnTracker(NavigableSet<byte[]> columns, int maxVersions) {
+    this.maxVersions = maxVersions;
+    this.origColumns = columns;
+    reset();
+  }
+  
+  /**
+   * Done when there are no more columns to match against.
+   */
+  public boolean done() {
+    return this.columns.size() == 0;
+  }
+
+  public ColumnCount getColumnHint() {
+    return this.column;
+  }
+  
+  /**
+   * Checks against the parameters of the query and the columns which have
+   * already been processed by this query.
+   * @param bytes KeyValue buffer
+   * @param offset offset to the start of the qualifier
+   * @param length length of the qualifier
+   * @return MatchCode telling QueryMatcher what action to take
+   */
+  public MatchCode checkColumn(byte [] bytes, int offset, int length) {
+    // No more columns left, we are done with this query
+    if(this.columns.size() == 0) {
+      return MatchCode.DONE; // done_row
+    }
+    
+    // No more columns to match against, done with storefile
+    if(this.column == null) {
+      return MatchCode.NEXT; // done_row
+    }
+    
+    // Compare specific column to current column
+    int ret = Bytes.compareTo(column.getBuffer(), column.getOffset(), 
+        column.getLength(), bytes, offset, length);
+    
+    // Matches, decrement versions left and include
+    if(ret == 0) {
+      if(this.column.decrement() == 0) {
+        // Done with versions for this column
+        this.columns.remove(this.index);
+        if(this.columns.size() == this.index) {
+          // Will not hit any more columns in this storefile
+          this.column = null;
+        } else {
+          this.column = this.columns.get(this.index);
+        }
+      }
+      return MatchCode.INCLUDE;
+    }
+
+    // The KeyValue's column sorts after the current target column, so the
+    // target cannot appear later in this storefile; advance and check again
+    if(ret <= -1) {
+      if(++this.index == this.columns.size()) {
+        // No more to match, do not include, done with storefile
+        return MatchCode.NEXT; // done_row
+      }
+      this.column = this.columns.get(this.index);
+      return checkColumn(bytes, offset, length);
+    }
+
+    // The KeyValue's column sorts before the current target column
+    // Skip it
+    return MatchCode.SKIP; // skip to next column, with hint?
+  }
+  
+  /**
+   * Called at the end of every StoreFile or Memcache.
+   */
+  public void update() {
+    if(this.columns.size() != 0) {
+      this.index = 0;
+      this.column = this.columns.get(this.index);
+    } else {
+      this.index = -1;
+      this.column = null;
+    }
+  }
+
+  // Called between every row.
+  public void reset() {
+    buildColumnList(this.origColumns);
+    this.index = 0;
+    this.column = this.columns.get(this.index);
+  }
+
+  private void buildColumnList(NavigableSet<byte[]> columns) {
+    this.columns = new ArrayList<ColumnCount>(columns.size());
+    for(byte [] column : columns) {
+      this.columns.add(new ColumnCount(column,maxVersions));
+    }
+  }
+}
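
For reviewers, a rough usage sketch (not part of this patch) of how a matcher
might drive the new tracker over one row.  It assumes Bytes.BYTES_COMPARATOR
for ordering the qualifier set, and the class sits in the regionserver package
only so it can see the package-local MatchCode type; the class and variable
names are made up for illustration.

  package org.apache.hadoop.hbase.regionserver;

  import java.util.NavigableSet;
  import java.util.TreeSet;

  import org.apache.hadoop.hbase.util.Bytes;

  /** Hypothetical driver for the checkColumn/update/reset life cycle. */
  public class ExplicitColumnTrackerSketch {
    public static void main(String[] args) {
      // The query asks for qualifiers "a" and "c", one version of each.
      NavigableSet<byte[]> qualifiers = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
      qualifiers.add(Bytes.toBytes("a"));
      qualifiers.add(Bytes.toBytes("c"));
      ExplicitColumnTracker tracker = new ExplicitColumnTracker(qualifiers, 1);

      byte [] a = Bytes.toBytes("a");
      byte [] b = Bytes.toBytes("b");
      byte [] c = Bytes.toBytes("c");

      // Qualifiers arrive in sorted order, as they would from a StoreFile.
      System.out.println(tracker.checkColumn(a, 0, a.length)); // INCLUDE
      System.out.println(tracker.checkColumn(b, 0, b.length)); // SKIP, not asked for
      System.out.println(tracker.checkColumn(c, 0, c.length)); // INCLUDE
      System.out.println(tracker.done());                      // true, all versions seen

      tracker.update();  // end of a StoreFile or the Memcache
      tracker.reset();   // start of the next row in the scan case
    }
  }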

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/FailedLogCloseException.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/FailedLogCloseException.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/FailedLogCloseException.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/FailedLogCloseException.java Tue Jun 16 04:33:56 2009
@@ -28,10 +28,16 @@
 class FailedLogCloseException extends IOException {
   private static final long serialVersionUID = 1759152841462990925L;
 
+  /**
+   * Default constructor.
+   */
   public FailedLogCloseException() {
     super();
   }
 
+  /**
+   * @param arg0 exception message
+   */
   public FailedLogCloseException(String arg0) {
     super(arg0);
   }

Added: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/GetDeleteTracker.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/GetDeleteTracker.java?rev=785076&view=auto
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/GetDeleteTracker.java (added)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/GetDeleteTracker.java Tue Jun 16 04:33:56 2009
@@ -0,0 +1,403 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * This class is responsible for the tracking and enforcement of Deletes
+ * during the course of a Get operation.
+ * <p>
+ * This class is utilized through three methods:
+ * <ul><li>{@link #add} when encountering a Delete
+ * <li>{@link #isDeleted} when checking if a Put KeyValue has been deleted
+ * <li>{@link #update} when reaching the end of a StoreFile
+ * <p>
+ * This class is NOT thread-safe as queries are never multi-threaded 
+ */
+public class GetDeleteTracker implements DeleteTracker {
+
+  private long familyStamp = -1L;
+  protected List<Delete> deletes = null;
+  private List<Delete> newDeletes = new ArrayList<Delete>();
+  private Iterator<Delete> iterator;
+  private Delete delete = null;
+
+  /**
+   * Default constructor.
+   */
+  public GetDeleteTracker() {}
+
+  /**
+   * Add the specified KeyValue to the list of deletes to check against for
+   * this row operation.
+   * <p>
+   * This is called when a Delete is encountered in a StoreFile.
+   * @param buffer
+   * @param qualifierOffset
+   * @param qualifierLength
+   * @param timestamp
+   * @param type
+   */
+  @Override
+  public void add(byte [] buffer, int qualifierOffset, int qualifierLength,
+      long timestamp, byte type) {
+    if(type == KeyValue.Type.DeleteFamily.getCode()) {
+      if(timestamp > familyStamp) {
+        familyStamp = timestamp;
+      }
+      return;
+    }
+    if(timestamp > familyStamp) {
+      this.newDeletes.add(new Delete(buffer, qualifierOffset, qualifierLength,
+          type, timestamp));
+    }
+  }
+
+  /** 
+   * Check if the specified KeyValue buffer has been deleted by a previously
+   * seen delete.
+   * @param buffer KeyValue buffer
+   * @param qualifierOffset column qualifier offset
+   * @param qualifierLength column qualifier length
+   * @param timestamp timestamp
+   * @return true if the specified KeyValue is deleted, false if not
+   */
+  @Override
+  public boolean isDeleted(byte [] buffer, int qualifierOffset,
+      int qualifierLength, long timestamp) {
+
+    // Check against DeleteFamily
+    if(timestamp <= familyStamp) {
+      return true;
+    }
+
+    // Check if there are other deletes
+    if(this.delete == null) {
+      return false;
+    }
+
+    // Check column
+    int ret = Bytes.compareTo(buffer, qualifierOffset, qualifierLength,
+        this.delete.buffer, this.delete.qualifierOffset, 
+        this.delete.qualifierLength);
+    if(ret <= -1) {
+      // Have not reached the next delete yet
+      return false;
+    } else if(ret >= 1) {
+      // Deletes an earlier column, need to move down deletes
+      if(this.iterator.hasNext()) {
+        this.delete = this.iterator.next();
+      } else {
+        this.delete = null;
+        return false;
+      }
+      return isDeleted(buffer, qualifierOffset, qualifierLength, timestamp);
+    }
+
+    // Check Timestamp
+    if(timestamp > this.delete.timestamp) {
+      return false;
+    }
+
+    // Check Type
+    switch(KeyValue.Type.codeToType(this.delete.type)) {
+    case Delete:
+      boolean equal = timestamp == this.delete.timestamp;
+
+      if(this.iterator.hasNext()) {
+        this.delete = this.iterator.next();
+      } else {
+        this.delete = null;
+      }
+
+      if(equal){
+        return true;
+      }
+      // timestamp < this.delete.timestamp
+      // Delete of an explicit column newer than current
+      return isDeleted(buffer, qualifierOffset, qualifierLength, timestamp);
+    case DeleteColumn:
+      return true;
+    }
+
+    // should never reach this
+    return false;
+  }
+
+  @Override
+  public boolean isEmpty() {
+    if(this.familyStamp == 0L && this.delete == null) {
+      return true;
+    }
+    return false;
+  }
+
+  @Override
+  public void reset() {
+    this.deletes = null;
+    this.delete = null;
+    this.newDeletes = new ArrayList<Delete>();
+    this.familyStamp = 0L;
+    this.iterator = null;
+  }
+
+  /**
+   * Called at the end of every StoreFile.
+   * <p>
+   * Many optimized implementations of Trackers will require an update
+   * when the end of each StoreFile is reached.
+   */
+  @Override
+  public void update() {
+    // If no previous deletes, use new deletes and return
+    if(this.deletes == null || this.deletes.size() == 0) {
+      finalize(this.newDeletes);
+      return;
+    }
+
+    // If no new delete, retain previous deletes and return
+    if(this.newDeletes.size() == 0) {
+      return;
+    }
+
+    // Merge previous deletes with new deletes
+    List<Delete> mergeDeletes = 
+      new ArrayList<Delete>(this.newDeletes.size());
+    int oldIndex = 0;
+    int newIndex = 0;
+
+    Delete newDelete = newDeletes.get(newIndex);
+    Delete oldDelete = deletes.get(oldIndex);
+    while(true) {
+      switch(compareDeletes(oldDelete,newDelete)) {
+      case NEXT_NEW: {
+        if(++newIndex == newDeletes.size()) {
+          // Done with new, add the rest of old to merged and return
+          mergeDown(mergeDeletes, deletes, oldIndex);
+          finalize(mergeDeletes);
+          return;
+        }
+        newDelete = this.newDeletes.get(newIndex);
+        break;
+      }
+
+      case INCLUDE_NEW_NEXT_NEW: {
+        mergeDeletes.add(newDelete);
+        if(++newIndex == newDeletes.size()) {
+          // Done with new, add the rest of old to merged and return
+          mergeDown(mergeDeletes, deletes, oldIndex);
+          finalize(mergeDeletes);
+          return;
+        }
+        newDelete = this.newDeletes.get(newIndex);
+        break;
+      }
+
+      case INCLUDE_NEW_NEXT_BOTH: {
+        mergeDeletes.add(newDelete);
+        ++oldIndex;
+        ++newIndex;
+        if(oldIndex == deletes.size()) {
+          if(newIndex == newDeletes.size()) {
+            finalize(mergeDeletes);
+            return;
+          }
+          mergeDown(mergeDeletes, newDeletes, newIndex);
+          finalize(mergeDeletes);
+          return;
+        } else if(newIndex == newDeletes.size()) {
+          mergeDown(mergeDeletes, deletes, oldIndex);
+          finalize(mergeDeletes);
+          return;
+        }
+        oldDelete = this.deletes.get(oldIndex);
+        newDelete = this.newDeletes.get(newIndex);
+        break;
+      }
+
+      case INCLUDE_OLD_NEXT_BOTH: {
+        mergeDeletes.add(oldDelete);
+        ++oldIndex;
+        ++newIndex;
+        if(oldIndex == deletes.size()) {
+          if(newIndex == newDeletes.size()) {
+            finalize(mergeDeletes);
+            return;
+          }
+          mergeDown(mergeDeletes, newDeletes, newIndex);
+          finalize(mergeDeletes);
+          return;
+        } else if(newIndex == newDeletes.size()) {
+          mergeDown(mergeDeletes, deletes, oldIndex);
+          finalize(mergeDeletes);
+          return;
+        }
+        oldDelete = this.deletes.get(oldIndex);
+        newDelete = this.newDeletes.get(newIndex);
+        break;
+      }
+
+      case INCLUDE_OLD_NEXT_OLD: {
+        mergeDeletes.add(oldDelete);
+        if(++oldIndex == deletes.size()) {
+          mergeDown(mergeDeletes, newDeletes, newIndex);
+          finalize(mergeDeletes);
+          return;
+        }
+        oldDelete = this.deletes.get(oldIndex);
+        break;
+      }
+
+      case NEXT_OLD: {
+        if(++oldIndex == deletes.size()) {
+          // Done with old, add the rest of new to merged and return
+          mergeDown(mergeDeletes, newDeletes, newIndex);
+          finalize(mergeDeletes);
+          return;
+        }
+        oldDelete = this.deletes.get(oldIndex);
+      }
+      }
+    }
+  }
+
+  private void finalize(List<Delete> mergeDeletes) {
+    this.deletes = mergeDeletes;
+    this.newDeletes = new ArrayList<Delete>();
+    if(this.deletes.size() > 0){
+      this.iterator = deletes.iterator();
+      this.delete = iterator.next();
+    }
+  }
+
+  private void mergeDown(List<Delete> mergeDeletes, List<Delete> srcDeletes, 
+      int srcIndex) {
+    int index = srcIndex;
+    while(index < srcDeletes.size()) {
+      mergeDeletes.add(srcDeletes.get(index++));
+    }
+  }
+
+
+  protected DeleteCompare compareDeletes(Delete oldDelete, Delete newDelete) {
+
+    // Compare columns
+    // Just comparing the qualifier portion, so we can keep using Bytes.compareTo().
+    int ret = Bytes.compareTo(oldDelete.buffer, oldDelete.qualifierOffset,
+        oldDelete.qualifierLength, newDelete.buffer, newDelete.qualifierOffset,
+        newDelete.qualifierLength);
+
+    if(ret <= -1) {
+      return DeleteCompare.INCLUDE_OLD_NEXT_OLD;
+    } else if(ret >= 1) {
+      return DeleteCompare.INCLUDE_NEW_NEXT_NEW;
+    }
+
+    // Same column
+
+    // Branches below can be optimized.  Keeping like this until testing
+    // is complete.
+    if(oldDelete.type == newDelete.type) {
+      // the one case where we can merge 2 deletes -> 1 delete.
+      if(oldDelete.type == KeyValue.Type.Delete.getCode()){
+        if(oldDelete.timestamp > newDelete.timestamp) {
+          return DeleteCompare.INCLUDE_OLD_NEXT_OLD;
+        } else if(oldDelete.timestamp < newDelete.timestamp) {
+          return DeleteCompare.INCLUDE_NEW_NEXT_NEW;
+        } else {
+          return DeleteCompare.INCLUDE_OLD_NEXT_BOTH;
+        }
+      }
+      if(oldDelete.timestamp < newDelete.timestamp) {
+        return DeleteCompare.INCLUDE_NEW_NEXT_BOTH;
+      } 
+      return DeleteCompare.INCLUDE_OLD_NEXT_BOTH;
+    }
+
+    // Old delete is more specific than the new delete.
+    // If the old delete is newer than the new delete, we have to
+    // keep it.
+    if(oldDelete.type < newDelete.type) {
+      if(oldDelete.timestamp > newDelete.timestamp) {
+        return DeleteCompare.INCLUDE_OLD_NEXT_OLD;
+      } else if(oldDelete.timestamp < newDelete.timestamp) {
+        return DeleteCompare.NEXT_OLD;
+      } else {
+        return DeleteCompare.NEXT_OLD;
+      }
+    }
+
+    // new delete is more specific than the old delete.
+    if(oldDelete.type > newDelete.type) {
+      if(oldDelete.timestamp > newDelete.timestamp) {
+        return DeleteCompare.NEXT_NEW;
+      } else if(oldDelete.timestamp < newDelete.timestamp) {
+        return DeleteCompare.INCLUDE_NEW_NEXT_NEW;
+      } else {
+        return DeleteCompare.NEXT_NEW;
+      }
+    }
+
+    // Should never be reached; every column/type/timestamp ordering is
+    // handled above.
+    throw new RuntimeException("GetDeleteTracker:compareDeletes reached terminal state");
+  }
+
+  /**
+   * Internal class used to store the necessary information for a Delete.
+   * <p>
+   * Rather than reparsing the KeyValue, or copying fields, this class points
+   * to the underlying KeyValue buffer with pointers to the qualifier and fields
+   * for type and timestamp.  No parsing work is done in DeleteTracker now.
+   * <p>
+   * Fields are package-private rather than encapsulated because they are
+   * accessed often, directly, and only within the enclosing tracker.
+   */
+  protected class Delete {
+    byte [] buffer;
+    int qualifierOffset;
+    int qualifierLength;
+    byte type;
+    long timestamp;
+    /**
+     * Constructor
+     * @param buffer
+     * @param qualifierOffset
+     * @param qualifierLength
+     * @param type
+     * @param timestamp
+     */
+    public Delete(byte [] buffer, int qualifierOffset, int qualifierLength,
+        byte type, long timestamp) {
+      this.buffer = buffer;
+      this.qualifierOffset = qualifierOffset;
+      this.qualifierLength = qualifierLength;
+      this.type = type;
+      this.timestamp = timestamp;
+    }
+  }
+}
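
Likewise, a small sketch (again not part of the commit) of the
add/update/isDeleted cycle on the new GetDeleteTracker.  It assumes the Get
path hands back cells for a column newest-first, as a real Get would; the
class name is hypothetical.

  package org.apache.hadoop.hbase.regionserver;

  import org.apache.hadoop.hbase.KeyValue;
  import org.apache.hadoop.hbase.util.Bytes;

  /** Hypothetical walk-through of delete tracking during a Get. */
  public class GetDeleteTrackerSketch {
    public static void main(String[] args) {
      GetDeleteTracker tracker = new GetDeleteTracker();
      byte [] qual = Bytes.toBytes("q1");

      // A point Delete for "q1" at timestamp 200 is seen while reading a StoreFile.
      tracker.add(qual, 0, qual.length, 200L, KeyValue.Type.Delete.getCode());

      // End of the StoreFile: fold the newly seen deletes into the active set.
      tracker.update();

      // A Put at a newer timestamp survives; the one at exactly 200 is masked.
      System.out.println(tracker.isDeleted(qual, 0, qual.length, 300L)); // false
      System.out.println(tracker.isDeleted(qual, 0, qual.length, 200L)); // true
    }
  }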

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLog.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLog.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLog.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLog.java Tue Jun 16 04:33:56 2009
@@ -100,7 +100,7 @@
 public class HLog implements HConstants, Syncable {
   static final Log LOG = LogFactory.getLog(HLog.class);
   private static final String HLOG_DATFILE = "hlog.dat.";
-  static final byte [] METACOLUMN = Bytes.toBytes("METACOLUMN:");
+  static final byte [] METAFAMILY = Bytes.toBytes("METAFAMILY");
   static final byte [] METAROW = Bytes.toBytes("METAROW");
   private final FileSystem fs;
   private final Path dir;
@@ -701,8 +701,8 @@
   }
 
   private KeyValue completeCacheFlushLogEdit() {
-    return new KeyValue(METAROW, METACOLUMN, System.currentTimeMillis(),
-      COMPLETE_CACHE_FLUSH);
+    return new KeyValue(METAROW, METAFAMILY, null,
+      System.currentTimeMillis(), COMPLETE_CACHE_FLUSH);
   }
 
   /**
@@ -716,11 +716,11 @@
   }
 
   /**
-   * @param column
+   * @param family
    * @return true if the column is a meta column
    */
-  public static boolean isMetaColumn(byte [] column) {
-    return Bytes.equals(METACOLUMN, column);
+  public static boolean isMetaFamily(byte [] family) {
+    return Bytes.equals(METAFAMILY, family);
   }
   
   /**
@@ -870,6 +870,7 @@
           Executors.newFixedThreadPool(DEFAULT_NUMBER_LOG_WRITER_THREAD);
         for (final byte[] key : logEntries.keySet()) {
           Thread thread = new Thread(Bytes.toString(key)) {
+            @Override
             public void run() {
               LinkedList<HLogEntry> entries = logEntries.get(key);
               LOG.debug("Thread got " + entries.size() + " to process");

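A tiny, hypothetical check showing what the METACOLUMN -> METAFAMILY rename
means for callers of HLog; the sketch lives in the regionserver package only
so it can reach the package-private METAFAMILY constant.

  package org.apache.hadoop.hbase.regionserver;

  import org.apache.hadoop.hbase.util.Bytes;

  /** Hypothetical check distinguishing log-internal edits from user edits. */
  public class MetaFamilySketch {
    public static void main(String[] args) {
      // Ordinary user families are not the log's bookkeeping family ...
      System.out.println(HLog.isMetaFamily(Bytes.toBytes("info")));  // false
      // ... but entries written against METAFAMILY (e.g. the cache flush marker) are.
      System.out.println(HLog.isMetaFamily(HLog.METAFAMILY));        // true
    }
  }
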
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java Tue Jun 16 04:33:56 2009
@@ -87,6 +87,9 @@
     return logSeqNum;
   }
 
+  /**
+   * @return the write time
+   */
   public long getWriteTime() {
     return this.writeTime;
   }


