hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From j...@apache.org
Subject svn commit: r546192 [2/3] - in /lucene/hadoop/trunk/src/contrib/hbase: ./ conf/ src/java/org/apache/hadoop/hbase/ src/test/org/apache/hadoop/hbase/
Date Mon, 11 Jun 2007 16:46:31 GMT
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMerge.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMerge.java?view=diff&rev=546192&r1=546191&r2=546192
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMerge.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMerge.java Mon Jun 11 09:46:27 2007
@@ -31,9 +31,13 @@
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.Text;
 
-public class HMerge implements HConstants {
-  private static final Log LOG = LogFactory.getLog(HMerge.class);
-  private static final Text[] META_COLS = {COL_REGIONINFO};
+/** 
+ * A non-instantiable class that has a static method capable of compacting
+ * a table by merging adjacent regions that have grown too small.
+ */
+class HMerge implements HConstants {
+  static final Log LOG = LogFactory.getLog(HMerge.class);
+  static final Text[] META_COLS = {COL_REGIONINFO};
   
   private HMerge() {}                           // Not instantiable
   
@@ -93,13 +97,13 @@
       this.more = true;
       this.key = new HStoreKey();
       this.info = new HRegionInfo();
-      this.dir = new Path(conf.get(HREGION_DIR, DEFAULT_HREGION_DIR));
+      this.dir = new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR));
       this.basedir = new Path(dir, "merge_" + System.currentTimeMillis());
       fs.mkdirs(basedir);
       this.hlog = new HLog(fs, new Path(basedir, HREGION_LOGDIR_NAME), conf);
     }
     
-    public void process() throws IOException {
+    void process() throws IOException {
       try {
         while(more) {
           TreeSet<HRegionInfo> regionsToMerge = next();
@@ -110,7 +114,7 @@
         }
       } finally {
         try {
-          hlog.close();
+          hlog.closeAndDelete();
           
         } catch(IOException e) {
           LOG.error(e);
@@ -137,12 +141,12 @@
       for(int i = 0; i < regions.length - 1; i++) {
         if(currentRegion == null) {
           currentRegion =
-            new HRegion(dir, hlog, fs, conf, regions[i], null, null);
+            new HRegion(dir, hlog, fs, conf, regions[i], null);
 
           currentSize = currentRegion.largestHStore();
         }
         nextRegion =
-          new HRegion(dir, hlog, fs, conf, regions[i + 1], null, null);
+          new HRegion(dir, hlog, fs, conf, regions[i + 1], null);
 
         nextSize = nextRegion.largestHStore();
 
@@ -164,10 +168,9 @@
           i++;
           continue;
           
-        } else {
-          LOG.info("not merging regions " + currentRegion.getRegionName()
-              + " and " + nextRegion.getRegionName());
         }
+        LOG.info("not merging regions " + currentRegion.getRegionName()
+            + " and " + nextRegion.getRegionName());
 
         currentRegion.close();
         currentRegion = nextRegion;
@@ -184,13 +187,14 @@
         HRegion newRegion) throws IOException;
     
   }
-  
+
+  /** Instantiated to compact a normal user table */
   private static class OnlineMerger extends Merger {
     private HClient client;
     private HScannerInterface metaScanner;
     private HRegionInfo latestRegion;
     
-    public OnlineMerger(Configuration conf, FileSystem fs, HClient client,
+    OnlineMerger(Configuration conf, FileSystem fs, HClient client,
         Text tableName) throws IOException {
       
       super(conf, fs, tableName);
@@ -231,6 +235,7 @@
       }
     }
 
+    @Override
     protected TreeSet<HRegionInfo> next() throws IOException {
       TreeSet<HRegionInfo> regions = new TreeSet<HRegionInfo>();
       if(latestRegion == null) {
@@ -245,7 +250,8 @@
       }
       return regions;
     }
-    
+
+    @Override
     protected void updateMeta(Text oldRegion1, Text oldRegion2,
         HRegion newRegion) throws IOException {
       Text[] regionsToDelete = {
@@ -307,23 +313,24 @@
     }
   }
 
+  /** Instantiated to compact the meta region */
   private static class OfflineMerger extends Merger {
     private Path dir;
     private TreeSet<HRegionInfo> metaRegions;
     private TreeMap<Text, BytesWritable> results;
     
-    public OfflineMerger(Configuration conf, FileSystem fs, Text tableName)
+    OfflineMerger(Configuration conf, FileSystem fs, Text tableName)
         throws IOException {
       
       super(conf, fs, tableName);
-      this.dir = new Path(conf.get(HREGION_DIR, DEFAULT_HREGION_DIR));
+      this.dir = new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR));
       this.metaRegions = new TreeSet<HRegionInfo>();
       this.results = new TreeMap<Text, BytesWritable>();
 
       // Scan root region to find all the meta regions
       
-      HRegion root = new HRegion(dir, hlog,fs, conf, HGlobals.rootRegionInfo,
-          null, null);
+      HRegion root =
+        new HRegion(dir, hlog,fs, conf, HGlobals.rootRegionInfo, null);
 
       HInternalScannerInterface rootScanner =
         root.getScanner(META_COLS, new Text());
@@ -350,16 +357,18 @@
       }
     }
 
+    @Override
     protected TreeSet<HRegionInfo> next() throws IOException {
       more = false;
       return metaRegions;
     }
-    
+
+    @Override
     protected void updateMeta(Text oldRegion1, Text oldRegion2,
         HRegion newRegion) throws IOException {
       
       HRegion root =
-        new HRegion(dir, hlog, fs, conf, HGlobals.rootRegionInfo, null, null);
+        new HRegion(dir, hlog, fs, conf, HGlobals.rootRegionInfo, null);
 
       Text[] regionsToDelete = {
           oldRegion1,

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMsg.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMsg.java?view=diff&rev=546192&r1=546191&r2=546192
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMsg.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMsg.java Mon Jun 11 09:46:27 2007
@@ -24,55 +24,149 @@
  * HRegionServers.
  ******************************************************************************/
 public class HMsg implements Writable {
+  
+  // Messages sent from master to region server
+  
+  /** Start serving the specified region */
   public static final byte MSG_REGION_OPEN = 1;
+  
+  /** Stop serving the specified region */
   public static final byte MSG_REGION_CLOSE = 2;
-  public static final byte MSG_REGION_MERGE = 3;
+
+  /** Region server is unknown to master. Restart */
   public static final byte MSG_CALL_SERVER_STARTUP = 4;
+  
+  /** Master tells region server to stop */
   public static final byte MSG_REGIONSERVER_STOP = 5;
+  
+  /** Stop serving the specified region and don't report back that it's closed */
   public static final byte MSG_REGION_CLOSE_WITHOUT_REPORT = 6;
 
+  // Messages sent from the region server to the master
+  
+  /** region server is now serving the specified region */
   public static final byte MSG_REPORT_OPEN = 100;
+  
+  /** region server is no longer serving the specified region */
   public static final byte MSG_REPORT_CLOSE = 101;
-  public static final byte MSG_REGION_SPLIT = 102;
+
+  /** region server is now serving a region produced by a region split */
   public static final byte MSG_NEW_REGION = 103;
+  
+  /** region server is shutting down */
   public static final byte MSG_REPORT_EXITING = 104;
 
   byte msg;
   HRegionInfo info;
 
+  /** Default constructor. Used during deserialization */
   public HMsg() {
     this.info = new HRegionInfo();
   }
 
+  /**
+   * Construct a message with an empty HRegionInfo
+   * 
+   * @param msg - message code
+   */
   public HMsg(byte msg) {
     this.msg = msg;
     this.info = new HRegionInfo();
   }
   
+  /**
+   * Construct a message with the specified message code and HRegionInfo
+   * 
+   * @param msg - message code
+   * @param info - HRegionInfo
+   */
   public HMsg(byte msg, HRegionInfo info) {
     this.msg = msg;
     this.info = info;
   }
 
+  /**
+   * Accessor
+   * @return message code
+   */
   public byte getMsg() {
     return msg;
   }
 
+  /**
+   * Accessor
+   * @return HRegionInfo
+   */
   public HRegionInfo getRegionInfo() {
     return info;
   }
 
-
+  @Override
+  public String toString() {
+    StringBuilder message = new StringBuilder();
+    switch(msg) {
+    case MSG_REGION_OPEN:
+      message.append("MSG_REGION_OPEN : ");
+      break;
+      
+    case MSG_REGION_CLOSE:
+      message.append("MSG_REGION_CLOSE : ");
+      break;
+      
+    case MSG_CALL_SERVER_STARTUP:
+      message.append("MSG_CALL_SERVER_STARTUP : ");
+      break;
+      
+    case MSG_REGIONSERVER_STOP:
+      message.append("MSG_REGIONSERVER_STOP : ");
+      break;
+      
+    case MSG_REGION_CLOSE_WITHOUT_REPORT:
+      message.append("MSG_REGION_CLOSE_WITHOUT_REPORT : ");
+      break;
+      
+    case MSG_REPORT_OPEN:
+      message.append("MSG_REPORT_OPEN : ");
+      break;
+      
+    case MSG_REPORT_CLOSE:
+      message.append("MSG_REPORT_CLOSE : ");
+      break;
+      
+    case MSG_NEW_REGION:
+      message.append("MSG_NEW_REGION : ");
+      break;
+      
+    case MSG_REPORT_EXITING:
+      message.append("MSG_REPORT_EXITING : ");
+      break;
+      
+    default:
+      message.append("unknown message code (");
+      message.append(msg);
+      message.append(") : ");
+      break;
+    }
+    message.append(info == null ? "null" : info.toString());
+    return message.toString();
+  }
+  
   //////////////////////////////////////////////////////////////////////////////
   // Writable
   //////////////////////////////////////////////////////////////////////////////
 
-   public void write(DataOutput out) throws IOException {
+   /* (non-Javadoc)
+   * @see org.apache.hadoop.io.Writable#write(java.io.DataOutput)
+   */
+  public void write(DataOutput out) throws IOException {
      out.writeByte(msg);
      info.write(out);
    }
 
-   public void readFields(DataInput in) throws IOException {
+   /* (non-Javadoc)
+   * @see org.apache.hadoop.io.Writable#readFields(java.io.DataInput)
+   */
+  public void readFields(DataInput in) throws IOException {
      this.msg = in.readByte();
      this.info.readFields(in);
    }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java?view=diff&rev=546192&r1=546191&r2=546192
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java Mon Jun 11 09:46:27 2007
@@ -55,7 +55,7 @@
  * regionName is a unique identifier for this HRegion. (startKey, endKey]
  * defines the keyspace for this HRegion.
  */
-public class HRegion implements HConstants {
+class HRegion implements HConstants {
   static String SPLITDIR = "splits";
   static String MERGEDIR = "merges";
   static String TMPREGION_PREFIX = "tmpregion_";
@@ -72,7 +72,7 @@
    * @param regionName          - name of the region to delete
    * @throws IOException
    */
-  public static void deleteRegion(FileSystem fs, Path baseDirectory,
+  static void deleteRegion(FileSystem fs, Path baseDirectory,
       Text regionName) throws IOException {
     LOG.debug("Deleting region " + regionName);
     fs.delete(HStoreFile.getHRegionDir(baseDirectory, regionName));
@@ -83,7 +83,7 @@
    * HRegionServer. Returns a brand-new active HRegion, also
    * running on the current HRegionServer.
    */
-  public static HRegion closeAndMerge(HRegion srcA, HRegion srcB) throws IOException {
+  static HRegion closeAndMerge(HRegion srcA, HRegion srcB) throws IOException {
 
     // Make sure that srcA comes first; important for key-ordering during
     // write of the merged file.
@@ -110,7 +110,7 @@
     Configuration conf = srcA.getConf();
     HTableDescriptor tabledesc = srcA.getTableDesc();
     HLog log = srcA.getLog();
-    Path dir = srcA.getDir();
+    Path rootDir = srcA.getRootDir();
 
     Text startKey = srcA.getStartKey();
     Text endKey = srcB.getEndKey();
@@ -222,8 +222,8 @@
 
     // Done
     
-    HRegion dstRegion = new HRegion(dir, log, fs, conf, newRegionInfo,
-        newRegionDir, null);
+    HRegion dstRegion = new HRegion(rootDir, log, fs, conf, newRegionInfo,
+        newRegionDir);
 
     // Get rid of merges directory
     
@@ -234,59 +234,6 @@
     return dstRegion;
   }
 
-  /**
-   * Internal method to create a new HRegion. Used by createTable and by the
-   * bootstrap code in the HMaster constructor
-   * 
-   * @param fs          - file system to create region in
-   * @param dir         - base directory
-   * @param conf        - configuration object
-   * @param desc        - table descriptor
-   * @param regionId    - region id
-   * @param startKey    - first key in region
-   * @param endKey      - last key in region
-   * @return            - new HRegion
-   * @throws IOException
-   */
-  public static HRegion createNewHRegion(FileSystem fs, Path dir,
-      Configuration conf, HTableDescriptor desc, long regionId, Text startKey,
-      Text endKey) throws IOException {
-    
-    HRegionInfo info = new HRegionInfo(regionId, desc, startKey, endKey);
-    Path regionDir = HStoreFile.getHRegionDir(dir, info.regionName);
-    fs.mkdirs(regionDir);
-
-    return new HRegion(dir,
-      new HLog(fs, new Path(regionDir, HREGION_LOGDIR_NAME), conf),
-      fs, conf, info, null, null);
-  }
-  
-  /**
-   * Inserts a new table's meta information into the meta table. Used by
-   * the HMaster bootstrap code.
-   * 
-   * @param meta                - HRegion to be updated
-   * @param table               - HRegion of new table
-   * 
-   * @throws IOException
-   */
-  public static void addRegionToMeta(HRegion meta, HRegion table)
-      throws IOException {
-    
-    // The row key is the region name
-    
-    long writeid = meta.startUpdate(table.getRegionName());
-    
-    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
-    DataOutputStream s = new DataOutputStream(bytes);
-
-    table.getRegionInfo().write(s);
-    
-    meta.put(writeid, COL_REGIONINFO, new BytesWritable(bytes.toByteArray()));
-    
-    meta.commit(writeid);
-  }
-  
   //////////////////////////////////////////////////////////////////////////////
   // Members
   //////////////////////////////////////////////////////////////////////////////
@@ -299,7 +246,7 @@
   
   HMemcache memcache;
 
-  Path dir;
+  Path rootDir;
   HLog log;
   FileSystem fs;
   Configuration conf;
@@ -307,10 +254,10 @@
   Path regiondir;
 
   static class WriteState {
-    public volatile boolean writesOngoing;
-    public volatile boolean writesEnabled;
-    public volatile boolean closed;
-    public WriteState() {
+    volatile boolean writesOngoing;
+    volatile boolean writesEnabled;
+    volatile boolean closed;
+    WriteState() {
       this.writesOngoing = true;
       this.writesEnabled = true;
       this.closed = false;
@@ -340,18 +287,22 @@
    * appropriate log info for this HRegion. If there is a previous log file
    * (implying that the HRegion has been written-to before), then read it from
    * the supplied path.
+   * 
+   * @param rootDir root directory for HBase instance
+   * @param log HLog where changes should be committed
    * @param fs is the filesystem.  
-   * @param dir dir is where the HRegion is stored.
    * @param conf is global configuration settings.
+   * @param regionInfo - HRegionInfo that describes the region
    * @param initialFiles If there are initial files (implying that the HRegion
    * is new), then read them from the supplied path.
+   * 
    * @throws IOException
    */
-  public HRegion(Path dir, HLog log, FileSystem fs, Configuration conf, 
-      HRegionInfo regionInfo, Path initialFiles, Path oldLogFile)
+  HRegion(Path rootDir, HLog log, FileSystem fs, Configuration conf, 
+      HRegionInfo regionInfo, Path initialFiles)
   throws IOException {
     
-    this.dir = dir;
+    this.rootDir = rootDir;
     this.log = log;
     this.fs = fs;
     this.conf = conf;
@@ -366,7 +317,8 @@
     // Declare the regionName.  This is a unique string for the region, used to 
     // build a unique filename.
     
-    this.regiondir = HStoreFile.getHRegionDir(dir, this.regionInfo.regionName);
+    this.regiondir = HStoreFile.getHRegionDir(rootDir, this.regionInfo.regionName);
+    Path oldLogFile = new Path(regiondir, HREGION_OLDLOGFILE_NAME);
 
     // Move prefab HStore files into place (if any)
     
@@ -378,7 +330,7 @@
     for(Map.Entry<Text, HColumnDescriptor> e :
         this.regionInfo.tableDesc.families().entrySet()) {
       Text colFamily = HStoreKey.extractFamily(e.getKey());
-      stores.put(colFamily, new HStore(dir, this.regionInfo.regionName,
+      stores.put(colFamily, new HStore(rootDir, this.regionInfo.regionName,
           e.getValue(), fs, oldLogFile, conf));
     }
 
@@ -411,12 +363,12 @@
   }
 
   /** Returns a HRegionInfo object for this region */
-  public HRegionInfo getRegionInfo() {
+  HRegionInfo getRegionInfo() {
     return this.regionInfo;
   }
 
   /** returns true if region is closed */
-  public boolean isClosed() {
+  boolean isClosed() {
     boolean closed = false;
     synchronized(writestate) {
       closed = writestate.closed;
@@ -434,7 +386,7 @@
    * This method could take some time to execute, so don't call it from a 
    * time-sensitive thread.
    */
-  public Vector<HStoreFile> close() throws IOException {
+  Vector<HStoreFile> close() throws IOException {
     lock.obtainWriteLock();
     try {
       boolean shouldClose = false;
@@ -483,7 +435,7 @@
    *
    * Returns two brand-new (and open) HRegions
    */
-  public HRegion[] closeAndSplit(Text midKey, RegionUnavailableListener listener)
+  HRegion[] closeAndSplit(Text midKey, RegionUnavailableListener listener)
       throws IOException {
     
     if(((regionInfo.startKey.getLength() != 0)
@@ -572,9 +524,9 @@
 
     // Done
 
-    HRegion regionA = new HRegion(dir, log, fs, conf, regionAInfo, dirA, null);
+    HRegion regionA = new HRegion(rootDir, log, fs, conf, regionAInfo, dirA);
     
-    HRegion regionB = new HRegion(dir, log, fs, conf, regionBInfo, dirB, null);
+    HRegion regionB = new HRegion(rootDir, log, fs, conf, regionBInfo, dirB);
 
     // Cleanup
 
@@ -596,43 +548,43 @@
   // HRegion accessors
   //////////////////////////////////////////////////////////////////////////////
 
-  public Text getStartKey() {
+  Text getStartKey() {
     return regionInfo.startKey;
   }
   
-  public Text getEndKey() {
+  Text getEndKey() {
     return regionInfo.endKey;
   }
   
-  public long getRegionId() {
+  long getRegionId() {
     return regionInfo.regionId;
   }
 
-  public Text getRegionName() {
+  Text getRegionName() {
     return regionInfo.regionName;
   }
   
-  public Path getDir() {
-    return dir;
+  Path getRootDir() {
+    return rootDir;
   }
  
-  public HTableDescriptor getTableDesc() {
+  HTableDescriptor getTableDesc() {
     return regionInfo.tableDesc;
   }
   
-  public HLog getLog() {
+  HLog getLog() {
     return log;
   }
   
-  public Configuration getConf() {
+  Configuration getConf() {
     return conf;
   }
   
-  public Path getRegionDir() {
+  Path getRegionDir() {
     return regiondir;
   }
   
-  public FileSystem getFilesystem() {
+  FileSystem getFilesystem() {
     return fs;
   }
 
@@ -652,7 +604,7 @@
    * @param midKey      - (return value) midKey of the largest MapFile
    * @return            - true if the region should be split
    */
-  public boolean needsSplit(Text midKey) {
+  boolean needsSplit(Text midKey) {
     lock.obtainReadLock();
     try {
       Text key = new Text();
@@ -675,7 +627,7 @@
   /**
    * @return - returns the size of the largest HStore
    */
-  public long largestHStore() {
+  long largestHStore() {
     long maxsize = 0;
     lock.obtainReadLock();
     try {
@@ -697,7 +649,7 @@
   /**
    * @return true if the region should be compacted.
    */
-  public boolean needsCompaction() {
+  boolean needsCompaction() {
     boolean needsCompaction = false;
     this.lock.obtainReadLock();
     try {
@@ -726,7 +678,7 @@
    * HRegion is busy doing something else storage-intensive (like flushing the 
    * cache).  The caller should check back later.
    */
-  public boolean compactStores() throws IOException {
+  boolean compactStores() throws IOException {
     boolean shouldCompact = false;
     lock.obtainReadLock();
     try {
@@ -766,7 +718,7 @@
    * Each HRegion is given a periodic chance to flush the cache, which it should
    * only take if there have been a lot of uncommitted writes.
    */
-  public void optionallyFlush() throws IOException {
+  void optionallyFlush() throws IOException {
     if(commitsSinceFlush > maxUnflushedEntries) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Flushing cache. Number of commits is: " + commitsSinceFlush);
@@ -792,8 +744,7 @@
    * This method may block for some time, so it should not be called from a 
    * time-sensitive thread.
    */
-  public Vector<HStoreFile> flushcache(boolean disableFutureWrites)
-  throws IOException {
+  Vector<HStoreFile> flushcache(boolean disableFutureWrites) throws IOException {
     boolean shouldFlush = false;
     synchronized(writestate) {
       if((! writestate.writesOngoing)
@@ -934,18 +885,18 @@
   //////////////////////////////////////////////////////////////////////////////
 
   /** Fetch a single data item. */
-  public BytesWritable get(Text row, Text column) throws IOException {
+  BytesWritable get(Text row, Text column) throws IOException {
     BytesWritable[] results = get(row, column, Long.MAX_VALUE, 1);
     return (results == null)? null: results[0];
   }
   
   /** Fetch multiple versions of a single data item */
-  public BytesWritable[] get(Text row, Text column, int numVersions) throws IOException {
+  BytesWritable[] get(Text row, Text column, int numVersions) throws IOException {
     return get(row, column, Long.MAX_VALUE, numVersions);
   }
 
   /** Fetch multiple versions of a single data item, with timestamp. */
-  public BytesWritable[] get(Text row, Text column, long timestamp, int numVersions) 
+  BytesWritable[] get(Text row, Text column, long timestamp, int numVersions) 
       throws IOException {
     
     if(writestate.closed) {
@@ -969,8 +920,7 @@
     }
   }
 
-  // Private implementation: get the value for the indicated HStoreKey
-
+  /** Private implementation: get the value for the indicated HStoreKey */
   private BytesWritable[] get(HStoreKey key, int numVersions) throws IOException {
 
     lock.obtainReadLock();
@@ -1007,7 +957,7 @@
    * determine which column groups are useful for that row.  That would let us 
    * avoid a bunch of disk activity.
    */
-  public TreeMap<Text, BytesWritable> getFull(Text row) throws IOException {
+  TreeMap<Text, BytesWritable> getFull(Text row) throws IOException {
     HStoreKey key = new HStoreKey(row, System.currentTimeMillis());
 
     lock.obtainReadLock();
@@ -1029,7 +979,7 @@
    * Return an iterator that scans over the HRegion, returning the indicated 
    * columns.  This Iterator must be closed by the caller.
    */
-  public HInternalScannerInterface getScanner(Text[] cols, Text firstRow)
+  HInternalScannerInterface getScanner(Text[] cols, Text firstRow)
   throws IOException {
     lock.obtainReadLock();
     try {
@@ -1067,7 +1017,7 @@
    * @return lockid
    * @see #put(long, Text, BytesWritable)
    */
-  public long startUpdate(Text row) throws IOException {
+  long startUpdate(Text row) throws IOException {
     // We obtain a per-row lock, so other clients will block while one client
     // performs an update.  The read lock is released by the client calling
     // #commit or #abort or if the HRegionServer lease on the lock expires.
@@ -1085,8 +1035,7 @@
    * This method really just tests the input, then calls an internal localput() 
    * method.
    */
-  public void put(long lockid, Text targetCol, BytesWritable val)
-  throws IOException {
+  void put(long lockid, Text targetCol, BytesWritable val) throws IOException {
     if(val.getSize() == DELETE_BYTES.getSize()
         && val.compareTo(DELETE_BYTES) == 0) {
       throw new IOException("Cannot insert value: " + val);
@@ -1097,11 +1046,11 @@
   /**
    * Delete a value or write a value. This is a just a convenience method for put().
    */
-  public void delete(long lockid, Text targetCol) throws IOException {
+  void delete(long lockid, Text targetCol) throws IOException {
     localput(lockid, targetCol, DELETE_BYTES);
   }
 
-  /*
+  /**
    * Private implementation.
    * 
    * localput() is used for both puts and deletes. We just place the values
@@ -1148,7 +1097,7 @@
    * writes associated with the given row-lock.  These values have not yet
    * been placed in memcache or written to the log.
    */
-  public void abort(long lockid) throws IOException {
+  void abort(long lockid) throws IOException {
     Text row = getRowFromLock(lockid);
     if(row == null) {
       throw new LockException("No write lock for lockid " + lockid);
@@ -1182,7 +1131,7 @@
    * @param lockid Lock for row we're to commit.
    * @throws IOException
    */
-  public void commit(final long lockid) throws IOException {
+  void commit(final long lockid) throws IOException {
     // Remove the row from the pendingWrites list so 
     // that repeated executions won't screw this up.
     Text row = getRowFromLock(lockid);
@@ -1286,7 +1235,8 @@
     }
   }
   
-  /** Release the row lock!
+  /** 
+   * Release the row lock!
    * @param lock Name of row whose lock we are to release
    */
   void releaseRowLock(Text row) {
@@ -1309,7 +1259,7 @@
     }
   }
   
-  /*
+  /**
    * HScanner is an iterator through a bunch of rows in an HRegion.
    */
   private static class HScanner implements HInternalScannerInterface {
@@ -1321,7 +1271,7 @@
 
     /** Create an HScanner with a handle on many HStores. */
     @SuppressWarnings("unchecked")
-    public HScanner(Text[] cols, Text firstRow, HMemcache memcache, HStore[] stores)
+    HScanner(Text[] cols, Text firstRow, HMemcache memcache, HStore[] stores)
     throws IOException {  
       long scanTime = System.currentTimeMillis();
       this.scanners = new HInternalScannerInterface[stores.length + 1];
@@ -1391,9 +1341,12 @@
       return multipleMatchers;
     }
     
-    /**
+    /* (non-Javadoc)
+     * 
      * Grab the next row's worth of values.  The HScanner will return the most 
      * recent data value for each row that is not newer than the target time.
+     *
+     * @see org.apache.hadoop.hbase.HInternalScannerInterface#next(org.apache.hadoop.hbase.HStoreKey, java.util.TreeMap)
      */
     public boolean next(HStoreKey key, TreeMap<Text, BytesWritable> results)
     throws IOException {
@@ -1477,7 +1430,9 @@
       }
     }
 
-    /** All done with the scanner. */
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hbase.HInternalScannerInterface#close()
+     */
     public void close() {
       for(int i = 0; i < scanners.length; i++) {
         if(scanners[i] != null) {
@@ -1493,16 +1448,17 @@
    * Convenience method creating new HRegions.
    * @param regionId ID to use
    * @param tableDesc Descriptor
-   * @param dir Home directory for the new region.
+   * @param rootDir Root directory of HBase instance
    * @param conf
    * @return New META region (ROOT or META).
    * @throws IOException
    */
-  public static HRegion createHRegion(final long regionId,
-    final HTableDescriptor tableDesc, final Path dir, final Configuration conf)
+  static HRegion createHRegion(final long regionId,
+    final HTableDescriptor tableDesc, final Path rootDir,
+    final Configuration conf)
   throws IOException {
     return createHRegion(new HRegionInfo(regionId, tableDesc, null, null),
-      dir, conf, null, null);
+      rootDir, conf, null);
   }
   
   /**
@@ -1510,25 +1466,22 @@
    * bootstrap code in the HMaster constructor
    * 
    * @param info Info for region to create.
-   * @param dir Home dir for new region
+   * @param rootDir Root directory for HBase instance
    * @param conf
    * @param initialFiles InitialFiles to pass new HRegion. Pass null if none.
-   * @param oldLogFile Old log file to use in region initialization.  Pass null
-   * if none. 
    * @return new HRegion
    * 
    * @throws IOException
    */
-  public static HRegion createHRegion(final HRegionInfo info,
-    final Path dir, final Configuration conf, final Path initialFiles,
-    final Path oldLogFile) 
+  static HRegion createHRegion(final HRegionInfo info,
+    final Path rootDir, final Configuration conf, final Path initialFiles)
   throws IOException {
-    Path regionDir = HStoreFile.getHRegionDir(dir, info.regionName);
+    Path regionDir = HStoreFile.getHRegionDir(rootDir, info.regionName);
     FileSystem fs = FileSystem.get(conf);
     fs.mkdirs(regionDir);
-    return new HRegion(dir,
+    return new HRegion(rootDir,
       new HLog(fs, new Path(regionDir, HREGION_LOGDIR_NAME), conf),
-      fs, conf, info, initialFiles, oldLogFile);
+      fs, conf, info, initialFiles);
   }
   
   /**
@@ -1541,7 +1494,7 @@
    *
    * @throws IOException
    */
-  public static void addRegionToMETA(HRegion meta, HRegion r)
+  static void addRegionToMETA(HRegion meta, HRegion r)
   throws IOException {  
     // The row key is the region name
     long writeid = meta.startUpdate(r.getRegionName());
@@ -1552,7 +1505,7 @@
     meta.commit(writeid);
   }
   
-  public static void addRegionToMETA(final HClient client,
+  static void addRegionToMETA(final HClient client,
       final Text table, final HRegion region,
       final HServerAddress serverAddress,
       final long startCode)
@@ -1578,7 +1531,7 @@
    * @param regionName Region to remove.
    * @throws IOException
    */
-  public static void removeRegionFromMETA(final HClient client,
+  static void removeRegionFromMETA(final HClient client,
       final Text table, final Text regionName)
   throws IOException {
     client.openTable(table);

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java?view=diff&rev=546192&r1=546191&r2=546192
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java Mon Jun 11 09:46:27 2007
@@ -20,11 +20,9 @@
 import java.io.DataInputStream;
 import java.io.DataOutput;
 import java.io.IOException;
-import java.util.Map;
 
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.io.WritableComparable;
 
 /**
  * HRegion information.
@@ -32,13 +30,14 @@
  * HRegions' table descriptor, etc.
  */
 public class HRegionInfo implements WritableComparable {
-  public Text regionName;
-  public long regionId;
-  public Text startKey;
-  public Text endKey;
-  public boolean offLine;
-  public HTableDescriptor tableDesc;
+  Text regionName;
+  long regionId;
+  Text startKey;
+  Text endKey;
+  boolean offLine;
+  HTableDescriptor tableDesc;
   
+  /** Default constructor - creates empty object */
   public HRegionInfo() {
     this.regionId = 0;
     this.tableDesc = new HTableDescriptor();
@@ -48,14 +47,28 @@
     this.offLine = false;
   }
   
+  /**
+   * Construct a HRegionInfo object from byte array
+   * 
+   * @param serializedBytes
+   * @throws IOException
+   */
   public HRegionInfo(final byte [] serializedBytes) throws IOException {
     this();
     readFields(new DataInputStream(new ByteArrayInputStream(serializedBytes)));
   }
 
-  public HRegionInfo(long regionId, HTableDescriptor tableDesc,
-      Text startKey, Text endKey)
-  throws IllegalArgumentException {
+  /**
+   * Construct HRegionInfo with explicit parameters
+   * 
+   * @param regionId    - the regionid
+   * @param tableDesc   - the table descriptor
+   * @param startKey    - first key in region
+   * @param endKey      - end of key range
+   * @throws IllegalArgumentException
+   */
+  public HRegionInfo(long regionId, HTableDescriptor tableDesc, Text startKey,
+      Text endKey) throws IllegalArgumentException {
     
     this.regionId = regionId;
     

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInterface.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInterface.java?view=diff&rev=546192&r1=546191&r2=546192
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInterface.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInterface.java Mon Jun 11 09:46:27 2007
@@ -26,18 +26,67 @@
  * a handle to the HRegionInterface.
  ******************************************************************************/
 public interface HRegionInterface extends VersionedProtocol {
-  public static final long versionID = 1L; // initial version
+  /** initial version */
+  public static final long versionID = 1L;
 
-  // Get metainfo about an HRegion
-
-  public HRegionInfo getRegionInfo(Text regionName) throws NotServingRegionException;
+  /** 
+   * Get metainfo about an HRegion
+   * 
+   * @param regionName                  - name of the region
+   * @return                            - HRegionInfo object for region
+   * @throws NotServingRegionException
+   */
+  public HRegionInfo getRegionInfo(final Text regionName) throws NotServingRegionException;
 
-  // GET methods for an HRegion.
+  /**
+   * Retrieve a single value from the specified region for the specified row
+   * and column keys
+   * 
+   * @param regionName  - name of region
+   * @param row         - row key
+   * @param column      - column key
+   * @return            - value for that region/row/column
+   * @throws IOException
+   */
+  public BytesWritable get(final Text regionName, final Text row, final Text column) throws IOException;
 
-  public BytesWritable get(Text regionName, Text row, Text column) throws IOException;
-  public BytesWritable[] get(Text regionName, Text row, Text column, int numVersions) throws IOException;
-  public BytesWritable[] get(Text regionName, Text row, Text column, long timestamp, int numVersions) throws IOException;
-  public LabelledData[] getRow(Text regionName, Text row) throws IOException;
+  /**
+   * Get the specified number of versions of the specified row and column
+   * 
+   * @param regionName  - region name
+   * @param row         - row key
+   * @param column      - column key
+   * @param numVersions - number of versions to return
+   * @return            - array of values
+   * @throws IOException
+   */
+  public BytesWritable[] get(final Text regionName, final Text row,
+      final Text column, final int numVersions) throws IOException;
+  
+  /**
+   * Get the specified number of versions of the specified row and column with
+   * the specified timestamp.
+   *
+   * @param regionName  - region name
+   * @param row         - row key
+   * @param column      - column key
+   * @param timestamp   - timestamp
+   * @param numVersions - number of versions to return
+   * @return            - array of values
+   * @throws IOException
+   */
+  public BytesWritable[] get(final Text regionName, final Text row, final Text column,
+      final long timestamp, final int numVersions) throws IOException;
+  
+  /**
+   * Get all the data for the specified row
+   * 
+   * @param regionName  - region name
+   * @param row         - row key
+   * @return            - array of values
+   * @throws IOException
+   */
+  public KeyedData[] getRow(final Text regionName, final Text row) throws IOException;
 
   //////////////////////////////////////////////////////////////////////////////
   // Start an atomic row insertion/update.  No changes are committed until the 
@@ -50,11 +99,80 @@
   // The client can gain extra time with a call to renewLease().
   //////////////////////////////////////////////////////////////////////////////
 
-  public long startUpdate(Text regionName, long clientid, Text row) throws IOException;
-  public void put(Text regionName, long clientid, long lockid, Text column, BytesWritable val) throws IOException;
-  public void delete(Text regionName, long clientid, long lockid, Text column) throws IOException;
-  public void abort(Text regionName, long clientid, long lockid) throws IOException;
-  public void commit(Text regionName, long clientid, long lockid) throws IOException;
+  /** 
+   * Start an atomic row insertion/update.  No changes are committed until the 
+   * call to commit() returns. A call to abort() will abandon any updates in progress.
+   *
+   * Callers to this method are given a lease for each unique lockid; before the
+   * lease expires, either abort() or commit() must be called. If it is not 
+   * called, the system will automatically call abort() on the client's behalf.
+   *
+   * The client can gain extra time with a call to renewLease().
+   * 
+   * @param regionName  - region name
+   * @param clientid    - a unique value to identify the client
+   * @param row         - Name of row to start update against.
+   * @return Row lockid.
+   * @throws IOException
+   */
+  public long startUpdate(final Text regionName, final long clientid,
+      final Text row) throws IOException;
+  
+  /** 
+   * Change a value for the specified column
+   *
+   * @param regionName          - region name
+   * @param clientid            - a unique value to identify the client
+   * @param lockid              - lock id returned from startUpdate
+   * @param column              - column whose value is being set
+   * @param val                 - new value for column
+   * @throws IOException
+   */
+  public void put(final Text regionName, final long clientid, final long lockid,
+      final Text column, final BytesWritable val) throws IOException;
+  
+  /** 
+   * Delete the value for a column
+   *
+   * @param regionName          - region name
+   * @param clientid            - a unique value to identify the client
+   * @param lockid              - lock id returned from startUpdate
+   * @param column              - name of column whose value is to be deleted
+   * @throws IOException
+   */
+  public void delete(final Text regionName, final long clientid, final long lockid,
+      final Text column) throws IOException;
+  
+  /** 
+   * Abort a row mutation
+   *
+   * @param regionName          - region name
+   * @param clientid            - a unique value to identify the client
+   * @param lockid              - lock id returned from startUpdate
+   * @throws IOException
+   */
+  public void abort(final Text regionName, final long clientid, 
+      final long lockid) throws IOException;
+  
+  /** 
+   * Finalize a row mutation
+   *
+   * @param regionName          - region name
+   * @param clientid            - a unique value to identify the client
+   * @param lockid              - lock id returned from startUpdate
+   * @throws IOException
+   */
+  public void commit(final Text regionName, final long clientid,
+      final long lockid) throws IOException;
+  
+  /**
+   * Renew lease on update
+   * 
+   * @param lockid              - lock id returned from startUpdate
+   * @param clientid            - a unique value to identify the client
+   * @throws IOException
+   */
   public void renewLease(long lockid, long clientid) throws IOException;
 
   //////////////////////////////////////////////////////////////////////////////
@@ -77,11 +195,10 @@
    * Get the next set of values
    * 
    * @param scannerId   - clientId passed to openScanner
-   * @param key         - the next HStoreKey
-   * @return            - true if a value was retrieved
+   * @return            - array of values
    * @throws IOException
    */
-  public LabelledData[] next(long scannerId, HStoreKey key) throws IOException;
+  public KeyedData[] next(long scannerId) throws IOException;
   
   /**
    * Close a scanner

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java?view=diff&rev=546192&r1=546191&r2=546192
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java Mon Jun 11 09:46:27 2007
@@ -19,9 +19,10 @@
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.Map;
 import java.util.Random;
+import java.util.SortedMap;
 import java.util.TreeMap;
 import java.util.Vector;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -42,9 +43,11 @@
  * HRegionServer makes a set of HRegions available to clients.  It checks in with
  * the HMaster. There are many HRegionServers in a single HBase deployment.
  ******************************************************************************/
-public class HRegionServer
-    implements HConstants, HRegionInterface, Runnable {
+public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.ipc.VersionedProtocol#getProtocolVersion(java.lang.String, long)
+   */
   public long getProtocolVersion(final String protocol, 
       @SuppressWarnings("unused") final long clientVersion)
   throws IOException { 
@@ -57,18 +60,20 @@
   static final Log LOG = LogFactory.getLog(HRegionServer.class);
   
   volatile boolean stopRequested;
-  private Path regionDir;
+  volatile boolean abortRequested;
+  private Path rootDir;
   HServerInfo info;
   Configuration conf;
   private Random rand;
   
   // region name -> HRegion
-  TreeMap<Text, HRegion> onlineRegions = new TreeMap<Text, HRegion>();
+  SortedMap<Text, HRegion> onlineRegions;
   Map<Text, HRegion> retiringRegions = new HashMap<Text, HRegion>();
   
   final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
   private Vector<HMsg> outboundMsgs;
 
+  int numRetries;
   long threadWakeFrequency;
   private long msgInterval;
   
@@ -78,20 +83,24 @@
   private Thread splitOrCompactCheckerThread;
   Integer splitOrCompactLock = Integer.valueOf(0);
   
-  /*
+  /**
    * Interface used by the {@link org.apache.hadoop.io.retry} mechanism.
    */
-  interface UpdateMetaInterface {
-    /*
+  public interface UpdateMetaInterface {
+    /**
      * @return True if succeeded.
      * @throws IOException
      */
-   boolean update() throws IOException;
+   public boolean update() throws IOException;
   }
-  
+
+  /** Runs periodically to determine if regions need to be compacted or split */
   class SplitOrCompactChecker implements Runnable, RegionUnavailableListener {
     HClient client = new HClient(conf);
   
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hbase.RegionUnavailableListener#closing(org.apache.hadoop.io.Text)
+     */
     public void closing(final Text regionName) {
       lock.writeLock().lock();
       try {
@@ -106,6 +115,9 @@
       }
     }
     
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hbase.RegionUnavailableListener#closed(org.apache.hadoop.io.Text)
+     */
     public void closed(final Text regionName) {
       lock.writeLock().lock();
       try {
@@ -118,6 +130,9 @@
       }
     }
 
+    /* (non-Javadoc)
+     * @see java.lang.Runnable#run()
+     */
     public void run() {
       while(! stopRequested) {
         long startTime = System.currentTimeMillis();
@@ -180,7 +195,7 @@
       // splitting a 'normal' region, and the ROOT table needs to be
       // updated if we are splitting a META region.
       final Text tableToUpdate =
-        (oldRegion.find(META_TABLE_NAME.toString()) == 0) ?
+        region.getRegionInfo().tableDesc.getName().equals(META_TABLE_NAME) ?
             ROOT_TABLE_NAME : META_TABLE_NAME;
       if(LOG.isDebugEnabled()) {
         LOG.debug("Updating " + tableToUpdate + " with region split info");
@@ -188,6 +203,9 @@
       
       // Wrap the update of META region with an org.apache.hadoop.io.retry.
       UpdateMetaInterface implementation = new UpdateMetaInterface() {
+        /* (non-Javadoc)
+         * @see org.apache.hadoop.hbase.HRegionServer.UpdateMetaInterface#update()
+         */
         public boolean update() throws IOException {
           HRegion.removeRegionFromMETA(client, tableToUpdate,
             region.getRegionName());
@@ -232,7 +250,11 @@
   private Flusher cacheFlusher;
   private Thread cacheFlusherThread;
   Integer cacheFlusherLock = Integer.valueOf(0);
+  /** Runs periodically to flush the memcache */
   class Flusher implements Runnable {
+    /* (non-Javadoc)
+     * @see java.lang.Runnable#run()
+     */
     public void run() {
       while(! stopRequested) {
         long startTime = System.currentTimeMillis();
@@ -283,21 +305,22 @@
   // File paths
   
   private FileSystem fs;
-  private Path oldlogfile;
   
   // Logging
+  
   HLog log;
   private LogRoller logRoller;
   private Thread logRollerThread;
   Integer logRollerLock = Integer.valueOf(0);
   
-  /**
-   * Log rolling Runnable.
-   */
+  /** Runs periodically to determine if the log should be rolled */
   class LogRoller implements Runnable {
     private int maxLogEntries =
       conf.getInt("hbase.regionserver.maxlogentries", 30 * 1000);
     
+    /* (non-Javadoc)
+     * @see java.lang.Runnable#run()
+     */
     public void run() {
       while(! stopRequested) {
         synchronized(logRollerLock) {
@@ -339,48 +362,61 @@
   // Leases
   private Leases leases;
 
-  /** Start a HRegionServer at the default location */
+  /**
+   * Starts a HRegionServer at the default location
+   * @param conf
+   * @throws IOException
+   */
   public HRegionServer(Configuration conf) throws IOException {
-    this(new Path(conf.get(HREGION_DIR, DEFAULT_HREGION_DIR)),
+    this(new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR)),
         new HServerAddress(conf.get(REGIONSERVER_ADDRESS, "localhost:0")),
         conf);
   }
   
-  /** Start a HRegionServer at an indicated location */
-  public HRegionServer(Path regionDir, HServerAddress address,
+  /**
+   * Starts a HRegionServer at the specified location
+   * @param rootDir
+   * @param address
+   * @param conf
+   * @throws IOException
+   */
+  public HRegionServer(Path rootDir, HServerAddress address,
       Configuration conf) throws IOException {
     
     // Basic setup
     this.stopRequested = false;
-    this.regionDir = regionDir;
+    this.abortRequested = false;
+    this.rootDir = rootDir;
     this.conf = conf;
     this.rand = new Random();
+    this.onlineRegions =
+      Collections.synchronizedSortedMap(new TreeMap<Text, HRegion>());
+    
     this.outboundMsgs = new Vector<HMsg>();
     this.scanners =
       Collections.synchronizedMap(new TreeMap<Text, HInternalScannerInterface>());
 
     // Config'ed params
+    this.numRetries =  conf.getInt("hbase.client.retries.number", 2);
     this.threadWakeFrequency = conf.getLong(THREAD_WAKE_FREQUENCY, 10 * 1000);
     this.msgInterval = conf.getLong("hbase.regionserver.msginterval",
       15 * 1000);
     this.splitOrCompactCheckFrequency =
       conf.getLong("hbase.regionserver.thread.splitcompactcheckfrequency",
       60 * 1000);
-    
+
     // Cache flushing
     this.cacheFlusher = new Flusher();
-    this.cacheFlusherThread =
-      new Thread(cacheFlusher, "HRegionServer.cacheFlusher");
+    this.cacheFlusherThread = new Thread(cacheFlusher);
     
     // Check regions to see if they need to be split
     this.splitOrCompactChecker = new SplitOrCompactChecker();
-    this.splitOrCompactCheckerThread =
-      new Thread(splitOrCompactChecker, "HRegionServer.splitOrCompactChecker");
+    this.splitOrCompactCheckerThread = new Thread(splitOrCompactChecker);
     
     // Process requests from Master
-    this.toDo = new Vector<HMsg>();
+    this.toDo = new LinkedList<ToDoEntry>();
     this.worker = new Worker();
-    this.workerThread = new Thread(worker, "HRegionServer.worker");
+    this.workerThread = new Thread(worker);
 
     try {
       // Server to handle client requests
@@ -398,20 +434,19 @@
         this.info.getServerAddress().getBindAddress() + "_"
         + this.info.getServerAddress().getPort();
       
-      Path newlogdir = new Path(regionDir, "log" + "_" + serverName);
-      this.oldlogfile = new Path(regionDir, "oldlogfile" + "_" + serverName);
+      Path logdir = new Path(rootDir, "log" + "_" + serverName);
 
       // Logging
       
       this.fs = FileSystem.get(conf);
-      HLog.consolidateOldLog(newlogdir, oldlogfile, fs, conf);
-      // TODO: Now we have a consolidated log for all regions, sort and
-      // then split result by region passing the splits as reconstruction
-      // logs to HRegions on start. Or, rather than consolidate, split logs
-      // into per region files.
-      this.log = new HLog(fs, newlogdir, conf);
+      if(fs.exists(logdir)) {
+        throw new RegionServerRunningException("region server already running at "
+            + this.info.getServerAddress().toString());
+      }
+      
+      this.log = new HLog(fs, logdir, conf);
       this.logRoller = new LogRoller();
-      this.logRollerThread = new Thread(logRoller, "HRegionServer.logRoller");
+      this.logRollerThread = new Thread(logRoller);
 
       // Remote HMaster
       
@@ -420,40 +455,37 @@
           new HServerAddress(conf.get(MASTER_ADDRESS)).getInetSocketAddress(),
           conf);
 
-      // Threads
-      
-      this.workerThread.start();
-      this.cacheFlusherThread.start();
-      this.splitOrCompactCheckerThread.start();
-      this.logRollerThread.start();
-      this.leases = new Leases(conf.getLong("hbase.regionserver.lease.period", 
-          3 * 60 * 1000), threadWakeFrequency);
-      
-      // Server
-
-      this.server.start();
-
     } catch(IOException e) {
       this.stopRequested = true;
       throw e;
     }
-    
-    LOG.info("HRegionServer started at: " + address.toString());
   }
 
   /**
-   * Set a flag that will cause all the HRegionServer threads to shut down
+   * Sets a flag that will cause all the HRegionServer threads to shut down
    * in an orderly fashion.
    */
-  public synchronized void stop() {
+  synchronized void stop() {
     stopRequested = true;
     notifyAll();                        // Wakes run() if it is sleeping
   }
+  
+  /**
+   * Causes the server to exit without closing the regions it is serving,
+   * without closing the log it is using, and without notifying the master.
+   * 
+   * FOR DEBUGGING ONLY
+   */
+  synchronized void abort() {
+    abortRequested = true;
+    stop();
+  }
 
-  /** Wait on all threads to finish.
+  /** 
+   * Wait on all threads to finish.
    * Presumption is that all closes and stops have already been called.
    */
-  public void join() {
+  void join() {
     try {
       this.workerThread.join();
     } catch(InterruptedException iex) {
@@ -489,6 +521,33 @@
    * load/unload instructions.
    */
   public void run() {
+    
+    // Threads
+    
+    String threadName = Thread.currentThread().getName();
+
+    workerThread.setName(threadName + ".worker");
+    workerThread.start();
+    cacheFlusherThread.setName(threadName + ".cacheFlusher");
+    cacheFlusherThread.start();
+    splitOrCompactCheckerThread.setName(threadName + ".splitOrCompactChecker");
+    splitOrCompactCheckerThread.start();
+    logRollerThread.setName(threadName + ".logRoller");
+    logRollerThread.start();
+    leases = new Leases(conf.getLong("hbase.regionserver.lease.period", 
+        3 * 60 * 1000), threadWakeFrequency);
+    
+    // Server
+
+    try {
+      this.server.start();
+      LOG.info("HRegionServer started at: " + info.getServerAddress().toString());
+      
+    } catch(IOException e) {
+      LOG.error(e);
+      stopRequested = true;
+    }
+
     while(! stopRequested) {
       long lastMsg = 0;
       long waitTime;
@@ -545,7 +604,6 @@
                   if (LOG.isDebugEnabled()) {
                     LOG.debug("Got call server startup message");
                   }
-                  toDo.clear();
                   closeAllRegions();
                   restart = true;
                   break;
@@ -554,8 +612,6 @@
                   if (LOG.isDebugEnabled()) {
                     LOG.debug("Got regionserver stop message");
                   }
-                  toDo.clear();
-                  closeAllRegions();
                   stopRequested = true;
                   break;
                   
@@ -563,19 +619,21 @@
                   if (LOG.isDebugEnabled()) {
                     LOG.debug("Got default message");
                   }
-                  toDo.add(msgs[i]);
+                  toDo.addLast(new ToDoEntry(msgs[i]));
                 }
               }
               
+              if(restart || stopRequested) {
+                toDo.clear();
+                break;
+              }
+              
               if(toDo.size() > 0) {
                 if (LOG.isDebugEnabled()) {
                   LOG.debug("notify on todo");
                 }
                 toDo.notifyAll();
               }
-              if(restart || stopRequested) {
-                break;
-              }
             }
 
           } catch(IOException e) {
@@ -596,41 +654,65 @@
         }
       }
     }
-    try {
-      HMsg[] exitMsg = { new HMsg(HMsg.MSG_REPORT_EXITING) };
-      hbaseMaster.regionServerReport(info, exitMsg);
-      
-    } catch(IOException e) {
-      LOG.warn(e);
+    this.worker.stop();
+    this.server.stop();
+    leases.close();
+    
+    // Send interrupts to wake up threads if sleeping so they notice shutdown.
+
+    synchronized(logRollerLock) {
+      this.logRollerThread.interrupt();
+    }
+
+    synchronized(cacheFlusherLock) {
+      this.cacheFlusherThread.interrupt();
     }
-    try {
-      LOG.info("stopping server at: " + info.getServerAddress().toString());
 
-      // Send interrupts to wake up threads if sleeping so they notice shutdown.
+    synchronized(splitOrCompactLock) {
+      this.splitOrCompactCheckerThread.interrupt();
+    }
 
-      synchronized(logRollerLock) {
-        this.logRollerThread.interrupt();
+    if(abortRequested) {
+      try {
+        log.rollWriter();
+        
+      } catch(IOException e) {
+        LOG.warn(e);
       }
+      LOG.info("aborting server at: " + info.getServerAddress().toString());
       
-      synchronized(cacheFlusherLock) {
-        this.cacheFlusherThread.interrupt();
+    } else {
+      Vector<HRegion> closedRegions = closeAllRegions();
+      try {
+        log.closeAndDelete();
+
+      } catch(IOException e) {
+        LOG.error(e);
       }
-      
-      synchronized(splitOrCompactLock) {
-        this.splitOrCompactCheckerThread.interrupt();
+      try {
+        HMsg[] exitMsg = new HMsg[closedRegions.size() + 1];
+        exitMsg[0] = new HMsg(HMsg.MSG_REPORT_EXITING);
+
+        // Tell the master what regions we are/were serving
+
+        int i = 1;
+        for(HRegion region: closedRegions) {
+          exitMsg[i++] = new HMsg(HMsg.MSG_REPORT_CLOSE, region.getRegionInfo());
+        }
+
+        LOG.info("telling master that region server is shutting down at: "
+            +info.getServerAddress().toString());
+        
+        hbaseMaster.regionServerReport(info, exitMsg);
+
+      } catch(IOException e) {
+        LOG.warn(e);
       }
-      
-      this.worker.stop();
-      this.server.stop();
+      LOG.info("stopping server at: " + info.getServerAddress().toString());
+    }
 
-      closeAllRegions();
-      log.close();
-      leases.close();
-      join();
+    join();
       
-    } catch(IOException e) {
-      LOG.error(e);
-    }
     if(LOG.isDebugEnabled()) {
       LOG.debug("main thread exiting");
     }
@@ -650,7 +732,7 @@
     }
   }
   
-  /** 
+  /**
    * Add to the outbound message buffer
    * 
    * When a region splits, we need to tell the master that there are two new 
@@ -671,89 +753,84 @@
   // HMaster-given operations
   //////////////////////////////////////////////////////////////////////////////
 
-  Vector<HMsg> toDo;
+  private static class ToDoEntry {
+    int tries;
+    HMsg msg;
+    ToDoEntry(HMsg msg) {
+      this.tries = 0;
+      this.msg = msg;
+    }
+  }
+  LinkedList<ToDoEntry> toDo;
   private Worker worker;
   private Thread workerThread;
+  /** Thread that performs long running requests from the master */
   class Worker implements Runnable {
-    public void stop() {
+    void stop() {
       synchronized(toDo) {
         toDo.notifyAll();
       }
     }
     
+    /* (non-Javadoc)
+     * @see java.lang.Runnable#run()
+     */
     public void run() {
-      for(HMsg msg = null; !stopRequested; ) {
+      for(ToDoEntry e = null; !stopRequested; ) {
         synchronized(toDo) {
           while(toDo.size() == 0 && !stopRequested) {
             try {
               if (LOG.isDebugEnabled()) {
                 LOG.debug("Wait on todo");
               }
-              toDo.wait();
+              toDo.wait(threadWakeFrequency);
               if (LOG.isDebugEnabled()) {
                 LOG.debug("Wake on todo");
               }
-            } catch(InterruptedException e) {
+            } catch(InterruptedException ex) {
               // continue
             }
           }
           if(stopRequested) {
             continue;
           }
-          msg = toDo.remove(0);
+          e = toDo.removeFirst();
         }
         
         try {
-          switch(msg.getMsg()) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug(e.msg.toString());
+          }
+          
+          switch(e.msg.getMsg()) {
 
           case HMsg.MSG_REGION_OPEN:                    // Open a region
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("MSG_REGION_OPEN");
-            }
-            openRegion(msg.getRegionInfo());
+            openRegion(e.msg.getRegionInfo());
             break;
 
           case HMsg.MSG_REGION_CLOSE:                   // Close a region
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("MSG_REGION_CLOSE");
-            }
-            closeRegion(msg.getRegionInfo(), true);
+            closeRegion(e.msg.getRegionInfo(), true);
             break;
 
-          case HMsg.MSG_REGION_MERGE:                   // Merge two regions
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("MSG_REGION_MERGE");
-            }
-            //TODO ???
-            throw new IOException("TODO: need to figure out merge");
-            //break;
-
-          case HMsg.MSG_CALL_SERVER_STARTUP:            // Close regions, restart
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("MSG_CALL_SERVER_STARTUP");
-            }
-            closeAllRegions();
-            continue;
-
-          case HMsg.MSG_REGIONSERVER_STOP:              // Go away
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("MSG_REGIONSERVER_STOP");
-            }
-            stopRequested = true;
-            continue;
-
           case HMsg.MSG_REGION_CLOSE_WITHOUT_REPORT:    // Close a region, don't reply
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("MSG_REGION_CLOSE_WITHOUT_REPORT");
-            }
-            closeRegion(msg.getRegionInfo(), false);
+            closeRegion(e.msg.getRegionInfo(), false);
             break;
 
           default:
-            throw new IOException("Impossible state during msg processing.  Instruction: " + msg);
+            throw new AssertionError(
+                "Impossible state during msg processing.  Instruction: "
+                + e.msg.toString());
+          }
+        } catch(IOException ie) {
+          if(e.tries < numRetries) {
+            LOG.warn(ie);
+            e.tries++;
+            synchronized(toDo) {
+              toDo.addLast(e);
+            }
+          } else {
+            LOG.error("unable to process message: " + e.msg.toString(), ie);
           }
-        } catch(IOException e) {
-          LOG.error(e);
         }
       }
       LOG.info("worker thread exiting");
@@ -761,15 +838,18 @@
   }
   
   void openRegion(HRegionInfo regionInfo) throws IOException {
-    this.lock.writeLock().lock();
-    try {
-      HRegion region =
-        new HRegion(regionDir, log, fs, conf, regionInfo, null, oldlogfile);
-      this.onlineRegions.put(region.getRegionName(), region);
-      reportOpen(region); 
-    } finally {
-      this.lock.writeLock().unlock();
+    HRegion region = onlineRegions.get(regionInfo.regionName);
+    if(region == null) {
+      region = new HRegion(rootDir, log, fs, conf, regionInfo, null);
+
+      this.lock.writeLock().lock();
+      try {
+        this.onlineRegions.put(region.getRegionName(), region);
+      } finally {
+        this.lock.writeLock().unlock();
+      }
     }
+    reportOpen(region); 
   }
 
   void closeRegion(final HRegionInfo hri, final boolean reportWhenCompleted)
@@ -791,7 +871,7 @@
   }
 
   /** Called either when the master tells us to restart or from stop() */
-  void closeAllRegions() {
+  Vector<HRegion> closeAllRegions() {
     Vector<HRegion> regionsToClose = new Vector<HRegion>();
     this.lock.writeLock().lock();
     try {
@@ -800,8 +880,7 @@
     } finally {
       this.lock.writeLock().unlock();
     }
-    for(Iterator<HRegion> it = regionsToClose.iterator(); it.hasNext(); ) {
-      HRegion region = it.next();
+    for(HRegion region: regionsToClose) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("closing region " + region.getRegionName());
       }
@@ -812,106 +891,117 @@
         LOG.error("error closing region " + region.getRegionName(), e);
       }
     }
+    return regionsToClose;
   }
 
   //////////////////////////////////////////////////////////////////////////////
   // HRegionInterface
   //////////////////////////////////////////////////////////////////////////////
 
-  /** Obtain a table descriptor for the given region */
-  public HRegionInfo getRegionInfo(Text regionName)
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HRegionInterface#getRegionInfo(org.apache.hadoop.io.Text)
+   */
+  public HRegionInfo getRegionInfo(final Text regionName)
   throws NotServingRegionException {
     return getRegion(regionName).getRegionInfo();
   }
 
-  /** Get the indicated row/column */
-  public BytesWritable get(Text regionName, Text row, Text column)
-  throws IOException {
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HRegionInterface#get(org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text)
+   */
+  public BytesWritable get(final Text regionName, final Text row,
+      final Text column) throws IOException {
+    
     return getRegion(regionName).get(row, column);
   }
 
-  /** Get multiple versions of the indicated row/col */
-  public BytesWritable[] get(Text regionName, Text row, Text column, 
-      int numVersions)
-  throws IOException {
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HRegionInterface#get(org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, int)
+   */
+  public BytesWritable[] get(final Text regionName, final Text row,
+      final Text column, final int numVersions) throws IOException {
+    
     return getRegion(regionName).get(row, column, numVersions);
   }
 
-  /** Get multiple timestamped versions of the indicated row/col */
-  public BytesWritable[] get(Text regionName, Text row, Text column, 
-      long timestamp, int numVersions)
-  throws IOException {
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HRegionInterface#get(org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, long, int)
+   */
+  public BytesWritable[] get(final Text regionName, final Text row, final Text column, 
+      final long timestamp, final int numVersions) throws IOException {
+    
     return getRegion(regionName).get(row, column, timestamp, numVersions);
   }
 
-  /** Get all the columns (along with their names) for a given row. */
-  public LabelledData[] getRow(Text regionName, Text row) throws IOException {
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HRegionInterface#getRow(org.apache.hadoop.io.Text, org.apache.hadoop.io.Text)
+   */
+  public KeyedData[] getRow(final Text regionName, final Text row) throws IOException {
     HRegion region = getRegion(regionName);
     TreeMap<Text, BytesWritable> map = region.getFull(row);
-    LabelledData result[] = new LabelledData[map.size()];
+    KeyedData result[] = new KeyedData[map.size()];
     int counter = 0;
     for (Map.Entry<Text, BytesWritable> es: map.entrySet()) {
-      result[counter++] = new LabelledData(es.getKey(), es.getValue());
+      result[counter++] =
+        new KeyedData(new HStoreKey(row, es.getKey()), es.getValue());
     }
     return result;
   }
 
-  /**
-   * Start an update to the HBase.  This also creates a lease associated with
-   * the caller.
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HRegionInterface#next(long)
    */
-  private static class RegionListener extends LeaseListener {
-    private HRegion localRegion;
-    private long localLockId;
-    
-    public RegionListener(HRegion region, long lockId) {
-      this.localRegion = region;
-      this.localLockId = lockId;
-    }
+  public KeyedData[] next(final long scannerId)
+      throws IOException {
     
-    public void leaseExpired() {
-      try {
-        localRegion.abort(localLockId);
-        
-      } catch(IOException iex) {
-        LOG.error(iex);
-      }
-    }
-  }
-  
-  public LabelledData[] next(final long scannerId, final HStoreKey key)
-  throws IOException {
     Text scannerName = new Text(String.valueOf(scannerId));
     HInternalScannerInterface s = scanners.get(scannerName);
     if (s == null) {
-      throw new UnknownScannerException("Name: " + scannerName + ", key " +
-        key);
+      throw new UnknownScannerException("Name: " + scannerName);
     }
     leases.renewLease(scannerName, scannerName);
+    
+    // Collect values to be returned here
+    
+    ArrayList<KeyedData> values = new ArrayList<KeyedData>();
+    
     TreeMap<Text, BytesWritable> results = new TreeMap<Text, BytesWritable>();
-    ArrayList<LabelledData> values = new ArrayList<LabelledData>();
-    // Keep getting rows till we find one that has at least one non-deleted
-    // column value.
+    
+    // Keep getting rows until we find one that has at least one non-deleted column value
+    
+    HStoreKey key = new HStoreKey();
     while (s.next(key, results)) {
       for(Map.Entry<Text, BytesWritable> e: results.entrySet()) {
+        HStoreKey k = new HStoreKey(key.getRow(), e.getKey(), key.getTimestamp());
         BytesWritable val = e.getValue();
         if(val.getSize() == DELETE_BYTES.getSize()
             && val.compareTo(DELETE_BYTES) == 0) {
           // Column value is deleted. Don't return it.
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("skipping deleted value for key: " + k.toString());
+          }
           continue;
         }
-        values.add(new LabelledData(e.getKey(), val));
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("adding value for key: " + k.toString());
+        }
+        values.add(new KeyedData(k, val));
       }
-      if (values.size() > 0) {
-        // Row has something in it. Let it out. Else go get another row.
+      if(values.size() > 0) {
+        // Row has something in it. Return the value.
         break;
       }
-      // Need to clear results before we go back up and call 'next' again.
+      
+      // No data for this row, go get another.
+      
       results.clear();
     }
-    return values.toArray(new LabelledData[values.size()]);
+    return values.toArray(new KeyedData[values.size()]);
   }
 
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HRegionInterface#startUpdate(org.apache.hadoop.io.Text, long, org.apache.hadoop.io.Text)
+   */
   public long startUpdate(Text regionName, long clientid, Text row) 
       throws IOException {
     HRegion region = getRegion(regionName);
@@ -923,7 +1013,29 @@
     return lockid;
   }
 
-  /** Add something to the HBase. */
+  /** Create a lease for an update. If it times out, the update is aborted */
+  private static class RegionListener implements LeaseListener {
+    private HRegion localRegion;
+    private long localLockId;
+    
+    RegionListener(HRegion region, long lockId) {
+      this.localRegion = region;
+      this.localLockId = lockId;
+    }
+    
+    public void leaseExpired() {
+      try {
+        localRegion.abort(localLockId);
+        
+      } catch(IOException iex) {
+        LOG.error(iex);
+      }
+    }
+  }
+  
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HRegionInterface#put(org.apache.hadoop.io.Text, long, long, org.apache.hadoop.io.Text, org.apache.hadoop.io.BytesWritable)
+   */
   public void put(Text regionName, long clientid, long lockid, Text column, 
       BytesWritable val) throws IOException {
     HRegion region = getRegion(regionName, true);
@@ -932,7 +1044,9 @@
     region.put(lockid, column, val);
   }
 
-  /** Remove a cell from the HBase. */
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HRegionInterface#delete(org.apache.hadoop.io.Text, long, long, org.apache.hadoop.io.Text)
+   */
   public void delete(Text regionName, long clientid, long lockid, Text column) 
       throws IOException {
     HRegion region = getRegion(regionName);
@@ -941,7 +1055,9 @@
     region.delete(lockid, column);
   }
 
-  /** Abandon the transaction */
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HRegionInterface#abort(org.apache.hadoop.io.Text, long, long)
+   */
   public void abort(Text regionName, long clientid, long lockid) 
       throws IOException {
     HRegion region = getRegion(regionName, true);
@@ -950,7 +1066,9 @@
     region.abort(lockid);
   }
 
-  /** Confirm the transaction */
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HRegionInterface#commit(org.apache.hadoop.io.Text, long, long)
+   */
   public void commit(Text regionName, long clientid, long lockid) 
   throws IOException {
     HRegion region = getRegion(regionName, true);
@@ -959,13 +1077,16 @@
     region.commit(lockid);
   }
 
-  /** Don't let the client's lease expire just yet...  */
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HRegionInterface#renewLease(long, long)
+   */
   public void renewLease(long lockid, long clientid) throws IOException {
     leases.renewLease(new Text(String.valueOf(clientid)), 
         new Text(String.valueOf(lockid)));
   }
 
-  /** Private utility method for safely obtaining an HRegion handle.
+  /** 
+   * Private utility method for safely obtaining an HRegion handle.
    * @param regionName Name of online {@link HRegion} to return
    * @return {@link HRegion} for <code>regionName</code>
    * @throws NotServingRegionException
@@ -975,7 +1096,8 @@
     return getRegion(regionName, false);
   }
   
-  /** Private utility method for safely obtaining an HRegion handle.
+  /** 
+   * Private utility method for safely obtaining an HRegion handle.
    * @param regionName Name of online {@link HRegion} to return
    * @param checkRetiringRegions Set true if we're to check retiring regions
    * as well as online regions.
@@ -1013,14 +1135,21 @@
   //////////////////////////////////////////////////////////////////////////////
 
   Map<Text, HInternalScannerInterface> scanners;
-  
-  private class ScannerListener extends LeaseListener {
+
+  /** 
+   * Instantiated as a scanner lease.
+   * If the lease times out, the scanner is closed
+   */
+  private class ScannerListener implements LeaseListener {
     private Text scannerName;
     
-    public ScannerListener(Text scannerName) {
+    ScannerListener(Text scannerName) {
       this.scannerName = scannerName;
     }
     
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hbase.LeaseListener#leaseExpired()
+     */
     public void leaseExpired() {
       LOG.info("Scanner " + scannerName + " lease expired");
       HInternalScannerInterface s = null;
@@ -1033,7 +1162,9 @@
     }
   }
   
-  /** Start a scanner for a given HRegion. */
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HRegionInterface#openScanner(org.apache.hadoop.io.Text, org.apache.hadoop.io.Text[], org.apache.hadoop.io.Text)
+   */
   public long openScanner(Text regionName, Text[] cols, Text firstRow)
   throws IOException {
     HRegion r = getRegion(regionName);
@@ -1054,6 +1185,9 @@
     return scannerId;
   }
   
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HRegionInterface#close(long)
+   */
   public void close(long scannerId) throws IOException {
     Text scannerName = new Text(String.valueOf(scannerId));
     HInternalScannerInterface s = null;
@@ -1080,6 +1214,9 @@
     System.exit(0);
   }
   
+  /**
+   * @param args
+   */
   public static void main(String [] args) {
     if (args.length < 1) {
       printUsageAndExit();
@@ -1100,7 +1237,7 @@
         try {
           (new Thread(new HRegionServer(conf))).start();
         } catch (Throwable t) {
-          LOG.error( "Can not start master because "+
+          LOG.error( "Can not start region server because "+
               StringUtils.stringifyException(t) );
           System.exit(-1);
         }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegiondirReader.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegiondirReader.java?view=diff&rev=546192&r1=546191&r2=546192
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegiondirReader.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegiondirReader.java Mon Jun 11 09:46:27 2007
@@ -36,12 +36,12 @@
  * file system only.
  * TODO: Add dumping of HStoreFile content and HLog.
  */
-public class HRegiondirReader {
+class HRegiondirReader {
   private final Configuration conf;
   private final Path parentdir;
   
-  private static final Pattern REGION_NAME_PARSER =
-    Pattern.compile(HGlobals.HREGIONDIR_PREFIX +
+  static final Pattern REGION_NAME_PARSER =
+    Pattern.compile(HConstants.HREGIONDIR_PREFIX +
         "([^_]+)_([^_]*)_([^_]*)");
   
   private static final String USAGE = "Usage: " +
@@ -50,7 +50,7 @@
   
   private final List<HRegionInfo> infos;
   
-  public HRegiondirReader(final HBaseConfiguration conf,
+  HRegiondirReader(final HBaseConfiguration conf,
       final String parentdirName)
   throws IOException {
     this.conf = conf;
@@ -65,6 +65,9 @@
     // Look for regions in parentdir.
     Path [] regiondirs =
       fs.listPaths(parentdir, new PathFilter() {
+        /* (non-Javadoc)
+         * @see org.apache.hadoop.fs.PathFilter#accept(org.apache.hadoop.fs.Path)
+         */
         public boolean accept(Path path) {
           Matcher m = REGION_NAME_PARSER.matcher(path.getName());
           return m != null && m.matches();
@@ -136,12 +139,11 @@
     return families.toArray(new Text [] {});
   }
   
-  public List <HRegionInfo> getRegions() {
+  List <HRegionInfo> getRegions() {
     return this.infos;
   }
   
-  public HRegionInfo getRegionInfo(final String tableName)
-  throws IOException {
+  HRegionInfo getRegionInfo(final String tableName) {
     HRegionInfo result = null;
     for(HRegionInfo i: getRegions()) {
       if(i.tableDesc.getName().equals(tableName)) {
@@ -162,7 +164,7 @@
   
   private void dump(final HRegionInfo info) throws IOException {
     HRegion r = new HRegion(this.parentdir, null,
-        FileSystem.get(this.conf), conf, info, null, null);
+        FileSystem.get(this.conf), conf, info, null);
     Text [] families = info.tableDesc.families().keySet().toArray(new Text [] {});
     HInternalScannerInterface scanner = r.getScanner(families, new Text());
     HStoreKey key = new HStoreKey();
@@ -201,6 +203,10 @@
     }
   }
   
+  /**
+   * @param args
+   * @throws IOException
+   */
   public static void main(String[] args) throws IOException {
     if (args.length < 1) {
       System.err.println(USAGE);

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java?view=diff&rev=546192&r1=546191&r2=546192
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java Mon Jun 11 09:46:27 2007
@@ -42,7 +42,7 @@
  * Locking and transactions are handled at a higher level.  This API should not 
  * be called directly by any writer, but rather by an HRegion manager.
  ******************************************************************************/
-public class HStore {
+class HStore implements HConstants {
   private static final Log LOG = LogFactory.getLog(HStore.class);
 
   static final String COMPACTION_DIR = "compaction.tmp";
@@ -64,7 +64,7 @@
   Integer compactLock = 0;
   Integer flushLock = 0;
 
-  private final HLocking lock = new HLocking();
+  final HLocking lock = new HLocking();
 
   TreeMap<Long, MapFile.Reader> maps = new TreeMap<Long, MapFile.Reader>();
   TreeMap<Long, HStoreFile> mapFiles = new TreeMap<Long, HStoreFile>();
@@ -98,8 +98,16 @@
    *
    * <p>It's assumed that after this constructor returns, the reconstructionLog
    * file will be deleted (by whoever has instantiated the HStore).
+   *
+   * @param dir         - log file directory
+   * @param regionName  - name of region
+   * @param family      - name of column family
+   * @param fs          - file system object
+   * @param reconstructionLog - existing log file to apply if any
+   * @param conf        - configuration object
+   * @throws IOException
    */
-  public HStore(Path dir, Text regionName, HColumnDescriptor family, 
+  HStore(Path dir, Text regionName, HColumnDescriptor family, 
       FileSystem fs, Path reconstructionLog, Configuration conf)
   throws IOException {  
     this.dir = dir;
@@ -200,18 +208,25 @@
           // Check this edit is for me.  Also, guard against writing
           // METACOLUMN info such as HBASE::CACHEFLUSH entries
           Text column = val.getColumn();
-          if (!key.getRegionName().equals(this.regionName) ||
-              column.equals(HLog.METACOLUMN) ||
-              HStoreKey.extractFamily(column).equals(this.familyName)) {
+          if (column.equals(HLog.METACOLUMN)
+              || !key.getRegionName().equals(this.regionName)
+              || !HStoreKey.extractFamily(column).equals(this.familyName)) {
             if (LOG.isDebugEnabled()) {
               LOG.debug("Passing on edit " + key.getRegionName() + ", "
-                  + key.getRegionName() + ", " + column.toString() + ": "
-                  + new String(val.getVal().get()));
+                  + column.toString() + ": " + new String(val.getVal().get())
+                  + ", my region: " + this.regionName + ", my column: "
+                  + this.familyName);
             }
             continue;
           }
-          reconstructedCache.put(new HStoreKey(key.getRow(), val.getColumn(), 
-              val.getTimestamp()), val.getVal());
+          byte[] bytes = new byte[val.getVal().getSize()];
+          System.arraycopy(val.getVal().get(), 0, bytes, 0, bytes.length);
+          HStoreKey k = new HStoreKey(key.getRow(), column,val.getTimestamp());
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Applying edit " + k.toString() + "="
+                + new String(bytes, UTF8_ENCODING));
+          }
+          reconstructedCache.put(k, new BytesWritable(bytes));
         }
       } finally {
         login.close();
@@ -248,8 +263,11 @@
     LOG.info("HStore online for " + this.regionName + "/" + this.familyName);
   }
 
-  /** Turn off all the MapFile readers */
-  public void close() throws IOException {
+  /**
+   * Turn off all the MapFile readers
+   * @throws IOException
+   */
+  void close() throws IOException {
     LOG.info("closing HStore for " + this.regionName + "/" + this.familyName);
     this.lock.obtainWriteLock();
     try {
@@ -279,8 +297,13 @@
    * Also, we are not expecting any reads of this MapFile just yet.
    *
    * Return the entire list of HStoreFiles currently used by the HStore.
+   *
+   * @param inputCache          - memcache to flush
+   * @param logCacheFlushId     - flush sequence number
+   * @return - Vector of all the HStoreFiles in use
+   * @throws IOException
    */
-  public Vector<HStoreFile> flushCache(TreeMap<HStoreKey, BytesWritable> inputCache,
+  Vector<HStoreFile> flushCache(TreeMap<HStoreKey, BytesWritable> inputCache,
       long logCacheFlushId) throws IOException {
     
     return flushCacheHelper(inputCache, logCacheFlushId, true);
@@ -351,7 +374,10 @@
     }
   }
 
-  public Vector<HStoreFile> getAllMapFiles() {
+  /**
+   * @return - vector of all the HStore files in use
+   */
+  Vector<HStoreFile> getAllMapFiles() {
     this.lock.obtainReadLock();
     try {
       return new Vector<HStoreFile>(mapFiles.values());
@@ -380,8 +406,10 @@
    * 
    * We don't want to hold the structureLock for the whole time, as a compact() 
    * can be lengthy and we want to allow cache-flushes during this period.
+   * 
+   * @throws IOException
    */
-  public void compact() throws IOException {
+  void compact() throws IOException {
     compactHelper(false);
   }
   
@@ -766,7 +794,7 @@
    *
    * The returned object should map column names to byte arrays (byte[]).
    */
-  public void getFull(HStoreKey key, TreeMap<Text, BytesWritable> results) throws IOException {
+  void getFull(HStoreKey key, TreeMap<Text, BytesWritable> results) throws IOException {
     this.lock.obtainReadLock();
     try {
       MapFile.Reader[] maparray 
@@ -806,7 +834,7 @@
    *
    * If 'numVersions' is negative, the method returns all available versions.
    */
-  public BytesWritable[] get(HStoreKey key, int numVersions) throws IOException {
+  BytesWritable[] get(HStoreKey key, int numVersions) throws IOException {
     if(numVersions <= 0) {
       throw new IllegalArgumentException("Number of versions must be > 0");
     }
@@ -833,10 +861,9 @@
               if(numVersions > 0 && (results.size() >= numVersions)) {
                 break;
                 
-              } else {
-                results.add(readval);
-                readval = new BytesWritable();
               }
+              results.add(readval);
+              readval = new BytesWritable();
             }
           }
         }
@@ -845,12 +872,8 @@
         }
       }
 
-      if(results.size() == 0) {
-        return null;
-        
-      } else {
-        return results.toArray(new BytesWritable[results.size()]);
-      }
+      return results.size() == 0 ?
+          null :results.toArray(new BytesWritable[results.size()]);
       
     } finally {
       this.lock.releaseReadLock();
@@ -863,7 +886,7 @@
    * @param midKey      - the middle key for the largest MapFile
    * @return            - size of the largest MapFile
    */
-  public long getLargestFileSize(Text midKey) {
+  long getLargestFileSize(Text midKey) {
     long maxSize = 0L;
     if (this.mapFiles.size() <= 0) {
       return maxSize;
@@ -904,7 +927,7 @@
   /**
    * @return    Returns the number of map files currently in use
    */
-  public int getNMaps() {
+  int getNMaps() {
     this.lock.obtainReadLock();
     try {
       return maps.size();
@@ -933,7 +956,7 @@
    * Return a set of MapFile.Readers, one for each HStore file.
    * These should be closed after the user is done with them.
    */
-  public HInternalScannerInterface getScanner(long timestamp, Text targetCols[],
+  HInternalScannerInterface getScanner(long timestamp, Text targetCols[],
       Text firstRow) throws IOException {
     
     return new HStoreScanner(timestamp, targetCols, firstRow);
@@ -947,7 +970,7 @@
   class HStoreScanner extends HAbstractScanner {
     private MapFile.Reader[] readers;
     
-    public HStoreScanner(long timestamp, Text[] targetCols, Text firstRow)
+    HStoreScanner(long timestamp, Text[] targetCols, Text firstRow)
         throws IOException {
       
       super(timestamp, targetCols);
@@ -1000,6 +1023,7 @@
      * @param firstRow  - seek to this row
      * @return          - true if this is the first row or if the row was not found
      */
+    @Override
     boolean findFirstRow(int i, Text firstRow) throws IOException {
       HStoreKey firstKey
         = (HStoreKey)readers[i].getClosest(new HStoreKey(firstRow), vals[i]);
@@ -1023,6 +1047,7 @@
      * @param i - which reader to fetch next value from
      * @return - true if there is more data available
      */
+    @Override
     boolean getNext(int i) throws IOException {
       vals[i] = new BytesWritable();
       if(! readers[i].next(keys[i], vals[i])) {
@@ -1033,6 +1058,7 @@
     }
     
     /** Close down the indicated reader. */
+    @Override
     void closeSubScanner(int i) {
       try {
         if(readers[i] != null) {
@@ -1052,6 +1078,7 @@
     }
 
     /** Shut it down! */
+    @Override
     public void close() {
       if(! scannerClosed) {
         try {



Mime
View raw message