hadoop-common-commits mailing list archives

From: cutt...@apache.org
Subject: svn commit: r548523 [2/2] - in /lucene/hadoop/trunk/src/contrib/hbase: ./ src/java/org/apache/hadoop/hbase/ src/test/org/apache/hadoop/hbase/
Date: Mon, 18 Jun 2007 22:59:16 GMT
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerAddress.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerAddress.java?view=diff&rev=548523&r1=548522&r2=548523
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerAddress.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerAddress.java Mon Jun 18 15:59:14 2007
@@ -45,7 +45,8 @@
       throw new IllegalArgumentException("Not a host:port pair: " + hostAndPort);
     }
     String host = hostAndPort.substring(0, colonIndex);
-    int port = Integer.valueOf(hostAndPort.substring(colonIndex + 1));
+    int port =
+      Integer.valueOf(hostAndPort.substring(colonIndex + 1)).intValue();
     this.address = new InetSocketAddress(host, port);
     this.stringValue = hostAndPort;
   }
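
For comparison only: the hunk above replaces an autoboxed assignment with an explicit intValue() call. Below is a minimal, self-contained sketch of the same host:port parsing that uses Integer.parseInt to obtain a primitive directly; the class name and the sample address are invented for illustration and are not part of the commit.

    import java.net.InetSocketAddress;

    public class HostPortParseExample {
      static InetSocketAddress parse(String hostAndPort) {
        int colonIndex = hostAndPort.indexOf(':');
        if (colonIndex < 0) {
          throw new IllegalArgumentException("Not a host:port pair: " + hostAndPort);
        }
        String host = hostAndPort.substring(0, colonIndex);
        // parseInt returns a primitive int, so no Integer box is created at all.
        int port = Integer.parseInt(hostAndPort.substring(colonIndex + 1));
        return new InetSocketAddress(host, port);
      }

      public static void main(String[] args) {
        System.out.println(parse("example.org:60010"));
      }
    }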

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java?view=diff&rev=548523&r1=548522&r2=548523
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java Mon Jun 18 15:59:14 2007
@@ -18,7 +18,10 @@
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.util.ArrayList;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.TreeMap;
@@ -31,7 +34,7 @@
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.io.MapFile;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
@@ -40,14 +43,14 @@
 
 import org.onelab.filter.*;
 
-/*******************************************************************************
+/**
  * HStore maintains a bunch of data files.  It is responsible for maintaining 
  * the memory/file hierarchy and for periodic flushes to disk and compacting 
  * edits to the file.
  *
  * Locking and transactions are handled at a higher level.  This API should not 
  * be called directly by any writer, but rather by an HRegion manager.
- ******************************************************************************/
+ */
 class HStore implements HConstants {
   private static final Log LOG = LogFactory.getLog(HStore.class);
 
@@ -71,8 +74,8 @@
   Path filterDir;
   Filter bloomFilter;
 
-  Integer compactLock = 0;
-  Integer flushLock = 0;
+  Integer compactLock = new Integer(0);
+  Integer flushLock = new Integer(0);
 
   final HLocking lock = new HLocking();
 
@@ -81,10 +84,6 @@
 
   Random rand = new Random();
 
-  //////////////////////////////////////////////////////////////////////////////
-  // Constructors, destructors, etc
-  //////////////////////////////////////////////////////////////////////////////
-
   /**
    * An HStore is a set of zero or more MapFiles, which stretch backwards over 
    * time.  A given HStore is responsible for a certain set of columns for a
@@ -109,12 +108,12 @@
    * <p>It's assumed that after this constructor returns, the reconstructionLog
    * file will be deleted (by whoever has instantiated the HStore).
    *
-   * @param dir         - log file directory
-   * @param regionName  - name of region
-   * @param family      - name of column family
-   * @param fs          - file system object
-   * @param reconstructionLog - existing log file to apply if any
-   * @param conf        - configuration object
+   * @param dir log file directory
+   * @param regionName name of region
+   * @param family name of column family
+   * @param fs file system object
+   * @param reconstructionLog existing log file to apply if any
+   * @param conf configuration object
    * @throws IOException
    */
   HStore(Path dir, Text regionName, HColumnDescriptor family, 
@@ -178,9 +177,8 @@
     // file, the entry in 'mapdir' must be deleted.
     Vector<HStoreFile> hstoreFiles 
       = HStoreFile.loadHStoreFiles(conf, dir, regionName, familyName, fs);
-    for(Iterator<HStoreFile> it = hstoreFiles.iterator(); it.hasNext(); ) {
-      HStoreFile hsf = it.next();
-      mapFiles.put(hsf.loadInfo(fs), hsf);
+    for(HStoreFile hsf: hstoreFiles) {
+      mapFiles.put(Long.valueOf(hsf.loadInfo(fs)), hsf);
     }
 
     // Now go through all the HSTORE_LOGINFOFILEs and figure out the
@@ -192,8 +190,7 @@
     // means it was built prior to the previous run of HStore, and so it cannot 
     // contain any updates also contained in the log.
     long maxSeqID = -1;
-    for (Iterator<HStoreFile> it = hstoreFiles.iterator(); it.hasNext(); ) {
-      HStoreFile hsf = it.next();
+    for (HStoreFile hsf: hstoreFiles) {
       long seqid = hsf.loadInfo(fs);
       if(seqid > 0) {
         if(seqid > maxSeqID) {
@@ -202,68 +199,8 @@
       }
     }
 
-    // If a bloom filter is enabled, try to read it in.
-    // If it doesn't exist, create it.
+    doReconstructionLog(reconstructionLog, maxSeqID);
     
-    // Read the reconstructionLog to see whether we need to build a brand-new 
-    // MapFile out of non-flushed log entries.  
-    //
-    // We can ignore any log message that has a sequence ID that's equal to or 
-    // lower than maxSeqID.  (Because we know such log messages are already 
-    // reflected in the MapFiles.)
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("reading reconstructionLog");
-    }
-    if(reconstructionLog != null && fs.exists(reconstructionLog)) {
-      long maxSeqIdInLog = -1;
-      TreeMap<HStoreKey, BytesWritable> reconstructedCache 
-        = new TreeMap<HStoreKey, BytesWritable>();
-      SequenceFile.Reader login 
-        = new SequenceFile.Reader(fs, reconstructionLog, conf);
-      try {
-        HLogKey key = new HLogKey();
-        HLogEdit val = new HLogEdit();
-        while(login.next(key, val)) {
-          maxSeqIdInLog = Math.max(maxSeqIdInLog, key.getLogSeqNum());
-          if (key.getLogSeqNum() <= maxSeqID) {
-            continue;
-          }
-          // Check this edit is for me.  Also, guard against writing
-          // METACOLUMN info such as HBASE::CACHEFLUSH entries
-          Text column = val.getColumn();
-          if (column.equals(HLog.METACOLUMN)
-              || !key.getRegionName().equals(this.regionName)
-              || !HStoreKey.extractFamily(column).equals(this.familyName)) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Passing on edit " + key.getRegionName() + ", "
-                  + column.toString() + ": " + new String(val.getVal().get())
-                  + ", my region: " + this.regionName + ", my column: "
-                  + this.familyName);
-            }
-            continue;
-          }
-          byte[] bytes = new byte[val.getVal().getSize()];
-          System.arraycopy(val.getVal().get(), 0, bytes, 0, bytes.length);
-          HStoreKey k = new HStoreKey(key.getRow(), column,val.getTimestamp());
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Applying edit " + k.toString() + "="
-                + new String(bytes, UTF8_ENCODING));
-          }
-          reconstructedCache.put(k, new BytesWritable(bytes));
-        }
-      } finally {
-        login.close();
-      }
-
-      if(reconstructedCache.size() > 0) {
-        // We create a "virtual flush" at maxSeqIdInLog+1.
-        if(LOG.isDebugEnabled()) {
-          LOG.debug("flushing reconstructionCache");
-        }
-        flushCacheHelper(reconstructedCache, maxSeqIdInLog+1, true);
-      }
-    }
-
     // Compact all the MapFiles into a single file.  The resulting MapFile 
     // should be "timeless"; that is, it should not have an associated seq-ID, 
     // because all log messages have been reflected in the TreeMaps at this
@@ -286,6 +223,70 @@
     LOG.info("HStore online for " + this.regionName + "/" + this.familyName);
   }
   
+  /*
+   * Read the reconstructionLog to see whether we need to build a brand-new 
+   * MapFile out of non-flushed log entries.  
+   *
+   * We can ignore any log message that has a sequence ID that's equal to or 
+   * lower than maxSeqID.  (Because we know such log messages are already 
+   * reflected in the MapFiles.)
+   */
+  private void doReconstructionLog(final Path reconstructionLog,
+      final long maxSeqID)
+  throws UnsupportedEncodingException, IOException {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("reading reconstructionLog");
+    }
+    if (reconstructionLog == null || !fs.exists(reconstructionLog)) {
+      return;
+    }
+    long maxSeqIdInLog = -1;
+    TreeMap<HStoreKey, byte []> reconstructedCache =
+      new TreeMap<HStoreKey, byte []>();
+    SequenceFile.Reader login =
+      new SequenceFile.Reader(this.fs, reconstructionLog, this.conf);
+    try {
+      HLogKey key = new HLogKey();
+      HLogEdit val = new HLogEdit();
+      while (login.next(key, val)) {
+        maxSeqIdInLog = Math.max(maxSeqIdInLog, key.getLogSeqNum());
+        if (key.getLogSeqNum() <= maxSeqID) {
+          continue;
+        }
+        // Check this edit is for me. Also, guard against writing
+        // METACOLUMN info such as HBASE::CACHEFLUSH entries
+        Text column = val.getColumn();
+        if (column.equals(HLog.METACOLUMN)
+            || !key.getRegionName().equals(this.regionName)
+            || !HStoreKey.extractFamily(column).equals(this.familyName)) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Passing on edit " + key.getRegionName() + ", "
+                + column.toString() + ": " + new String(val.getVal())
+                + ", my region: " + this.regionName + ", my column: "
+                + this.familyName);
+          }
+          continue;
+        }
+        HStoreKey k = new HStoreKey(key.getRow(), column, val.getTimestamp());
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Applying edit " + k.toString() + "=" +
+            new String(val.getVal(), UTF8_ENCODING));
+        }
+        reconstructedCache.put(k, val.getVal());
+      }
+    } finally {
+      login.close();
+    }
+    
+    if (reconstructedCache.size() > 0) {
+      // We create a "virtual flush" at maxSeqIdInLog+1.
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("flushing reconstructionCache");
+      }
+      flushCacheHelper(reconstructedCache, maxSeqIdInLog + 1, true);
+    }
+  }
+  
   //////////////////////////////////////////////////////////////////////////////
   // Bloom filters
   //////////////////////////////////////////////////////////////////////////////
@@ -423,15 +424,20 @@
   
   /**
    * Get a MapFile writer
-   * This allows us to substitute a BloomFilterWriter if a bloom filter is enabled
+   * This allows us to substitute a BloomFilterWriter if a bloom filter is
+   * enabled
+   * 
+   * @param dirName Directory with store files.
+   * @return Map file.
+   * @throws IOException
    */
   MapFile.Writer getMapFileWriter(String dirName) throws IOException {
-    if(bloomFilter != null) {
+    if (bloomFilter != null) {
       return new BloomFilterWriter(conf, fs, dirName, HStoreKey.class,
-          BytesWritable.class, compression);
+        ImmutableBytesWritable.class, compression);
     }
     return new MapFile.Writer(conf, fs, dirName, HStoreKey.class,
-        BytesWritable.class, compression);
+        ImmutableBytesWritable.class, compression);
   }
   
   //////////////////////////////////////////////////////////////////////////////
@@ -440,6 +446,7 @@
 
   /**
    * Turn off all the MapFile readers
+   * 
    * @throws IOException
    */
   void close() throws IOException {
@@ -478,14 +485,15 @@
    * @return - Vector of all the HStoreFiles in use
    * @throws IOException
    */
-  Vector<HStoreFile> flushCache(TreeMap<HStoreKey, BytesWritable> inputCache,
-      long logCacheFlushId) throws IOException {
-    
+  Vector<HStoreFile> flushCache(TreeMap<HStoreKey, byte []> inputCache,
+      long logCacheFlushId)
+  throws IOException {
     return flushCacheHelper(inputCache, logCacheFlushId, true);
   }
   
-  Vector<HStoreFile> flushCacheHelper(TreeMap<HStoreKey, BytesWritable> inputCache,
-      long logCacheFlushId, boolean addToAvailableMaps) throws IOException {
+  Vector<HStoreFile> flushCacheHelper(TreeMap<HStoreKey, byte []> inputCache,
+      long logCacheFlushId, boolean addToAvailableMaps)
+  throws IOException {
     
     synchronized(flushLock) {
       if(LOG.isDebugEnabled()) {
@@ -503,12 +511,11 @@
       }
       
       MapFile.Writer out = getMapFileWriter(mapfile.toString());
-      
       try {
-        for (Map.Entry<HStoreKey, BytesWritable> es: inputCache.entrySet()) {
+        for (Map.Entry<HStoreKey, byte []> es: inputCache.entrySet()) {
           HStoreKey curkey = es.getKey();
           if (this.familyName.equals(HStoreKey.extractFamily(curkey.getColumn()))) {
-            out.append(curkey, es.getValue());
+            out.append(curkey, new ImmutableBytesWritable(es.getValue()));
           }
         }
         if(LOG.isDebugEnabled()) {
@@ -539,13 +546,13 @@
         this.lock.obtainWriteLock();
         
         try {
-          maps.put(logCacheFlushId, getMapFileReader(mapfile.toString()));
-          mapFiles.put(logCacheFlushId, flushedFile);
+          Long flushid = Long.valueOf(logCacheFlushId);
+          maps.put(flushid, getMapFileReader(mapfile.toString()));
+          mapFiles.put(flushid, flushedFile);
           if(LOG.isDebugEnabled()) {
             LOG.debug("HStore available for " + this.regionName + "/"
                 + this.familyName + " flush id=" + logCacheFlushId);
           }
-        
         } finally {
           this.lock.releaseWriteLock();
         }
@@ -627,7 +634,7 @@
           }
         }
         if(LOG.isDebugEnabled()) {
-          LOG.debug("max sequence id =" + maxSeenSeqID);
+          LOG.debug("max sequence id: " + maxSeenSeqID);
         }
         
         HStoreFile compactedOutputFile 
@@ -645,10 +652,8 @@
         }
 
         // Step through them, writing to the brand-new TreeMap
-
         MapFile.Writer compactedOut =
           getMapFileWriter(compactedOutputFile.getMapFilePath().toString());
-        
         try {
 
           // We create a new set of MapFile.Reader objects so we don't screw up 
@@ -665,14 +670,15 @@
 
           MapFile.Reader[] readers = new MapFile.Reader[toCompactFiles.size()];
           HStoreKey[] keys = new HStoreKey[toCompactFiles.size()];
-          BytesWritable[] vals = new BytesWritable[toCompactFiles.size()];
+          ImmutableBytesWritable[] vals =
+            new ImmutableBytesWritable[toCompactFiles.size()];
           boolean[] done = new boolean[toCompactFiles.size()];
           int pos = 0;
           for(Iterator<HStoreFile> it = toCompactFiles.iterator(); it.hasNext(); ) {
             HStoreFile hsf = it.next();
             readers[pos] = getMapFileReader(hsf.getMapFilePath().toString());
             keys[pos] = new HStoreKey();
-            vals[pos] = new BytesWritable();
+            vals[pos] = new ImmutableBytesWritable();
             done[pos] = false;
             pos++;
           }
@@ -942,7 +948,7 @@
 
       // Fail here?  No worries.
       
-      long orderVal = finalCompactedFile.loadInfo(fs);
+      Long orderVal = Long.valueOf(finalCompactedFile.loadInfo(fs));
 
       // 6. Loading the new TreeMap.
       
@@ -973,27 +979,24 @@
    *
    * The returned object should map column names to byte arrays (byte[]).
    */
-  void getFull(HStoreKey key, TreeMap<Text, BytesWritable> results) throws IOException {
+  void getFull(HStoreKey key, TreeMap<Text, byte []> results)
+  throws IOException {
     this.lock.obtainReadLock();
     try {
       MapFile.Reader[] maparray 
         = maps.values().toArray(new MapFile.Reader[maps.size()]);
-      
-      for(int i = maparray.length-1; i >= 0; i--) {
+      for (int i = maparray.length - 1; i >= 0; i--) {
         MapFile.Reader map = maparray[i];
-
         synchronized(map) {
-          BytesWritable readval = new BytesWritable();
           map.reset();
+          ImmutableBytesWritable readval = new ImmutableBytesWritable();
           HStoreKey readkey = (HStoreKey)map.getClosest(key, readval);
-          
           do {
             Text readcol = readkey.getColumn();
-            if(results.get(readcol) == null
+            if (results.get(readcol) == null
                 && key.matchesWithoutColumn(readkey)) {
-              results.put(new Text(readcol), readval);
-              readval = new BytesWritable();
-              
+              results.put(new Text(readcol), readval.get());
+              readval = new ImmutableBytesWritable();
             } else if(key.getRow().compareTo(readkey.getRow()) > 0) {
               break;
             }
@@ -1013,12 +1016,12 @@
    *
    * If 'numVersions' is negative, the method returns all available versions.
    */
-  BytesWritable[] get(HStoreKey key, int numVersions) throws IOException {
+  byte [][] get(HStoreKey key, int numVersions) throws IOException {
     if (numVersions <= 0) {
       throw new IllegalArgumentException("Number of versions must be > 0");
     }
     
-    Vector<BytesWritable> results = new Vector<BytesWritable>();
+    List<byte []> results = new ArrayList<byte []>();
     this.lock.obtainReadLock();
     try {
       MapFile.Reader[] maparray 
@@ -1028,7 +1031,7 @@
         MapFile.Reader map = maparray[i];
 
         synchronized(map) {
-          BytesWritable readval = new BytesWritable();
+          ImmutableBytesWritable readval = new ImmutableBytesWritable();
           map.reset();
           HStoreKey readkey = (HStoreKey)map.getClosest(key, readval);
           if (readkey == null) {
@@ -1039,14 +1042,14 @@
             continue;
           }
           if (readkey.matchesRowCol(key)) {
-            results.add(readval);
-            readval = new BytesWritable();
+            results.add(readval.get());
+            readval = new ImmutableBytesWritable();
             while(map.next(readkey, readval) && readkey.matchesRowCol(key)) {
               if (numVersions > 0 && (results.size() >= numVersions)) {
                 break;
               }
-              results.add(readval);
-              readval = new BytesWritable();
+              results.add(readval.get());
+              readval = new ImmutableBytesWritable();
             }
           }
         }
@@ -1056,8 +1059,7 @@
       }
 
       return results.size() == 0 ?
-          null :results.toArray(new BytesWritable[results.size()]);
-      
+        null : ImmutableBytesWritable.toArray(results);
     } finally {
       this.lock.releaseReadLock();
     }
@@ -1077,17 +1079,12 @@
     
     this.lock.obtainReadLock();
     try {
-      long mapIndex = 0L;
-
+      Long mapIndex = Long.valueOf(0L);
       // Iterate through all the MapFiles
-
-      for(Iterator<Map.Entry<Long, HStoreFile>> it = mapFiles.entrySet().iterator();
-          it.hasNext(); ) {
-
-        Map.Entry<Long, HStoreFile> e = it.next();
+      for(Map.Entry<Long, HStoreFile> e: mapFiles.entrySet()) {
         HStoreFile curHSF = e.getValue();
-        long size = fs.getLength(new Path(curHSF.getMapFilePath(), MapFile.DATA_FILE_NAME));
-
+        long size = fs.getLength(
+          new Path(curHSF.getMapFilePath(), MapFile.DATA_FILE_NAME));
         if(size > maxSize) {              // This is the largest one so far
           maxSize = size;
           mapIndex = e.getKey();
@@ -1095,12 +1092,9 @@
       }
 
       MapFile.Reader r = maps.get(mapIndex);
-
       midKey.set(((HStoreKey)r.midKey()).getRow());
-
     } catch(IOException e) {
       LOG.warn(e);
-
     } finally {
       this.lock.releaseReadLock();
     }
@@ -1171,14 +1165,12 @@
         }
         
         this.keys = new HStoreKey[readers.length];
-        this.vals = new BytesWritable[readers.length];
+        this.vals = new byte[readers.length][];
 
         // Advance the readers to the first pos.
-
         for(i = 0; i < readers.length; i++) {
           keys[i] = new HStoreKey();
-          vals[i] = new BytesWritable();
-
+          
           if(firstRow.getLength() != 0) {
             if(findFirstRow(i, firstRow)) {
               continue;
@@ -1208,16 +1200,15 @@
      */
     @Override
     boolean findFirstRow(int i, Text firstRow) throws IOException {
+      ImmutableBytesWritable ibw = new ImmutableBytesWritable();
       HStoreKey firstKey
-        = (HStoreKey)readers[i].getClosest(new HStoreKey(firstRow), vals[i]);
-      
-      if(firstKey == null) {
-        
+        = (HStoreKey)readers[i].getClosest(new HStoreKey(firstRow), ibw);
+      if (firstKey == null) {
         // Didn't find it. Close the scanner and return TRUE
-        
         closeSubScanner(i);
         return true;
       }
+      this.vals[i] = ibw.get();
       keys[i].setRow(firstKey.getRow());
       keys[i].setColumn(firstKey.getColumn());
       keys[i].setVersion(firstKey.getTimestamp());
@@ -1232,11 +1223,12 @@
      */
     @Override
     boolean getNext(int i) throws IOException {
-      vals[i] = new BytesWritable();
-      if(! readers[i].next(keys[i], vals[i])) {
+      ImmutableBytesWritable ibw = new ImmutableBytesWritable();
+      if (!readers[i].next(keys[i], ibw)) {
         closeSubScanner(i);
         return false;
       }
+      vals[i] = ibw.get();
       return true;
     }
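
The bulk of the HStore changes above swap BytesWritable for raw byte[] in the in-memory structures and wrap values in org.apache.hadoop.hbase.io.ImmutableBytesWritable only at the MapFile boundary. The following is a rough sketch of that flush pattern, assuming an HBase/Hadoop classpath of this vintage is available; the class and method names are invented for illustration, and the directory argument is whatever MapFile location the caller chooses.

    import java.io.IOException;
    import java.util.Map;
    import java.util.TreeMap;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hbase.HStoreKey;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.io.MapFile;

    public class FlushSketch {
      /** Write every cached cell to a new MapFile, wrapping each value only at append time. */
      static void flush(Configuration conf, FileSystem fs, String mapfileDir,
          TreeMap<HStoreKey, byte[]> cache) throws IOException {
        MapFile.Writer out = new MapFile.Writer(conf, fs, mapfileDir,
            HStoreKey.class, ImmutableBytesWritable.class);
        try {
          for (Map.Entry<HStoreKey, byte[]> e : cache.entrySet()) {
            // Keys stay HStoreKey; values remain plain byte[] until they hit the writer.
            out.append(e.getKey(), new ImmutableBytesWritable(e.getValue()));
          }
        } finally {
          out.close();
        }
      }
    }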
     

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java?view=diff&rev=548523&r1=548522&r2=548523
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java Mon Jun 18 15:59:14 2007
@@ -15,6 +15,7 @@
  */
 package org.apache.hadoop.hbase;
 
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.io.*;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.conf.*;
@@ -213,41 +214,34 @@
     MapFile.Reader in = new MapFile.Reader(fs, getMapFilePath().toString(), conf);
     try {
       MapFile.Writer outA = new MapFile.Writer(conf, fs, 
-          dstA.getMapFilePath().toString(), HStoreKey.class, BytesWritable.class);
-      
+        dstA.getMapFilePath().toString(), HStoreKey.class,
+        ImmutableBytesWritable.class);
       try {
         MapFile.Writer outB = new MapFile.Writer(conf, fs, 
-            dstB.getMapFilePath().toString(), HStoreKey.class, BytesWritable.class);
-        
+          dstB.getMapFilePath().toString(), HStoreKey.class,
+          ImmutableBytesWritable.class);
         try {
           HStoreKey readkey = new HStoreKey();
-          BytesWritable readval = new BytesWritable();
-          
+          ImmutableBytesWritable readval = new ImmutableBytesWritable();
           while(in.next(readkey, readval)) {
             Text key = readkey.getRow();
-            
             if(key.compareTo(midKey) < 0) {
               outA.append(readkey, readval);
-              
             } else {
               outB.append(readkey, readval);
             }
           }
-          
         } finally {
           outB.close();
         }
-        
       } finally {
         outA.close();
       }
-      
     } finally {
       in.close();
     }
 
     // Build an InfoFile for each output
-
     long seqid = loadInfo(fs);
     dstA.writeInfo(fs, seqid);
     dstB.writeInfo(fs, seqid);
@@ -262,8 +256,9 @@
 
     // Copy all the source MapFile tuples into this HSF's MapFile
 
-    MapFile.Writer out = new MapFile.Writer(conf, fs, getMapFilePath().toString(),
-        HStoreKey.class, BytesWritable.class);
+    MapFile.Writer out = new MapFile.Writer(conf, fs,
+      getMapFilePath().toString(),
+      HStoreKey.class, ImmutableBytesWritable.class);
     
     try {
       for(Iterator<HStoreFile> it = srcFiles.iterator(); it.hasNext(); ) {
@@ -272,11 +267,10 @@
         
         try {
           HStoreKey readkey = new HStoreKey();
-          BytesWritable readval = new BytesWritable();
+          ImmutableBytesWritable readval = new ImmutableBytesWritable();
           while(in.next(readkey, readval)) {
             out.append(readkey, readval);
           }
-          
         } finally {
           in.close();
         }
@@ -287,12 +281,10 @@
     }
 
     // Build a unified InfoFile from the source InfoFiles.
-
     long unifiedSeqId = -1;
     for(Iterator<HStoreFile> it = srcFiles.iterator(); it.hasNext(); ) {
       HStoreFile hsf = it.next();
       long curSeqId = hsf.loadInfo(fs);
-      
       if(curSeqId > unifiedSeqId) {
         unifiedSeqId = curSeqId;
       }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java?view=diff&rev=548523&r1=548522&r2=548523
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java Mon Jun 18 15:59:14 2007
@@ -19,25 +19,72 @@
 
 import java.io.*;
 
-/*******************************************************************************
+/**
  * A Key for a stored row
- ******************************************************************************/
+ */
 public class HStoreKey implements WritableComparable {
+  // TODO: Move these utility methods elsewhere (To a Column class?).
+  /**
+   * Extracts the column family name from a column
+   * For example, returns 'info' if the specified column was 'info:server'
+   * @param col name of column
+   * @return column family name
+   * @throws InvalidColumnNameException 
+   */
+  public static Text extractFamily(final Text col)
+  throws InvalidColumnNameException {
+    return extractFamily(col, false);
+  }
   
   /**
    * Extracts the column family name from a column
    * For example, returns 'info' if the specified column was 'info:server'
-   * 
-   * @param col         - name of column
-   * @return            - column family name
-   */
-  public static Text extractFamily(Text col) {
-    String column = col.toString();
-    int colpos = column.indexOf(":");
-    if(colpos < 0) {
-      throw new IllegalArgumentException("Illegal column name has no family indicator: " + column);
+   * @param col name of column
+   * @param withColon if returned family name should include the ':' suffix.
+   * @return column family name
+   * @throws InvalidColumnNameException 
+   */
+  public static Text extractFamily(final Text col, final boolean withColon)
+  throws InvalidColumnNameException {
+    int offset = getColonOffset(col);
+    // Include ':' in copy?
+    offset += (withColon)? 1: 0;
+    if (offset == col.getLength()) {
+      return col;
+    }
+    byte [] buffer = new byte[offset];
+    System.arraycopy(col.getBytes(), 0, buffer, 0, offset);
+    return new Text(buffer);
+  }
+  
+  /**
+   * Extracts the column qualifier, the portion that follows the colon (':')
+   * family/qualifier separator.
+   * For example, returns 'server' if the specified column was 'info:server'
+   * @param col name of column
+   * @return column qualifier or null if there is no qualifier.
+   * @throws InvalidColumnNameException 
+   */
+  public static Text extractQualifier(final Text col)
+  throws InvalidColumnNameException {
+    int offset = getColonOffset(col);
+    if (offset + 1 == col.getLength()) {
+      return null;
     }
-    return new Text(column.substring(0, colpos));
+    int bufferLength = col.getLength() - (offset + 1);
+    byte [] buffer = new byte[bufferLength];
+    System.arraycopy(col.getBytes(), offset + 1, buffer, 0, bufferLength);
+    return new Text(buffer);
+  }
+  
+  private static int getColonOffset(final Text col)
+  throws InvalidColumnNameException {
+    int offset = col.find(":");
+    if(offset < 0) {
+      throw new InvalidColumnNameException(col + " is missing the colon " +
+        "family/qualifier separator");
+    }
+    return offset;
   }
 
   Text row;
@@ -68,8 +115,8 @@
    * Create an HStoreKey specifying the row and timestamp
    * The column name defaults to the empty string
    * 
-   * @param row         - row key
-   * @param timestamp   - timestamp value
+   * @param row row key
+   * @param timestamp timestamp value
    */
   public HStoreKey(Text row, long timestamp) {
     this.row = new Text(row);
@@ -81,8 +128,8 @@
    * Create an HStoreKey specifying the row and column names
    * The timestamp defaults to Long.MAX_VALUE
    * 
-   * @param row         - row key
-   * @param column      - column key
+   * @param row row key
+   * @param column column key
    */
   public HStoreKey(Text row, Text column) {
     this.row = new Text(row);
@@ -93,9 +140,9 @@
   /**
    * Create an HStoreKey specifying all the fields
    * 
-   * @param row         - row key
-   * @param column      - column key
-   * @param timestamp   - timestamp value
+   * @param row row key
+   * @param column column key
+   * @param timestamp timestamp value
    */
   public HStoreKey(Text row, Text column, long timestamp) {
     this.row = new Text(row);
@@ -106,7 +153,7 @@
   /**
    * Construct a new HStoreKey from another
    * 
-   * @param other - the source key
+   * @param other the source key
    */
   public HStoreKey(HStoreKey other) {
     this();
@@ -118,7 +165,7 @@
   /**
    * Change the value of the row key
    * 
-   * @param newrow      - new row key value
+   * @param newrow new row key value
    */
   public void setRow(Text newrow) {
     this.row.set(newrow);
@@ -127,7 +174,7 @@
   /**
    * Change the value of the column key
    * 
-   * @param newcol      - new column key value
+   * @param newcol new column key value
    */
   public void setColumn(Text newcol) {
     this.column.set(newcol);
@@ -136,7 +183,7 @@
   /**
    * Change the value of the timestamp field
    * 
-   * @param timestamp   - new timestamp value
+   * @param timestamp new timestamp value
    */
   public void setVersion(long timestamp) {
     this.timestamp = timestamp;
@@ -145,7 +192,7 @@
   /**
    * Set the value of this HStoreKey from the supplied key
    * 
-   * @param k - key value to copy
+   * @param k key value to copy
    */
   public void set(HStoreKey k) {
     this.row = k.getRow();
@@ -192,16 +239,18 @@
   }
   
   /**
-   * @param other Key to compare against. Compares row and column family
+   * @param that Key to compare against. Compares row and column family
    * 
    * @return true if same row and column family
+   * @throws InvalidColumnNameException 
    * @see #matchesRowCol(HStoreKey)
    * @see #matchesWithoutColumn(HStoreKey)
    */
-  public boolean matchesRowFamily(HStoreKey other) {
-    return this.row.compareTo(other.row) == 0
-        && extractFamily(this.column).compareTo(
-            extractFamily(other.getColumn())) == 0;
+  public boolean matchesRowFamily(HStoreKey that)
+  throws InvalidColumnNameException {
+    return this.row.compareTo(that.row) == 0 &&
+      extractFamily(this.column).
+        compareTo(extractFamily(that.getColumn())) == 0;
   }
   
   @Override
@@ -234,11 +283,9 @@
     int result = this.row.compareTo(other.row);
     if(result == 0) {
       result = this.column.compareTo(other.column);
-      
       if(result == 0) {
         if(this.timestamp < other.timestamp) {
           result = 1;
-          
         } else if(this.timestamp > other.timestamp) {
           result = -1;
         }
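
The new extractFamily and extractQualifier helpers split a column name on the ':' family/qualifier separator. A small usage sketch against the signatures shown above; the column name is only an example, and main declares throws Exception to cover the checked InvalidColumnNameException without assuming where it sits in the exception hierarchy.

    import org.apache.hadoop.hbase.HStoreKey;
    import org.apache.hadoop.io.Text;

    public class ColumnSplitSketch {
      public static void main(String[] args) throws Exception {
        Text column = new Text("info:server");
        // Family without the separator: "info"
        System.out.println(HStoreKey.extractFamily(column));
        // Family including the ':' suffix: "info:"
        System.out.println(HStoreKey.extractFamily(column, true));
        // Qualifier after the separator: "server"
        System.out.println(HStoreKey.extractQualifier(column));
      }
    }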

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/KeyedData.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/KeyedData.java?view=diff&rev=548523&r1=548522&r2=548523
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/KeyedData.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/KeyedData.java Mon Jun 18 15:59:14 2007
@@ -24,31 +24,30 @@
  ******************************************************************************/
 public class KeyedData implements Writable {
   HStoreKey key;
-  BytesWritable data;
+  byte [] data;
 
   /** Default constructor. Used by Writable interface */
   public KeyedData() {
     this.key = new HStoreKey();
-    this.data = new BytesWritable();
   }
 
   /**
    * Create a KeyedData object specifying the parts
-   * @param key         - HStoreKey
-   * @param data        - BytesWritable
+   * @param key HStoreKey
+   * @param data
    */
-  public KeyedData(HStoreKey key, BytesWritable data) {
+  public KeyedData(HStoreKey key, byte [] data) {
     this.key = key;
     this.data = data;
   }
 
-  /** @return - returns the key */
+  /** @return returns the key */
   public HStoreKey getKey() {
     return key;
   }
 
   /** @return - returns the value */
-  public BytesWritable getData() {
+  public byte [] getData() {
     return data;
   }
 
@@ -61,7 +60,8 @@
    */
   public void write(DataOutput out) throws IOException {
     key.write(out);
-    data.write(out);
+    out.writeShort(this.data.length);
+    out.write(this.data);
   }
   
   /* (non-Javadoc)
@@ -69,6 +69,7 @@
    */
   public void readFields(DataInput in) throws IOException {
     key.readFields(in);
-    data.readFields(in);
+    this.data = new byte[in.readShort()];
+    in.readFully(this.data);
   }
-}
+}
\ No newline at end of file
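
KeyedData now serializes its value as a short length prefix followed by the raw bytes, which caps a single value at Short.MAX_VALUE bytes. Below is a self-contained round-trip sketch of that framing outside the Writable interface; the class and method names are invented for illustration.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInput;
    import java.io.DataInputStream;
    import java.io.DataOutput;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class LengthPrefixedBytesSketch {
      // Mirrors KeyedData.write(): a short length prefix, then the raw bytes.
      static void writeBytes(DataOutput out, byte[] data) throws IOException {
        out.writeShort(data.length);
        out.write(data);
      }

      // Mirrors KeyedData.readFields(): read the length, then exactly that many bytes.
      static byte[] readBytes(DataInput in) throws IOException {
        byte[] data = new byte[in.readShort()];
        in.readFully(data);
        return data;
      }

      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writeBytes(new DataOutputStream(bytes), "region".getBytes("UTF-8"));
        byte[] back = readBytes(
            new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(new String(back, "UTF-8"));   // prints "region"
      }
    }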

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/LeaseListener.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/LeaseListener.java?view=diff&rev=548523&r1=548522&r2=548523
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/LeaseListener.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/LeaseListener.java Mon Jun 18 15:59:14 2007
@@ -16,14 +16,14 @@
 package org.apache.hadoop.hbase;
 
 
-/*******************************************************************************
+/**
  * LeaseListener is an interface meant to be implemented by users of the Leases 
  * class.
  *
  * It receives events from the Leases class about the status of its accompanying
  * lease.  Users of the Leases class can use a LeaseListener subclass to, for 
  * example, clean up resources after a lease has expired.
- ******************************************************************************/
+ */
 public interface LeaseListener {
   /** When a lease expires, this method is called. */
   public void leaseExpired();

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java?view=diff&rev=548523&r1=548522&r2=548523
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java Mon Jun 18 15:59:14 2007
@@ -17,42 +17,41 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.io.*;
-
 import java.io.*;
 import java.util.*;
 
 /**
  * Leases
  *
- * There are several server classes in HBase that need to track external clients
- * that occasionally send heartbeats.
+ * There are several server classes in HBase that need to track external
+ * clients that occasionally send heartbeats.
  * 
- * These external clients hold resources in the server class.  Those resources 
- * need to be released if the external client fails to send a heartbeat after 
- * some interval of time passes.
- *
- * The Leases class is a general reusable class for this kind of pattern.
+ * <p>These external clients hold resources in the server class.
+ * Those resources need to be released if the external client fails to send a
+ * heartbeat after some interval of time passes.
  *
+ * <p>The Leases class is a general reusable class for this kind of pattern.
  * An instance of the Leases class will create a thread to do its dirty work.  
  * You should close() the instance if you want to clean up the thread properly.
  */
 public class Leases {
-  static final Log LOG = LogFactory.getLog(Leases.class.getName());
+  protected static final Log LOG = LogFactory.getLog(Leases.class.getName());
 
-  long leasePeriod;
-  long leaseCheckFrequency;
-  LeaseMonitor leaseMonitor;
-  Thread leaseMonitorThread;
-  TreeMap<Text, Lease> leases = new TreeMap<Text, Lease>();
-  TreeSet<Lease> sortedLeases = new TreeSet<Lease>();
-  boolean running = true;
+  protected final long leasePeriod;
+  protected final long leaseCheckFrequency;
+  private final LeaseMonitor leaseMonitor;
+  private final Thread leaseMonitorThread;
+  protected final Map<LeaseName, Lease> leases =
+    new HashMap<LeaseName, Lease>();
+  protected final TreeSet<Lease> sortedLeases = new TreeSet<Lease>();
+  protected boolean running = true;
 
   /**
    * Creates a lease
    * 
    * @param leasePeriod - length of time (milliseconds) that the lease is valid
-   * @param leaseCheckFrequency - how often the lease should be checked (milliseconds)
+   * @param leaseCheckFrequency - how often the lease should be checked
+   * (milliseconds)
    */
   public Leases(long leasePeriod, long leaseCheckFrequency) {
     this.leasePeriod = leasePeriod;
@@ -88,96 +87,93 @@
       LOG.debug("leases closed");
     }
   }
-  
-  String getLeaseName(final Text holderId, final Text resourceId) {
-    return "<holderId=" + holderId + ", resourceId=" + resourceId + ">";
-  }
 
-  /** A client obtains a lease... */
+  /* A client obtains a lease... */
+  
   /**
    * Obtain a lease
    * 
-   * @param holderId - name of lease holder
-   * @param resourceId - resource being leased
-   * @param listener - listener that will process lease expirations
+   * @param holderId id of lease holder
+   * @param resourceId id of resource being leased
+   * @param listener listener that will process lease expirations
    */
-  public void createLease(Text holderId, Text resourceId,
+  public void createLease(final long holderId, final long resourceId,
       final LeaseListener listener) {
+    LeaseName name = null;
     synchronized(leases) {
       synchronized(sortedLeases) {
         Lease lease = new Lease(holderId, resourceId, listener);
-        Text leaseId = lease.getLeaseId();
-        if(leases.get(leaseId) != null) {
-          throw new AssertionError("Impossible state for createLease(): Lease " +
-            getLeaseName(holderId, resourceId) + " is still held.");
+        name = lease.getLeaseName();
+        if(leases.get(name) != null) {
+          throw new AssertionError("Impossible state for createLease(): " +
+            "Lease " + name + " is still held.");
         }
-        leases.put(leaseId, lease);
+        leases.put(name, lease);
         sortedLeases.add(lease);
       }
     }
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Created lease " + getLeaseName(holderId, resourceId));
+      LOG.debug("Created lease " + name);
     }
   }
   
-  /** A client renews a lease... */
+  /* A client renews a lease... */
   /**
    * Renew a lease
    * 
-   * @param holderId - name of lease holder
-   * @param resourceId - resource being leased
+   * @param holderId id of lease holder
+   * @param resourceId id of resource being leased
    * @throws IOException
    */
-  public void renewLease(Text holderId, Text resourceId) throws IOException {
+  public void renewLease(final long holderId, final long resourceId)
+  throws IOException {
+    LeaseName name = null;
     synchronized(leases) {
       synchronized(sortedLeases) {
-        Text leaseId = createLeaseId(holderId, resourceId);
-        Lease lease = leases.get(leaseId);
-        if(lease == null) {
+        name = createLeaseName(holderId, resourceId);
+        Lease lease = leases.get(name);
+        if (lease == null) {
           // It's possible that someone tries to renew the lease, but 
           // it just expired a moment ago.  So fail.
           throw new IOException("Cannot renew lease that is not held: " +
-            getLeaseName(holderId, resourceId));
+            name);
         }
-        
         sortedLeases.remove(lease);
         lease.renew();
         sortedLeases.add(lease);
       }
     }
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Renewed lease " + getLeaseName(holderId, resourceId));
+      LOG.debug("Renewed lease " + name);
     }
   }
 
   /**
    * Client explicitly cancels a lease.
    * 
-   * @param holderId - name of lease holder
-   * @param resourceId - resource being leased
+   * @param holderId id of lease holder
+   * @param resourceId id of resource being leased
    * @throws IOException
    */
-  public void cancelLease(Text holderId, Text resourceId) throws IOException {
+  public void cancelLease(final long holderId, final long resourceId)
+  throws IOException {
+    LeaseName name = null;
     synchronized(leases) {
       synchronized(sortedLeases) {
-        Text leaseId = createLeaseId(holderId, resourceId);
-        Lease lease = leases.get(leaseId);
-        if(lease == null) {
-          
+        name = createLeaseName(holderId, resourceId);
+        Lease lease = leases.get(name);
+        if (lease == null) {
           // It's possible that someone tries to renew the lease, but 
           // it just expired a moment ago.  So fail.
-          
           throw new IOException("Cannot cancel lease that is not held: " +
-            getLeaseName(holderId, resourceId));
+            name);
         }
-        
         sortedLeases.remove(lease);
-        leases.remove(leaseId);
-
+        leases.remove(name);
       }
     }     
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Cancel lease " + getLeaseName(holderId, resourceId));
+      LOG.debug("Cancel lease " + name);
     }
   }
 
@@ -190,13 +186,10 @@
             Lease top;
             while((sortedLeases.size() > 0)
                 && ((top = sortedLeases.first()) != null)) {
-              
               if(top.shouldExpire()) {
-                leases.remove(top.getLeaseId());
+                leases.remove(top.getLeaseName());
                 sortedLeases.remove(top);
-
                 top.expired();
-              
               } else {
                 break;
               }
@@ -206,34 +199,92 @@
         try {
           Thread.sleep(leaseCheckFrequency);
         } catch (InterruptedException ie) {
-          // Ignore
+          // continue
         }
       }
     }
   }
+  
+  /*
+   * A Lease name.
+   * More lightweight than String or Text.
+   */
+  class LeaseName implements Comparable {
+    private final long holderId;
+    private final long resourceId;
+    
+    LeaseName(final long hid, final long rid) {
+      this.holderId = hid;
+      this.resourceId = rid;
+    }
+    
+    @Override
+    public boolean equals(Object obj) {
+      LeaseName other = (LeaseName)obj;
+      return this.holderId == other.holderId &&
+        this.resourceId == other.resourceId;
+    }
+    
+    @Override
+    public int hashCode() {
+      // Copy OR'ing from javadoc for Long#hashCode.
+      int result = (int)(this.holderId ^ (this.holderId >>> 32));
+      result ^= (int)(this.resourceId ^ (this.resourceId >>> 32));
+      return result;
+    }
+    
+    @Override
+    public String toString() {
+      return Long.toString(this.holderId) + "/" +
+        Long.toString(this.resourceId);
+    }
 
+    public int compareTo(Object obj) {
+      LeaseName other = (LeaseName)obj;
+      if (this.holderId < other.holderId) {
+        return -1;
+      }
+      if (this.holderId > other.holderId) {
+        return 1;
+      }
+      // holderIds are equal
+      if (this.resourceId < other.resourceId) {
+        return -1;
+      }
+      if (this.resourceId > other.resourceId) {
+        return 1;
+      }
+      // Objects are equal
+      return 0;
+    }
+  }
+  
   /** Create a lease id out of the holder and resource ids. */
-  Text createLeaseId(Text holderId, Text resourceId) {
-    return new Text("_" + holderId + "/" + resourceId + "_");
+  protected LeaseName createLeaseName(final long hid, final long rid) {
+    return new LeaseName(hid, rid);
   }
 
   /** This class tracks a single Lease. */
-  @SuppressWarnings("unchecked")
   private class Lease implements Comparable {
-    Text holderId;
-    Text resourceId;
-    LeaseListener listener;
+    final long holderId;
+    final long resourceId;
+    final LeaseListener listener;
     long lastUpdate;
+    private LeaseName leaseId;
 
-    Lease(Text holderId, Text resourceId, LeaseListener listener) {
+    Lease(final long holderId, final long resourceId,
+        final LeaseListener listener) {
       this.holderId = holderId;
       this.resourceId = resourceId;
       this.listener = listener;
       renew();
     }
     
-    Text getLeaseId() {
-      return createLeaseId(holderId, resourceId);
+    synchronized LeaseName getLeaseName() {
+      if (this.leaseId == null) {
+        this.leaseId = createLeaseName(holderId, resourceId);
+      }
+      return this.leaseId;
     }
     
     boolean shouldExpire() {
@@ -246,8 +297,7 @@
     
     void expired() {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Lease expired " + getLeaseName(this.holderId,
-          this.resourceId));
+        LOG.debug("Lease expired " + getLeaseName());
       }
       listener.leaseExpired();
     }
@@ -259,7 +309,7 @@
     
     @Override
     public int hashCode() {
-      int result = this.getLeaseId().hashCode();
+      int result = this.getLeaseName().hashCode();
       result ^= Long.valueOf(this.lastUpdate).hashCode();
       return result;
     }
@@ -272,14 +322,11 @@
       Lease other = (Lease) o;
       if(this.lastUpdate < other.lastUpdate) {
         return -1;
-        
       } else if(this.lastUpdate > other.lastUpdate) {
         return 1;
-        
       } else {
-        return this.getLeaseId().compareTo(other.getLeaseId());
+        return this.getLeaseName().compareTo(other.getLeaseName());
       }
     }
   }
-}
-
+}
\ No newline at end of file
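
Leases now keys its internal map on the lightweight LeaseName pair of longs rather than Text, and the public createLease/renewLease/cancelLease methods take long holder and resource ids. A hedged usage sketch against those signatures; the ids and timings are made up, and close() is the cleanup method the class javadoc refers to.

    import org.apache.hadoop.hbase.LeaseListener;
    import org.apache.hadoop.hbase.Leases;

    public class LeasesUsageSketch {
      public static void main(String[] args) throws Exception {
        // A lease valid for 3 seconds, checked every second (illustrative values).
        Leases leases = new Leases(3000, 1000);
        long holderId = 42L;
        long resourceId = 7L;
        leases.createLease(holderId, resourceId, new LeaseListener() {
          public void leaseExpired() {
            System.out.println("lease expired");
          }
        });
        Thread.sleep(1500);
        leases.renewLease(holderId, resourceId);   // resets the expiration clock
        leases.cancelLease(holderId, resourceId);  // or release it explicitly
        leases.close();
      }
    }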

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java?view=diff&rev=548523&r1=548522&r2=548523
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java Mon Jun 18 15:59:14 2007
@@ -23,7 +23,7 @@
 import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.io.Text;
 
 /** Abstract base class for merge tests */
@@ -31,7 +31,7 @@
   protected static final Text COLUMN_NAME = new Text("contents:");
   protected Random rand;
   protected HTableDescriptor desc;
-  protected BytesWritable value;
+  protected ImmutableBytesWritable value;
 
   protected MiniDFSCluster dfsCluster;
   protected FileSystem fs;
@@ -52,7 +52,7 @@
       val.append(partialValue);
     }
     try {
-      value = new BytesWritable(val.toString().getBytes(HConstants.UTF8_ENCODING));
+      value = new ImmutableBytesWritable(val.toString().getBytes(HConstants.UTF8_ENCODING));
       
     } catch(UnsupportedEncodingException e) {
       fail();
@@ -125,7 +125,7 @@
       long lockid = region.startUpdate(new Text("row_"
           + String.format("%1$05d", i)));
 
-      region.put(lockid, COLUMN_NAME, value);
+      region.put(lockid, COLUMN_NAME, value.get());
       region.commit(lockid);
       if(i % 10000 == 0) {
         System.out.println("Flushing write #" + i);

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java?view=diff&rev=548523&r1=548522&r2=548523
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java Mon Jun 18 15:59:14 2007
@@ -21,41 +21,44 @@
 import java.util.Iterator;
 import java.util.TreeMap;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Text;
 
 /** Test case for get */
 public class TestGet extends HBaseTestCase {
+  private static final Log LOG = LogFactory.getLog(TestGet.class.getName());
+  
   private static final Text CONTENTS = new Text("contents:");
-  private static final Text ROW_KEY = new Text(HGlobals.rootRegionInfo.regionName);
+  private static final Text ROW_KEY =
+    new Text(HGlobals.rootRegionInfo.regionName);
+  private static final String SERVER_ADDRESS = "foo.bar.com:1234";
 
   
-  private void verifyGet(HRegion r) throws IOException {
+  private void verifyGet(final HRegion r, final String expectedServer)
+  throws IOException {
     // This should return a value because there is only one family member
-    
-    BytesWritable value = r.get(ROW_KEY, CONTENTS);
+    byte [] value = r.get(ROW_KEY, CONTENTS);
     assertNotNull(value);
     
     // This should not return a value because there are multiple family members
-    
     value = r.get(ROW_KEY, HConstants.COLUMN_FAMILY);
     assertNull(value);
     
     // Find out what getFull returns
+    TreeMap<Text, byte []> values = r.getFull(ROW_KEY);
     
-    TreeMap<Text, BytesWritable> values = r.getFull(ROW_KEY);
-    //assertEquals(4, values.keySet().size());
+    // assertEquals(4, values.keySet().size());
     for(Iterator<Text> i = values.keySet().iterator(); i.hasNext(); ) {
       Text column = i.next();
-      System.out.println(column);
-      if(column.equals(HConstants.COL_SERVER)) {
-        BytesWritable val = values.get(column);
-        byte[] bytes = new byte[val.getSize()];
-        System.arraycopy(val.get(), 0, bytes, 0, bytes.length);
-        System.out.println("  " + new String(bytes, HConstants.UTF8_ENCODING));
+      if (column.equals(HConstants.COL_SERVER)) {
+        byte [] val = values.get(column);
+        String server = new String(val, HConstants.UTF8_ENCODING);
+        assertEquals(expectedServer, server);
+        LOG.info(server);
       }
     }
   }
@@ -94,38 +97,35 @@
       ByteArrayOutputStream bytes = new ByteArrayOutputStream();
       DataOutputStream s = new DataOutputStream(bytes);
       CONTENTS.write(s);
-      r.put(lockid, CONTENTS, new BytesWritable(bytes.toByteArray()));
+      r.put(lockid, CONTENTS, bytes.toByteArray());
 
       bytes.reset();
       HGlobals.rootRegionInfo.write(s);
       
-      r.put(lockid, HConstants.COL_REGIONINFO, new BytesWritable(bytes.toByteArray()));
+      r.put(lockid, HConstants.COL_REGIONINFO, bytes.toByteArray());
       
       r.commit(lockid);
       
       lockid = r.startUpdate(ROW_KEY);
 
       r.put(lockid, HConstants.COL_SERVER, 
-          new BytesWritable(
-              new HServerAddress("foo.bar.com:1234").toString().getBytes(HConstants.UTF8_ENCODING)
-              )
+        new HServerAddress(SERVER_ADDRESS).toString().
+          getBytes(HConstants.UTF8_ENCODING)
       );
       
       r.put(lockid, HConstants.COL_STARTCODE, 
-          new BytesWritable(
-              String.valueOf(lockid).getBytes(HConstants.UTF8_ENCODING)
-              )
+        String.valueOf(lockid).getBytes(HConstants.UTF8_ENCODING)
       );
       
       r.put(lockid, new Text(HConstants.COLUMN_FAMILY + "region"), 
-          new BytesWritable("region".getBytes(HConstants.UTF8_ENCODING)));
+        "region".getBytes(HConstants.UTF8_ENCODING));
 
       r.commit(lockid);
       
       // Verify that get works the same from memcache as when reading from disk
       // NOTE dumpRegion won't work here because it only reads from disk.
       
-      verifyGet(r);
+      verifyGet(r, SERVER_ADDRESS);
       
       // Close and re-open region, forcing updates to disk
       
@@ -135,27 +135,26 @@
       
       // Read it back
       
-      verifyGet(r);
+      verifyGet(r, SERVER_ADDRESS);
       
       // Update one family member and add a new one
       
       lockid = r.startUpdate(ROW_KEY);
 
       r.put(lockid, new Text(HConstants.COLUMN_FAMILY + "region"),
-          new BytesWritable("region2".getBytes()));
+        "region2".getBytes());
 
+      String otherServerName = "bar.foo.com:4321";
       r.put(lockid, HConstants.COL_SERVER, 
-          new BytesWritable(
-              new HServerAddress("bar.foo.com:4321").toString().getBytes(HConstants.UTF8_ENCODING)
-              )
-      );
+        new HServerAddress(otherServerName).toString().
+          getBytes(HConstants.UTF8_ENCODING));
       
       r.put(lockid, new Text(HConstants.COLUMN_FAMILY + "junk"),
-          new BytesWritable("junk".getBytes()));
+        "junk".getBytes());
       
       r.commit(lockid);
 
-      verifyGet(r);
+      verifyGet(r, otherServerName);
       
       // Close region and re-open it
       
@@ -165,7 +164,7 @@
 
       // Read it back
       
-      verifyGet(r);
+      verifyGet(r, otherServerName);
 
       // Close region once and for all
       

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java?view=diff&rev=548523&r1=548522&r2=548523
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java Mon Jun 18 15:59:14 2007
@@ -41,19 +41,14 @@
    * Since all the "tests" depend on the results of the previous test, they are
    * not Junit tests that can stand alone. Consequently we have a single Junit
    * test that runs the "sub-tests" as private methods.
+   * @throws IOException 
    */
-  public void testHBaseCluster() {
-    try {
-      setup();
-      basic();
-      scanner();
-      listTables();
-      cleanup();
-      
-    } catch(IOException e) {
-      e.printStackTrace();
-      fail();
-    }
+  public void testHBaseCluster() throws IOException {
+    setup();
+    basic();
+    scanner();
+    listTables();
+    cleanup();
   }
 
   public void tearDown() throws Exception {

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHLog.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHLog.java?view=diff&rev=548523&r1=548522&r2=548523
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHLog.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHLog.java Mon Jun 18 15:59:14 2007
@@ -20,7 +20,6 @@
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.SequenceFile.Reader;
@@ -50,10 +49,10 @@
       try {
         // Write columns named 1, 2, 3, etc. and then values of single byte
         // 1, 2, 3...
-        TreeMap<Text, BytesWritable> cols = new TreeMap<Text, BytesWritable>();
+        TreeMap<Text, byte []> cols = new TreeMap<Text, byte []>();
         for (int i = 0; i < COL_COUNT; i++) {
           cols.put(new Text(Integer.toString(i)),
-              new BytesWritable(new byte[] { (byte)(i + '0') }));
+            new byte[] { (byte)(i + '0') });
         }
         long timestamp = System.currentTimeMillis();
         log.append(regionName, tableName, row, cols, timestamp);
@@ -71,7 +70,7 @@
           assertEquals(regionName, key.getRegionName());
           assertEquals(tableName, key.getTablename());
           assertEquals(row, key.getRow());
-          assertEquals((byte)(i + '0'), val.getVal().get()[0]);
+          assertEquals((byte)(i + '0'), val.getVal()[0]);
           System.out.println(key + " " + val);
         }
         while (reader.next(key, val)) {
@@ -80,7 +79,7 @@
           assertEquals(tableName, key.getTablename());
           assertEquals(HLog.METAROW, key.getRow());
           assertEquals(HLog.METACOLUMN, val.getColumn());
-          assertEquals(0, val.getVal().compareTo(COMPLETE_CACHEFLUSH));
+          assertEquals(0, COMPLETE_CACHEFLUSH.compareTo(val.getVal()));
           System.out.println(key + " " + val);
         }
       } finally {

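With the log values now typed as byte [], the TestHLog assertions above compare individual bytes (val.getVal()[0]) or drive the comparison from the constant (COMPLETE_CACHEFLUSH.compareTo(val.getVal())) instead of calling compareTo on the value itself, since Java arrays have no compareTo and their equals() is reference equality. A small standalone illustration of that general JDK behaviour, not code from this commit:

    import java.util.Arrays;

    public class ByteArrayComparison {
      public static void main(String[] args) {
        byte [] a = { (byte) '0' };
        byte [] b = { (byte) '0' };
        System.out.println(a.equals(b));          // false: arrays compare by reference
        System.out.println(Arrays.equals(a, b));  // true: element-wise comparison
      }
    }
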
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHMemcache.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHMemcache.java?view=diff&rev=548523&r1=548522&r2=548523
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHMemcache.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHMemcache.java Mon Jun 18 15:59:14 2007
@@ -26,7 +26,6 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HMemcache.Snapshot;
-import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Text;
 
 /** memcache test case */
@@ -85,10 +84,10 @@
    */
   private void addRows(final HMemcache hmc) {
     for (int i = 0; i < ROW_COUNT; i++) {
-      TreeMap<Text, BytesWritable> columns = new TreeMap<Text, BytesWritable>();
+      TreeMap<Text, byte []> columns = new TreeMap<Text, byte []>();
       for (int ii = 0; ii < COLUMNS_COUNT; ii++) {
         Text k = getColumnName(i, ii);
-        columns.put(k, new BytesWritable(k.toString().getBytes()));
+        columns.put(k, k.toString().getBytes());
       }
       hmc.add(getRowName(i), columns, System.currentTimeMillis());
     }
@@ -111,7 +110,7 @@
       throws IOException {
     // Save off old state.
     int oldHistorySize = hmc.history.size();
-    TreeMap<HStoreKey, BytesWritable> oldMemcache = hmc.memcache;
+    TreeMap<HStoreKey, byte []> oldMemcache = hmc.memcache;
     // Run snapshot.
     Snapshot s = hmc.snapshotMemcacheForLog(log);
     // Make some assertions about what just happened.
@@ -147,7 +146,7 @@
   }
   
   private void isExpectedRow(final int rowIndex,
-      TreeMap<Text, BytesWritable> row) {
+      TreeMap<Text, byte []> row) {
     int i = 0;
     for (Text colname: row.keySet()) {
       String expectedColname =
@@ -158,10 +157,8 @@
       // 100 bytes in size at least. This is the default size
      // for BytesWritable.  For comparison, convert bytes to
       // String and trim to remove trailing null bytes.
-      BytesWritable value = row.get(colname);
-      byte[] bytes = new byte[value.getSize()];
-      System.arraycopy(value.get(), 0, bytes, 0, bytes.length);
-      String colvalueStr = new String(bytes).trim();
+      byte [] value = row.get(colname);
+      String colvalueStr = new String(value).trim();
       assertEquals("Content", colnameStr, colvalueStr);
     }
   }
@@ -171,7 +168,7 @@
     addRows(this.hmemcache);
     for (int i = 0; i < ROW_COUNT; i++) {
       HStoreKey hsk = new HStoreKey(getRowName(i));
-      TreeMap<Text, BytesWritable> all = this.hmemcache.getFull(hsk);
+      TreeMap<Text, byte []> all = this.hmemcache.getFull(hsk);
       isExpectedRow(i, all);
     }
   }
@@ -192,16 +189,16 @@
     HInternalScannerInterface scanner =
       this.hmemcache.getScanner(timestamp, cols, new Text());
     HStoreKey key = new HStoreKey();
-    TreeMap<Text, BytesWritable> results = new TreeMap<Text, BytesWritable>();
+    TreeMap<Text, byte []> results = new TreeMap<Text, byte []>();
     for (int i = 0; scanner.next(key, results); i++) {
       assertTrue("Row name",
           key.toString().startsWith(getRowName(i).toString()));
       assertEquals("Count of columns", COLUMNS_COUNT,
           results.size());
-      TreeMap<Text, BytesWritable> row = new TreeMap<Text, BytesWritable>();
-      for(Iterator<Map.Entry<Text, BytesWritable>> it = results.entrySet().iterator();
+      TreeMap<Text, byte []> row = new TreeMap<Text, byte []>();
+      for(Iterator<Map.Entry<Text, byte []>> it = results.entrySet().iterator();
           it.hasNext(); ) {
-        Map.Entry<Text, BytesWritable> e = it.next();
+        Map.Entry<Text, byte []> e = it.next();
         row.put(e.getKey(), e.getValue());
       }
       isExpectedRow(i, row);

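The comment in isExpectedRow above captures the motivation for most of this commit: a BytesWritable exposes a backing buffer that can be larger than the logical value (get() returns the whole buffer, getSize() the valid length), so the old tests either copied out the first getSize() bytes or converted to String and trimmed trailing NULs. Storing plain byte [] makes the stored array exactly as long as the data. Side by side, the removed and the replacement idiom from the hunk above:

    // Old idiom (row typed as TreeMap<Text, BytesWritable>): only the first
    // getSize() bytes of the buffer returned by get() are valid, so they are
    // copied out before the String conversion.
    BytesWritable value = row.get(colname);
    byte [] bytes = new byte[value.getSize()];
    System.arraycopy(value.get(), 0, bytes, 0, bytes.length);
    String colvalueStr = new String(bytes).trim();

    // New idiom (row typed as TreeMap<Text, byte []>): the stored array is
    // already exactly the value that was written.
    // String colvalueStr = new String(row.get(colname)).trim();
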
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java?view=diff&rev=548523&r1=548522&r2=548523
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java Mon Jun 18 15:59:14 2007
@@ -25,7 +25,6 @@
 import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Text;
 
 import org.apache.log4j.Logger;
@@ -116,11 +115,8 @@
 
     for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
       long writeid = region.startUpdate(new Text("row_" + k));
-      region.put(writeid, CONTENTS_BASIC,
-          new BytesWritable((CONTENTSTR + k).getBytes()));
-
-      region.put(writeid, new Text(ANCHORNUM + k),
-          new BytesWritable((ANCHORSTR + k).getBytes()));
+      region.put(writeid, CONTENTS_BASIC, (CONTENTSTR + k).getBytes());
+      region.put(writeid, new Text(ANCHORNUM + k), (ANCHORSTR + k).getBytes());
       region.commit(writeid);
     }
     System.out.println("Write " + NUM_VALS + " rows. Elapsed time: "
@@ -143,20 +139,16 @@
     for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
       Text rowlabel = new Text("row_" + k);
 
-      BytesWritable bodydata = region.get(rowlabel, CONTENTS_BASIC);
+      byte [] bodydata = region.get(rowlabel, CONTENTS_BASIC);
       assertNotNull(bodydata);
-      byte[] bytes = new byte[bodydata.getSize()];
-      System.arraycopy(bodydata.get(), 0, bytes, 0, bytes.length);
-      String bodystr = new String(bytes).toString().trim();
+      String bodystr = new String(bodydata).toString().trim();
       String teststr = CONTENTSTR + k;
       assertEquals("Incorrect value for key: (" + rowlabel + "," + CONTENTS_BASIC
           + "), expected: '" + teststr + "' got: '" + bodystr + "'",
           bodystr, teststr);
       collabel = new Text(ANCHORNUM + k);
       bodydata = region.get(rowlabel, collabel);
-      bytes = new byte[bodydata.getSize()];
-      System.arraycopy(bodydata.get(), 0, bytes, 0, bytes.length);
-      bodystr = new String(bytes).toString().trim();
+      bodystr = new String(bodydata).toString().trim();
       teststr = ANCHORSTR + k;
       assertEquals("Incorrect value for key: (" + rowlabel + "," + collabel
           + "), expected: '" + teststr + "' got: '" + bodystr + "'",
@@ -172,7 +164,7 @@
     // Try put with bad lockid.
     boolean exceptionThrown = false;
     try {
-      region.put(-1, CONTENTS_BASIC, new BytesWritable("bad input".getBytes()));
+      region.put(-1, CONTENTS_BASIC, "bad input".getBytes());
     } catch (LockException e) {
       exceptionThrown = true;
     }
@@ -185,7 +177,7 @@
       lockid = region.startUpdate(new Text("Some old key"));
       String unregisteredColName = "FamilyGroup:FamilyLabel";
       region.put(lockid, new Text(unregisteredColName),
-          new BytesWritable(unregisteredColName.getBytes()));
+        unregisteredColName.getBytes());
     } catch (IOException e) {
       exceptionThrown = true;
     } finally {
@@ -278,8 +270,8 @@
       String kLabel = String.format("%1$03d", k);
 
       long lockid = region.startUpdate(new Text("row_vals1_" + kLabel));
-      region.put(lockid, cols[0], new BytesWritable(vals1[k].getBytes()));
-      region.put(lockid, cols[1], new BytesWritable(vals1[k].getBytes()));
+      region.put(lockid, cols[0], vals1[k].getBytes());
+      region.put(lockid, cols[1], vals1[k].getBytes());
       region.commit(lockid);
       numInserted += 2;
     }
@@ -295,16 +287,13 @@
     int numFetched = 0;
     try {
       HStoreKey curKey = new HStoreKey();
-      TreeMap<Text, BytesWritable> curVals = new TreeMap<Text, BytesWritable>();
+      TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>();
       int k = 0;
       while(s.next(curKey, curVals)) {
         for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
-          BytesWritable val = curVals.get(col);
-          byte[] bytes = new byte[val.getSize()];
-          System.arraycopy(val.get(), 0, bytes, 0, bytes.length);
-          int curval = Integer.parseInt(new String(bytes).trim());
-
+          byte [] val = curVals.get(col);
+          int curval = Integer.parseInt(new String(val).trim());
           for(int j = 0; j < cols.length; j++) {
             if(col.compareTo(cols[j]) == 0) {
               assertEquals("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
@@ -343,16 +332,13 @@
     numFetched = 0;
     try {
       HStoreKey curKey = new HStoreKey();
-      TreeMap<Text, BytesWritable> curVals = new TreeMap<Text, BytesWritable>();
+      TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>();
       int k = 0;
       while(s.next(curKey, curVals)) {
         for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
-          BytesWritable val = curVals.get(col);
-          byte[] bytes = new byte[val.getSize()];
-          System.arraycopy(val.get(), 0, bytes, 0, bytes.length);
-          int curval = Integer.parseInt(new String(bytes).trim());
-
+          byte [] val = curVals.get(col);
+          int curval = Integer.parseInt(new String(val).trim());
           for(int j = 0; j < cols.length; j++) {
             if(col.compareTo(cols[j]) == 0) {
               assertEquals("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
@@ -382,8 +368,8 @@
       String kLabel = String.format("%1$03d", k);
       
       long lockid = region.startUpdate(new Text("row_vals1_" + kLabel));
-      region.put(lockid, cols[0], new BytesWritable(vals1[k].getBytes()));
-      region.put(lockid, cols[1], new BytesWritable(vals1[k].getBytes()));
+      region.put(lockid, cols[0], vals1[k].getBytes());
+      region.put(lockid, cols[1], vals1[k].getBytes());
       region.commit(lockid);
       numInserted += 2;
     }
@@ -399,16 +385,13 @@
     numFetched = 0;
     try {
       HStoreKey curKey = new HStoreKey();
-      TreeMap<Text, BytesWritable> curVals = new TreeMap<Text, BytesWritable>();
+      TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>();
       int k = 0;
       while(s.next(curKey, curVals)) {
         for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
-          BytesWritable val = curVals.get(col);
-          byte[] bytes = new byte[val.getSize()];
-          System.arraycopy(val.get(), 0, bytes, 0, bytes.length);
-          int curval = Integer.parseInt(new String(bytes).trim());
-
+          byte [] val = curVals.get(col);
+          int curval = Integer.parseInt(new String(val).trim());
           for(int j = 0; j < cols.length; j++) {
             if(col.compareTo(cols[j]) == 0) {
               assertEquals("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
@@ -447,16 +430,13 @@
     numFetched = 0;
     try {
       HStoreKey curKey = new HStoreKey();
-      TreeMap<Text, BytesWritable> curVals = new TreeMap<Text, BytesWritable>();
+      TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>();
       int k = 0;
       while(s.next(curKey, curVals)) {
         for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
-          BytesWritable val = curVals.get(col);
-          byte[] bytes = new byte[val.getSize()];
-          System.arraycopy(val.get(), 0, bytes, 0, bytes.length);
-          int curval = Integer.parseInt(new String(bytes).trim());
-
+          byte [] val = curVals.get(col);
+          int curval = Integer.parseInt(new String(val).trim());
           for (int j = 0; j < cols.length; j++) {
             if (col.compareTo(cols[j]) == 0) {
               assertEquals("Value for " + col + " should be: " + k
@@ -485,16 +465,13 @@
     numFetched = 0;
     try {
       HStoreKey curKey = new HStoreKey();
-      TreeMap<Text, BytesWritable> curVals = new TreeMap<Text, BytesWritable>();
+      TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>();
       int k = 500;
       while(s.next(curKey, curVals)) {
         for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
-          BytesWritable val = curVals.get(col);
-          byte[] bytes = new byte[val.getSize()];
-          System.arraycopy(val.get(), 0, bytes, 0, bytes.length);
-          int curval = Integer.parseInt(new String(bytes).trim());
-
+          byte [] val = curVals.get(col);
+          int curval = Integer.parseInt(new String(val).trim());
           for (int j = 0; j < cols.length; j++) {
             if (col.compareTo(cols[j]) == 0) {
               assertEquals("Value for " + col + " should be: " + k
@@ -543,7 +520,7 @@
 
       // Write to the HRegion
       long writeid = region.startUpdate(new Text("row_" + k));
-      region.put(writeid, CONTENTS_BODY, new BytesWritable(buf1.toString().getBytes()));
+      region.put(writeid, CONTENTS_BODY, buf1.toString().getBytes());
       region.commit(writeid);
       if (k > 0 && k % (N_ROWS / 100) == 0) {
         System.out.println("Flushing write #" + k);
@@ -660,15 +637,13 @@
       int contentsFetched = 0;
       int anchorFetched = 0;
       HStoreKey curKey = new HStoreKey();
-      TreeMap<Text, BytesWritable> curVals = new TreeMap<Text, BytesWritable>();
+      TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>();
       int k = 0;
       while(s.next(curKey, curVals)) {
         for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
-          BytesWritable val = curVals.get(col);
-          byte[] bytes = new byte[val.getSize()];
-          System.arraycopy(val.get(), 0, bytes, 0, bytes.length);
-          String curval = new String(bytes).trim();
+          byte [] val = curVals.get(col);
+          String curval = new String(val).trim();
 
           if(col.compareTo(CONTENTS_BASIC) == 0) {
             assertTrue("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
@@ -715,15 +690,13 @@
     try {
       int numFetched = 0;
       HStoreKey curKey = new HStoreKey();
-      TreeMap<Text, BytesWritable> curVals = new TreeMap<Text, BytesWritable>();
+      TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>();
       int k = 0;
       while(s.next(curKey, curVals)) {
         for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
-          BytesWritable val = curVals.get(col);
-          byte[] bytes = new byte[val.getSize()];
-          System.arraycopy(val.get(), 0, bytes, 0, bytes.length);
-          int curval = Integer.parseInt(new String(bytes).trim());
+          byte [] val = curVals.get(col);
+          int curval = Integer.parseInt(new String(val).trim());
 
           for (int j = 0; j < cols.length; j++) {
             if (col.compareTo(cols[j]) == 0) {
@@ -754,13 +727,12 @@
       try {
         int numFetched = 0;
         HStoreKey curKey = new HStoreKey();
-        TreeMap<Text, BytesWritable> curVals = new TreeMap<Text, BytesWritable>();
+        TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>();
         int k = 0;
         while(s.next(curKey, curVals)) {
           for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
             Text col = it.next();
-            BytesWritable val = curVals.get(col);
-
+            byte [] val = curVals.get(col);
             assertTrue(col.compareTo(CONTENTS_BODY) == 0);
             assertNotNull(val);
             numFetched++;
@@ -792,7 +764,7 @@
     try {
       int fetched = 0;
       HStoreKey curKey = new HStoreKey();
-      TreeMap<Text, BytesWritable> curVals = new TreeMap<Text, BytesWritable>();
+      TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>();
       while(s.next(curKey, curVals)) {
         for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           it.next();

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner.java?view=diff&rev=548523&r1=548522&r2=548523
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner.java Mon Jun 18 15:59:14 2007
@@ -24,7 +24,6 @@
 import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.Text;
 
@@ -52,8 +51,8 @@
   private DataInputBuffer in = new DataInputBuffer();
 
   /** Compare the HRegionInfo we read from HBase to what we stored */
-  private void validateRegionInfo(BytesWritable regionBytes) throws IOException {
-    in.reset(regionBytes.get(), regionBytes.getSize());
+  private void validateRegionInfo(byte [] regionBytes) throws IOException {
+    in.reset(regionBytes, regionBytes.length);
     HRegionInfo info = new HRegionInfo();
     info.readFields(in);
     
@@ -69,7 +68,7 @@
       throws IOException {
     
     HInternalScannerInterface scanner = null;
-    TreeMap<Text, BytesWritable> results = new TreeMap<Text, BytesWritable>();
+    TreeMap<Text, byte []> results = new TreeMap<Text, byte []>();
     HStoreKey key = new HStoreKey();
 
     Text[][] scanColumns = {
@@ -82,21 +81,15 @@
         scanner = region.getScanner(scanColumns[i], FIRST_ROW);
         while(scanner.next(key, results)) {
           assertTrue(results.containsKey(HConstants.COL_REGIONINFO));
-          BytesWritable val = results.get(HConstants.COL_REGIONINFO); 
-          byte[] bytes = new byte[val.getSize()];
-          System.arraycopy(val.get(), 0, bytes, 0, bytes.length);
-          
-          validateRegionInfo(new BytesWritable(bytes));
-          
+          byte [] val = results.get(HConstants.COL_REGIONINFO); 
+          validateRegionInfo(val);
           if(validateStartcode) {
             assertTrue(results.containsKey(HConstants.COL_STARTCODE));
             val = results.get(HConstants.COL_STARTCODE);
             assertNotNull(val);
-            bytes = new byte[val.getSize()];
-            System.arraycopy(val.get(), 0, bytes, 0, bytes.length);
-            assertFalse(bytes.length == 0);
+            assertFalse(val.length == 0);
             long startCode = 
-              Long.valueOf(new String(bytes, HConstants.UTF8_ENCODING));
+              Long.valueOf(new String(val, HConstants.UTF8_ENCODING));
             assertEquals(START_CODE, startCode);
           }
           
@@ -104,10 +97,8 @@
             assertTrue(results.containsKey(HConstants.COL_SERVER));
             val = results.get(HConstants.COL_SERVER);
             assertNotNull(val);
-            bytes = new byte[val.getSize()];
-            System.arraycopy(val.get(), 0, bytes, 0, bytes.length);
-            assertFalse(bytes.length == 0);
-            String server = new String(bytes, HConstants.UTF8_ENCODING);
+            assertFalse(val.length == 0);
+            String server = new String(val, HConstants.UTF8_ENCODING);
             assertEquals(0, server.compareTo(serverName));
           }
           results.clear();
@@ -128,7 +119,7 @@
 
   /** Use get to retrieve the HRegionInfo and validate it */
   private void getRegionInfo() throws IOException {
-    BytesWritable bytes = region.get(ROW_KEY, HConstants.COL_REGIONINFO);
+    byte [] bytes = region.get(ROW_KEY, HConstants.COL_REGIONINFO);
     validateRegionInfo(bytes);  
   }
  
@@ -163,8 +154,7 @@
       ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
       DataOutputStream s = new DataOutputStream(byteStream);
       HGlobals.rootRegionInfo.write(s);
-      region.put(lockid, HConstants.COL_REGIONINFO,
-          new BytesWritable(byteStream.toByteArray()));
+      region.put(lockid, HConstants.COL_REGIONINFO, byteStream.toByteArray());
       region.commit(lockid);
 
       // What we just committed is in the memcache. Verify that we can get
@@ -191,11 +181,10 @@
       lockid = region.startUpdate(ROW_KEY);
 
       region.put(lockid, HConstants.COL_SERVER, 
-          new BytesWritable(address.toString().getBytes(HConstants.UTF8_ENCODING)));
+        address.toString().getBytes(HConstants.UTF8_ENCODING));
 
       region.put(lockid, HConstants.COL_STARTCODE, 
-          new BytesWritable(
-              String.valueOf(START_CODE).getBytes(HConstants.UTF8_ENCODING)));
+        String.valueOf(START_CODE).getBytes(HConstants.UTF8_ENCODING));
 
       region.commit(lockid);
       
@@ -232,7 +221,7 @@
       lockid = region.startUpdate(ROW_KEY);
 
       region.put(lockid, HConstants.COL_SERVER, 
-          new BytesWritable(address.toString().getBytes(HConstants.UTF8_ENCODING)));
+        address.toString().getBytes(HConstants.UTF8_ENCODING));
 
       region.commit(lockid);
       

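The TestScanner changes above keep the same Writable round trip for HRegionInfo, just without the BytesWritable wrapper: the region info is written through a DataOutputStream into a byte [], stored with put, and later handed to DataInputBuffer.reset() for readFields. A minimal sketch of that round trip, assembled from the lines in the hunks above (imports assumed: java.io.ByteArrayOutputStream, java.io.DataOutputStream, org.apache.hadoop.io.DataInputBuffer):

    // Serialize the root region's HRegionInfo to the raw bytes that get stored
    // under HConstants.COL_REGIONINFO ...
    ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(byteStream);
    HGlobals.rootRegionInfo.write(out);
    byte [] regionBytes = byteStream.toByteArray();

    // ... and deserialize them again, as validateRegionInfo now does directly
    // from the byte [] returned by HRegion.get().
    DataInputBuffer in = new DataInputBuffer();
    in.reset(regionBytes, regionBytes.length);
    HRegionInfo info = new HRegionInfo();
    info.readFields(in);
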
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java?view=diff&rev=548523&r1=548522&r2=548523
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java Mon Jun 18 15:59:14 2007
@@ -89,10 +89,7 @@
         }
 
         for (int i = 0; i < values.length; i++) {
-          byte[] bytes = new byte[values[i].getData().getSize()];
-          System.arraycopy(values[i].getData().get(), 0, bytes, 0,
-            bytes.length);
-          results.put(values[i].getKey().getColumn(), bytes);
+          results.put(values[i].getKey().getColumn(), values[i].getData());
         }
 
         HRegionInfo info = HRegion.getRegionInfo(results);


