hbase-commits mailing list archives

From: st...@apache.org
Subject: svn commit: r656868 [7/10] - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/ src/java/org/apache/hadoop/hbase/filter/ src/java/org/apache/hadoop/hbase/hql/ src/java/org/apache/hadoop/hbase/io/ src/j...
Date: Thu, 15 May 2008 22:10:50 GMT
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/JenkinsHash.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/JenkinsHash.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/JenkinsHash.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/JenkinsHash.java Thu May 15 15:10:47 2008
@@ -24,13 +24,19 @@
 import java.io.IOException;
 
 /**
- * lookup3.c, by Bob Jenkins, May 2006, Public Domain.
- * <a href="http://burtleburtle.net/bob/c/lookup3.c">lookup3.c</a>
+ * Produces 32-bit hash for hash table lookup.
+ * 
+ * <pre>lookup3.c, by Bob Jenkins, May 2006, Public Domain.
  *
  * You can use this free for any purpose.  It's in the public domain.
  * It has no warranty.
+ * </pre>
  * 
- * Produces 32-bit hash for hash table lookup.
+ * @see <a href="http://burtleburtle.net/bob/c/lookup3.c">lookup3.c</a>
+ * @see <a href="http://www.ddj.com/184410284">Hash Functions (and how this
+ * function compares to others such as CRC, MD?, etc.)</a>
+ * @see <a href="http://burtleburtle.net/bob/hash/doobs.html">Has update on the
+ * Dr. Dobbs Article</a>
  */
 public class JenkinsHash {
   private static long INT_MASK  = 0x00000000ffffffffL;
@@ -44,6 +50,16 @@
   /**
    * Alternate form for hashing an entire byte array
    * 
+   * @param bytes
+   * @return hash value
+   */
+  public static int hash(byte[] bytes) {
+    return hash(bytes, bytes.length, -1);
+  }
+  
+  /**
+   * Alternate form for hashing an entire byte array
+   * 
    * @param bytes 
    * @param initval
    * @return hash value
@@ -62,18 +78,19 @@
    * return value.  Two keys differing by one or two bits will have totally
    * different hash values.
    * 
-   * The best hash table sizes are powers of 2.  There is no need to do mod a
-   * prime (mod is sooo slow!).  If you need less than 32 bits, use a bitmask.
-   * For example, if you need only 10 bits, do h = (h & hashmask(10));
+   * <p>The best hash table sizes are powers of 2.  There is no need to do mod
+   * a prime (mod is sooo slow!).  If you need less than 32 bits, use a bitmask.
+   * For example, if you need only 10 bits, do
+   * <code>h = (h & hashmask(10));</code>
    * In which case, the hash table should have hashsize(10) elements.
    * 
-   * If you are hashing n strings byte[][] k, do it like this:
+   * <p>If you are hashing n strings byte[][] k, do it like this:
    * for (int i = 0, h = 0; i < n; ++i) h = hash( k[i], h);
    * 
-   * By Bob Jenkins, 2006.  bob_jenkins@burtleburtle.net.  You may use this
+   * <p>By Bob Jenkins, 2006.  bob_jenkins@burtleburtle.net.  You may use this
    * code any way you wish, private, educational, or commercial.  It's free.
    * 
-   * Use for hash table lookup, or anything where one collision in 2^^32 is
+   * <p>Use for hash table lookup, or anything where one collision in 2^^32 is
    * acceptable.  Do NOT use for cryptographic purposes.
   */
   public static int hash(byte[] key, int nbytes, int initval) {
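
A minimal usage sketch of the pattern the Javadoc above describes, assuming
the two-argument hash(byte[], int) overload documented in this hunk (the keys
and the 10-bit table size are illustrative):

import org.apache.hadoop.hbase.util.JenkinsHash;

public class JenkinsHashExample {
  public static void main(String[] args) {
    // Chain the hash across several byte arrays by feeding each result
    // back in as the next call's initial value.
    byte[][] keys = {"row_aaa".getBytes(), "row_bbb".getBytes()};
    int h = 0;
    for (int i = 0; i < keys.length; i++) {
      h = JenkinsHash.hash(keys[i], h);
    }
    // Size the table as a power of two and mask instead of taking a
    // modulus: here, keep only the low 10 bits.
    int bucket = h & ((1 << 10) - 1);
    System.out.println("bucket=" + bucket);
  }
}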

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Merge.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Merge.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Merge.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Merge.java Thu May 15 15:10:47 2008
@@ -27,7 +27,6 @@
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableComparator;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.Tool;
@@ -50,9 +49,9 @@
   private final HBaseConfiguration conf;
   private Path rootdir;
   private volatile MetaUtils utils;
-  private Text tableName;               // Name of table
-  private volatile Text region1;        // Name of region 1
-  private volatile Text region2;        // Name of region 2
+  private byte [] tableName;               // Name of table
+  private volatile byte [] region1;        // Name of region 1
+  private volatile byte [] region2;        // Name of region 2
   private volatile boolean isMetaTable;
   private volatile HRegionInfo mergeInfo;
 
@@ -109,7 +108,7 @@
       return 0;
     } catch (Exception e) {
       LOG.fatal("Merge failed", e);
-      utils.scanMetaRegion(HRegionInfo.firstMetaRegionInfo,
+      utils.scanMetaRegion(HRegionInfo.FIRST_META_REGIONINFO,
           new MetaUtils.ScannerListener() {
             public boolean processRow(HRegionInfo info) {
               System.err.println(info.toString());
@@ -154,12 +153,12 @@
   
   private static class MetaScannerListener
   implements MetaUtils.ScannerListener {
-    private final Text region1;
-    private final Text region2;
+    private final byte [] region1;
+    private final byte [] region2;
     private HRegionInfo meta1 = null;
     private HRegionInfo meta2 = null;
     
-    MetaScannerListener(Text region1, Text region2) {
+    MetaScannerListener(final byte [] region1, final byte [] region2) {
       this.region1 = region1;
       this.region2 = region2;
     }
@@ -214,7 +213,7 @@
     }
 
     HRegion metaRegion2 = null;
-    if (meta1.getRegionName().equals(meta2.getRegionName())) {
+    if (Bytes.equals(meta1.getRegionName(), meta2.getRegionName())) {
       metaRegion2 = metaRegion1;
     } else {
       metaRegion2 = utils.getMetaRegion(meta2);
@@ -236,9 +235,9 @@
           merged.getRegionName());
     }
     HRegion mergeMeta = null;
-    if (mergedInfo.getRegionName().equals(meta1.getRegionName())) {
+    if (Bytes.equals(mergedInfo.getRegionName(), meta1.getRegionName())) {
       mergeMeta = metaRegion1;
-    } else if (mergedInfo.getRegionName().equals(meta2.getRegionName())) {
+    } else if (Bytes.equals(mergedInfo.getRegionName(), meta2.getRegionName())) {
       mergeMeta = metaRegion2;
     } else {
       mergeMeta = utils.getMetaRegion(mergedInfo);
@@ -330,32 +329,30 @@
       usage();
       return -1;
     }
-    tableName = new Text(remainingArgs[0]);
-    isMetaTable = tableName.compareTo(HConstants.META_TABLE_NAME) == 0;
+    tableName = Bytes.toBytes(remainingArgs[0]);
+    isMetaTable = Bytes.compareTo(tableName, HConstants.META_TABLE_NAME) == 0;
     
-    region1 = new Text(remainingArgs[1]);
-    region2 = new Text(remainingArgs[2]);
+    region1 = Bytes.toBytes(remainingArgs[1]);
+    region2 = Bytes.toBytes(remainingArgs[2]);
     int status = 0;
-    // Why we duplicate code here? St.Ack
-    if (WritableComparator.compareBytes(
-        tableName.getBytes(), 0, tableName.getLength(),
-        region1.getBytes(), 0, tableName.getLength()) != 0) {
-      LOG.error("Region " + region1 + " does not belong to table " + tableName);
+    if (notInTable(tableName, region1) || notInTable(tableName, region2)) {
       status = -1;
-    }
-    if (WritableComparator.compareBytes(
-        tableName.getBytes(), 0, tableName.getLength(),
-        region2.getBytes(), 0, tableName.getLength()) != 0) {
-      LOG.error("Region " + region2 + " does not belong to table " + tableName);
-      status = -1;
-    }
-    if (region1.equals(region2)) {
+    } else if (Bytes.equals(region1, region2)) {
       LOG.error("Can't merge a region with itself");
       status = -1;
     }
     return status;
   }
   
+  private boolean notInTable(final byte [] tn, final byte [] rn) {
+    if (WritableComparator.compareBytes(tn, 0, tn.length, rn, 0, tn.length) != 0) {
+      LOG.error("Region " + Bytes.toString(rn) + " does not belong to table " +
+        Bytes.toString(tn));
+      return true;
+    }
+    return false;
+  }
+  
   private void usage() {
     System.err.println(
         "Usage: bin/hbase merge <table-name> <region-1> <region-2>\n");

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/MetaUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/MetaUtils.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/MetaUtils.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/MetaUtils.java Thu May 15 15:10:47 2008
@@ -22,9 +22,10 @@
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
 import java.util.SortedMap;
 import java.util.TreeMap;
-import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -40,7 +41,6 @@
 import org.apache.hadoop.hbase.regionserver.HLog;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
-import org.apache.hadoop.io.Text;
 
 /**
  * Contains utility methods for manipulating HBase meta tables
@@ -54,7 +54,8 @@
   private Path rootdir;
   private HLog log;
   private HRegion rootRegion;
-  private ConcurrentHashMap<Text, HRegion> metaRegions;
+  private Map<byte [], HRegion> metaRegions = Collections.synchronizedSortedMap(
+    new TreeMap<byte [], HRegion>(Bytes.BYTES_COMPARATOR));
   
   /** Default constructor */
   public MetaUtils() {
@@ -67,7 +68,6 @@
     conf.setInt("hbase.client.retries.number", 1);
     this.initialized = false;
     this.rootRegion = null;
-    this.metaRegions = new ConcurrentHashMap<Text, HRegion>();
   }
 
   /**
@@ -220,7 +220,8 @@
 
     try {
       HStoreKey key = new HStoreKey();
-      SortedMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
+      SortedMap<byte [], byte[]> results =
+        new TreeMap<byte [], byte[]>(Bytes.BYTES_COMPARATOR);
       while (rootScanner.next(key, results)) {
         HRegionInfo info = Writables.getHRegionInfoOrNull(
             results.get(HConstants.COL_REGIONINFO));
@@ -234,7 +235,6 @@
         }
         results.clear();
       }
-
     } finally {
       rootScanner.close();
     }
@@ -252,28 +252,38 @@
    * @throws IOException
    */
   public void scanMetaRegion(HRegionInfo metaRegionInfo,
-      ScannerListener listener) throws IOException {
+    ScannerListener listener)
+  throws IOException {
     if (!initialized) {
       throw new IllegalStateException("Must call initialize method first.");
     }
-    
     // Open meta region so we can scan it
-
     HRegion metaRegion = openMetaRegion(metaRegionInfo);
-
-    InternalScanner metaScanner = metaRegion.getScanner(
-        HConstants.COL_REGIONINFO_ARRAY, HConstants.EMPTY_START_ROW,
-        HConstants.LATEST_TIMESTAMP, null);
-
+    scanMetaRegion(metaRegion, listener);
+  }
+  
+  /**
+   * Scan the passed in metaregion <code>m</code> invoking the passed
+   * <code>listener</code> per row found.
+   * @param m
+   * @param listener
+   * @throws IOException
+   */
+  public void scanMetaRegion(final HRegion m,
+      final ScannerListener listener)
+  throws IOException {
+    InternalScanner metaScanner = m.getScanner(HConstants.COL_REGIONINFO_ARRAY,
+      HConstants.EMPTY_START_ROW, HConstants.LATEST_TIMESTAMP, null);
     try {
       HStoreKey key = new HStoreKey();
-      SortedMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
+      SortedMap<byte[], byte[]> results =
+        new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
       while (metaScanner.next(key, results)) {
-        HRegionInfo info = Writables.getHRegionInfoOrNull(
-            results.get(HConstants.COL_REGIONINFO));
+        HRegionInfo info =
+          Writables.getHRegionInfoOrNull(results.get(HConstants.COL_REGIONINFO));
         if (info == null) {
-          LOG.warn("region info is null for row " + key.getRow() +
-              " in table " + HConstants.META_TABLE_NAME);
+          LOG.warn("regioninfo null for row " + key.getRow() + " in table " +
+            Bytes.toString(m.getTableDesc().getName()));
           continue;
         }
         if (!listener.processRow(info)) {
@@ -281,14 +291,13 @@
         }
         results.clear();
       }
-
     } finally {
       metaScanner.close();
     }
   }
   
   private void openRootRegion() throws IOException {
-    this.rootRegion = HRegion.openHRegion(HRegionInfo.rootRegionInfo,
+    this.rootRegion = HRegion.openHRegion(HRegionInfo.ROOT_REGIONINFO,
         this.rootdir, this.log, this.conf);
     this.rootRegion.compactStores();
   }
@@ -314,7 +323,7 @@
    * @throws IOException
    */
   public static void changeOnlineStatus (final HBaseConfiguration c,
-      final Text row, final boolean onlineOffline)
+      final byte [] row, final boolean onlineOffline)
   throws IOException {
     HTable t = new HTable(c, HConstants.META_TABLE_NAME);
     Cell cell = t.get(row, HConstants.COL_REGIONINFO);
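
Because byte[] has identity-based equals and hashCode, a plain hash map
cannot key on row or region names once Text goes away; hence the switch to a
synchronized TreeMap with Bytes.BYTES_COMPARATOR. A small sketch of the
difference (illustrative only):

import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.hbase.util.Bytes;

public class ByteArrayKeys {
  public static void main(String[] args) {
    Map<byte[], String> hashed = new HashMap<byte[], String>();
    hashed.put(Bytes.toBytes("region1"), "r1");
    // Misses: a fresh array with equal content is a different key.
    System.out.println(hashed.get(Bytes.toBytes("region1"))); // null

    Map<byte[], String> sorted =
        new TreeMap<byte[], String>(Bytes.BYTES_COMPARATOR);
    sorted.put(Bytes.toBytes("region1"), "r1");
    // Hits: the comparator orders keys by content, not identity.
    System.out.println(sorted.get(Bytes.toBytes("region1"))); // r1
  }
}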

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java Thu May 15 15:10:47 2008
@@ -44,8 +44,6 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 
-import org.apache.hadoop.io.Text;
-
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@@ -231,7 +229,7 @@
       // find root region
 
       String rootRegion = OLD_PREFIX +
-        HRegionInfo.rootRegionInfo.getEncodedName();
+        HRegionInfo.ROOT_REGIONINFO.getEncodedName();
 
       if (!fs.exists(new Path(rootdir, rootRegion))) {
         throw new IOException("Cannot find root region " + rootRegion);
@@ -264,7 +262,7 @@
   
   private void checkNewRootRegionDirExists() throws IOException {
     Path rootRegionDir =
-      HRegion.getRegionDir(rootdir, HRegionInfo.rootRegionInfo);
+      HRegion.getRegionDir(rootdir, HRegionInfo.ROOT_REGIONINFO);
     newRootRegion = fs.exists(rootRegionDir);
     migrationNeeded = !newRootRegion;
   }
@@ -340,8 +338,8 @@
     }
   }
   
-  void migrateRegionDir(Text tableName, String oldPath)throws IOException {
-
+  void migrateRegionDir(final byte [] tableName, String oldPath)
+  throws IOException {
     // Create directory where table will live
 
     Path tableDir = new Path(rootdir, tableName.toString());

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/SoftSortedMap.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/SoftSortedMap.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/SoftSortedMap.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/SoftSortedMap.java Thu May 15 15:10:47 2008
@@ -38,15 +38,22 @@
  * play well with the GC when in a low-memory situation.
  */
 public class SoftSortedMap<K,V> implements SortedMap<K,V> {
-  protected static final Log LOG = LogFactory.getLog(SoftSortedMap.class);  
-  
-  protected SortedMap<K, SoftValue<K,V>> internalMap = 
-    new TreeMap<K, SoftValue<K,V>>();
-  
-  protected ReferenceQueue referenceQueue = new ReferenceQueue();
+  private static final Log LOG = LogFactory.getLog(SoftSortedMap.class);  
+  private final SortedMap<K, SoftValue<K,V>> internalMap;
+  private ReferenceQueue<K> referenceQueue = new ReferenceQueue<K>();
   
   /** Constructor */
-  public SoftSortedMap() {}
+  public SoftSortedMap() {
+    this(new TreeMap<K, SoftValue<K,V>>());
+  }
+  
+  /**
+   * Constructor
+   * @param c
+   */
+  public SoftSortedMap(final Comparator<K> c) {
+    this(new TreeMap<K, SoftValue<K,V>>(c));
+  }
   
   /** For headMap and tailMap support */
   private SoftSortedMap(SortedMap<K,SoftValue<K,V>> original) {
@@ -61,24 +68,22 @@
     return oldValue == null ? null : oldValue.get();
   }
   
-  public void putAll(Map map) {
+  @SuppressWarnings("unchecked")
+  public void putAll(@SuppressWarnings("unused") Map map) {
     throw new RuntimeException("Not implemented");
   }
   
   public V get(Object key) {
     checkReferences();
     SoftValue<K,V> value = internalMap.get(key);
-    
     if (value == null) {
       return null;
-    } else {
-      if (value.get() == null) {
-        internalMap.remove(key);
-        return null;
-      } else {
-        return value.get();
-      }
     }
+    if (value.get() == null) {
+      internalMap.remove(key);
+      return null;
+    }
+    return value.get();
   }
 
   public V remove(Object key) {
@@ -92,7 +97,7 @@
     return internalMap.containsKey(key);
   }
   
-  public boolean containsValue(Object value) {
+  public boolean containsValue(@SuppressWarnings("unused") Object value) {
 /*    checkReferences();
     return internalMap.containsValue(value);*/
     throw new UnsupportedOperationException("Don't support containsValue!");
@@ -141,6 +146,7 @@
     return internalMap.keySet();
   }
 
+  @SuppressWarnings("unchecked")
   public Comparator comparator() {
     return internalMap.comparator();
   }
@@ -169,11 +175,13 @@
    */ 
   private void checkReferences() {
     SoftValue<K,V> sv;
-    while((sv = (SoftValue<K,V>)referenceQueue.poll()) != null) {
+    Object obj;
+    while((obj = referenceQueue.poll()) != null) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Reference for key " + sv.key.toString() + " has been cleared.");
+        LOG.debug("Reference for key " + ((SoftValue<K,V>)obj).key.toString() +
+          " has been cleared.");
       }
-      internalMap.remove(sv.key);
+      internalMap.remove(((SoftValue<K,V>)obj).key);
     }
   }
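
The cleaned-up checkReferences drains the ReferenceQueue and removes entries
whose soft values the GC has already cleared. A generic sketch of the same
soft-value pattern, independent of the HBase class (all names here are
illustrative):

import java.lang.ref.ReferenceQueue;
import java.lang.ref.SoftReference;
import java.util.Map;
import java.util.TreeMap;

public class SoftValueSketch {
  // A SoftReference that remembers its map key so the entry can be
  // removed once the GC clears the referent.
  static class SoftValue<K, V> extends SoftReference<V> {
    final K key;
    SoftValue(K key, V value, ReferenceQueue<V> q) {
      super(value, q);
      this.key = key;
    }
  }

  private final Map<String, SoftValue<String, byte[]>> map =
      new TreeMap<String, SoftValue<String, byte[]>>();
  private final ReferenceQueue<byte[]> queue = new ReferenceQueue<byte[]>();

  void put(String k, byte[] v) {
    drain();
    map.put(k, new SoftValue<String, byte[]>(k, v, queue));
  }

  byte[] get(String k) {
    drain();
    SoftValue<String, byte[]> sv = map.get(k);
    return sv == null ? null : sv.get();
  }

  // Remove map entries whose referents were cleared by the GC.
  @SuppressWarnings("unchecked")
  private void drain() {
    Object ref;
    while ((ref = queue.poll()) != null) {
      map.remove(((SoftValue<String, byte[]>) ref).key);
    }
  }
}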
   

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Writables.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Writables.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Writables.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Writables.java Thu May 15 15:10:47 2008
@@ -22,14 +22,11 @@
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
-import java.io.UnsupportedEncodingException;
 
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.io.DataInputBuffer;
-import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.hbase.io.Cell;
 
 /**
  * Utility class with methods for manipulating Writable objects
@@ -121,9 +118,8 @@
   public static HRegionInfo getHRegionInfo(final Cell cell) throws IOException {
     if (cell == null) {
       return null;
-    } else {
-      return getHRegionInfo(cell.getValue());
     }
+    return getHRegionInfo(cell.getValue());
   }
 
   /**
@@ -135,10 +131,18 @@
    */
   public static Writable copyWritable(final Writable src, final Writable tgt)
   throws IOException {
-    if (src == null || tgt == null) {
-      throw new IllegalArgumentException("Writables cannot be null");
-    }
-    byte [] bytes = getBytes(src);
+    return copyWritable(getBytes(src), tgt);
+  }
+
+  /**
+   * Copy one Writable to another.  Copies bytes using data streams.
+   * @param bytes Source Writable
+   * @param tgt Target Writable
+   * @return The target Writable.
+   * @throws IOException
+   */
+  public static Writable copyWritable(final byte [] bytes, final Writable tgt)
+  throws IOException {
     DataInputStream dis = new DataInputStream(new ByteArrayInputStream(bytes));
     try {
       tgt.readFields(dis);
@@ -147,73 +151,26 @@
     }
     return tgt;
   }
-  
-  /**
-   * Convert a long value to a byte array
-   * @param val
-   * @return the byte array
-   * @throws IOException
-   */
-  public static byte[] longToBytes(long val) throws IOException {
-    return getBytes(new LongWritable(val));
-  }
-  
-  /**
-   * Converts a byte array to a long value
-   * @param bytes
-   * @return the long value
-   * @throws IOException
-   */
-  public static long bytesToLong(byte[] bytes) throws IOException {
-    if (bytes == null || bytes.length == 0) {
-      return -1L;
-    }
-    return ((LongWritable) getWritable(bytes, new LongWritable())).get();
-  }
-  
-  /**
-   * Converts a string to a byte array in a consistent manner.
-   * @param s
-   * @return the byte array
-   * @throws UnsupportedEncodingException
-   */
-  public static byte[] stringToBytes(String s)
-  throws UnsupportedEncodingException {
-    if (s == null) {
-      throw new IllegalArgumentException("string cannot be null");
-    }
-    return s.getBytes(HConstants.UTF8_ENCODING);
-  }
-  
+
   /**
-   * Converts a byte array to a string in a consistent manner.
-   * @param bytes
-   * @return the string
-   * @throws UnsupportedEncodingException
+   * @param c
+   * @return Cell value as a UTF-8 String
    */
-  public static String bytesToString(byte[] bytes)
-  throws UnsupportedEncodingException {
-    if (bytes == null || bytes.length == 0) {
-      return "";
-    }
-    return new String(bytes, HConstants.UTF8_ENCODING);
-  }
-  
-  public static String cellToString(Cell c) 
-  throws UnsupportedEncodingException {
+  public static String cellToString(Cell c) {
     if (c == null) {
       return "";
-    } else {
-      return bytesToString(c.getValue());
     }
+    return Bytes.toString(c.getValue());
   }
   
-  public static long cellToLong(Cell c) 
-  throws IOException {
+  /**
+   * @param c
+   * @return Cell as a long.
+   */
+  public static long cellToLong(Cell c) {
     if (c == null) {
       return 0;
-    } else {
-      return bytesToLong(c.getValue());
     }
+    return Bytes.toLong(c.getValue());
   }
-}
+}
\ No newline at end of file
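
The new copyWritable(byte[], Writable) overload simply replays serialized
bytes into the target's readFields. A sketch of the same round trip using
stock Hadoop types rather than HBase-specific ones:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;

public class WritableRoundTrip {
  public static void main(String[] args) throws IOException {
    IntWritable src = new IntWritable(42);
    // Serialize the source Writable to a byte array.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(baos);
    src.write(dos);
    dos.close();
    // Replay the bytes into the target, as copyWritable does above.
    IntWritable tgt = new IntWritable();
    DataInputStream dis =
        new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
    tgt.readFields(dis);
    dis.close();
    System.out.println(tgt.get()); // 42
  }
}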

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java Thu May 15 15:10:47 2008
@@ -23,19 +23,17 @@
 import java.io.UnsupportedEncodingException;
 import java.util.Random;
 
-import org.apache.hadoop.dfs.MiniDFSCluster;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.BatchUpdate;
-import org.apache.hadoop.io.Text;
-import org.apache.log4j.Logger;
-
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.log4j.Logger;
 
 /** Abstract base class for merge tests */
 public abstract class AbstractMergeTestBase extends HBaseClusterTestCase {
   static final Logger LOG =
     Logger.getLogger(AbstractMergeTestBase.class.getName());
-  protected static final Text COLUMN_NAME = new Text("contents:");
+  protected static final byte [] COLUMN_NAME = Bytes.toBytes("contents:");
   protected final Random rand = new Random();
   protected HTableDescriptor desc;
   protected ImmutableBytesWritable value;
@@ -65,8 +63,8 @@
     } catch (UnsupportedEncodingException e) {
       fail();
     }
-    desc = new HTableDescriptor("test");
-    desc.addFamily(new HColumnDescriptor(COLUMN_NAME.toString()));
+    desc = new HTableDescriptor(Bytes.toBytes("test"));
+    desc.addFamily(new HColumnDescriptor(COLUMN_NAME));
   }
 
   @Override
@@ -90,8 +88,8 @@
     // To ensure that the first region is larger than 64MB we need to write at
     // least 65536 rows. We will make certain by writing 70000
 
-    Text row_70001 = new Text("row_70001");
-    Text row_80001 = new Text("row_80001");
+    byte [] row_70001 = Bytes.toBytes("row_70001");
+    byte [] row_80001 = Bytes.toBytes("row_80001");
     
     HRegion[] regions = {
       createAregion(null, row_70001, 1, 70000),
@@ -102,9 +100,9 @@
     // Now create the root and meta regions and insert the data regions
     // created above into the meta
     
-    HRegion root = HRegion.createHRegion(HRegionInfo.rootRegionInfo,
+    HRegion root = HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO,
       testDir, this.conf);
-    HRegion meta = HRegion.createHRegion(HRegionInfo.firstMetaRegionInfo,
+    HRegion meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO,
       testDir, this.conf);
     HRegion.addRegionToMETA(root, meta);
     
@@ -118,16 +116,17 @@
     meta.getLog().closeAndDelete();
   }
 
-  private HRegion createAregion(Text startKey, Text endKey, int firstRow,
+  private HRegion createAregion(byte [] startKey, byte [] endKey, int firstRow,
       int nrows) throws IOException {
     
     HRegion region = createNewHRegion(desc, startKey, endKey);
     
-    System.out.println("created region " + region.getRegionName());
+    System.out.println("created region " +
+        Bytes.toString(region.getRegionName()));
 
     HRegionIncommon r = new HRegionIncommon(region);
     for(int i = firstRow; i < firstRow + nrows; i++) {
-      BatchUpdate batchUpdate = new BatchUpdate(new Text("row_"
+      BatchUpdate batchUpdate = new BatchUpdate(Bytes.toBytes("row_"
           + String.format("%1$05d", i)));
 
       batchUpdate.put(COLUMN_NAME, value.get());

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/DisabledTestScanner2.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/DisabledTestScanner2.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/DisabledTestScanner2.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/DisabledTestScanner2.java Thu May 15 15:10:47 2008
@@ -26,39 +26,35 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.SortedMap;
 import java.util.TreeMap;
 import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Scanner;
 import org.apache.hadoop.hbase.filter.RegExpRowFilter;
 import org.apache.hadoop.hbase.filter.RowFilterInterface;
 import org.apache.hadoop.hbase.filter.RowFilterSet;
 import org.apache.hadoop.hbase.filter.StopRowFilter;
 import org.apache.hadoop.hbase.filter.WhileMatchRowFilter;
-import org.apache.hadoop.hbase.io.HbaseMapWritable;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.Scanner;
-
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.ipc.HRegionInterface;
-import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Writables;
 
 /**
  * Additional scanner tests.
- * {@link TestScanner} does a custom setup/takedown not conducive
+ * {@link org.apache.hadoop.hbase.regionserver.TestScanner} does a custom
+ * setup/takedown not conducive
  * to addition of extra scanning tests.
  *
  * <p>Temporarily disabled until hudson stabilizes again.
- * @see TestScanner
+ * @see org.apache.hadoop.hbase.regionserver.TestScanner
  */
 public class DisabledTestScanner2 extends HBaseClusterTestCase {
   final Log LOG = LogFactory.getLog(this.getClass().getName());
@@ -90,9 +86,9 @@
    */
   public void testScanningMultipleFamiliesOfDifferentVintage()
   throws MasterNotRunningException, IOException {
-    Text tableName = new Text(getName());
-    final Text [] families = createTable(new HBaseAdmin(this.conf), tableName);
-    HTable table = new HTable(this.conf, tableName);
+    final byte [][] families = createTable(new HBaseAdmin(this.conf),
+      getName());
+    HTable table = new HTable(this.conf, getName());
     Scanner scanner = null;
     try {
       long time = System.currentTimeMillis();
@@ -101,20 +97,20 @@
         final byte [] lastKey = new byte [] {'a', 'a', (byte)('b' + i)};
         Incommon inc = new HTableIncommon(table);
         addContent(inc, families[i].toString(),
-          START_KEY_BYTES, new Text(lastKey), time + (1000 * i));
+          START_KEY_BYTES, lastKey, time + (1000 * i));
         // Add in to the first store a record that is in excess of the stop
         // row specified below setting up the scanner filter.  Add 'bbb'.
         // Use a stop filter of 'aad'.  The store scanner going to 'bbb' was
         // flipping the switch in StopRowFilter stopping us returning all
         // of the rest of the other store content.
         if (i == 0) {
-          BatchUpdate batchUpdate = new BatchUpdate(new Text("bbb"));
+          BatchUpdate batchUpdate = new BatchUpdate(Bytes.toBytes("bbb"));
           batchUpdate.put(families[0], "bbb".getBytes());
           inc.commit(batchUpdate);
         }
       }
       RowFilterInterface f =
-        new WhileMatchRowFilter(new StopRowFilter(new Text("aad")));
+        new WhileMatchRowFilter(new StopRowFilter(Bytes.toBytes("aad")));
       scanner = table.getScanner(families, HConstants.EMPTY_START_ROW,
         HConstants.LATEST_TIMESTAMP, f);
       int count = 0;
@@ -132,14 +128,14 @@
    * @throws Exception
    */
   public void testStopRow() throws Exception {
-    Text tableName = new Text(getName());
-    createTable(new HBaseAdmin(this.conf), tableName);
-    HTable table = new HTable(this.conf, tableName);
+    createTable(new HBaseAdmin(this.conf), getName());
+    HTable table = new HTable(this.conf, getName());
     final String lastKey = "aac";
     addContent(new HTableIncommon(table), FIRST_COLKEY + ":");
-    Scanner scanner =
-      table.getScanner(new Text [] {new Text(FIRST_COLKEY + ":")},
-          HConstants.EMPTY_START_ROW, new Text(lastKey));
+    byte [][] cols = new byte [1][];
+    cols[0] = Bytes.toBytes(FIRST_COLKEY + ":");
+    Scanner scanner = table.getScanner(cols,
+      HConstants.EMPTY_START_ROW, Bytes.toBytes(lastKey));
     for (RowResult e: scanner) {
       if(e.getRow().toString().compareTo(lastKey) >= 0) {
         LOG.info(e.getRow());
@@ -170,16 +166,15 @@
     HBaseAdmin admin = new HBaseAdmin(conf);
     
     // Setup colkeys to be inserted
-    Text tableName = new Text(getName());
-    createTable(admin, tableName);
-    HTable table = new HTable(this.conf, tableName);
+    createTable(admin, getName());
+    HTable table = new HTable(this.conf, getName());
     // Add a row to columns without qualifiers and then two with.  Make one
     // numbers only so easy to find w/ a regex.
-    BatchUpdate batchUpdate = new BatchUpdate(new Text(getName()));
+    BatchUpdate batchUpdate = new BatchUpdate(getName());
     final String firstColkeyFamily = Character.toString(FIRST_COLKEY) + ":";
-    batchUpdate.put(new Text(firstColkeyFamily + getName()), GOOD_BYTES);
-    batchUpdate.put(new Text(firstColkeyFamily + "22222"), GOOD_BYTES);
-    batchUpdate.put(new Text(firstColkeyFamily), GOOD_BYTES);
+    batchUpdate.put(firstColkeyFamily + getName(), GOOD_BYTES);
+    batchUpdate.put(firstColkeyFamily + "22222", GOOD_BYTES);
+    batchUpdate.put(firstColkeyFamily, GOOD_BYTES);
     table.commit(batchUpdate);
     // Now do a scan using a regex for a column name.
     checkRegexingScanner(table, firstColkeyFamily + "\\d+");
@@ -197,12 +192,12 @@
   private void checkRegexingScanner(final HTable table, 
     final String regexColumnname) 
   throws IOException {
-    Text [] regexCol = new Text [] {new Text(regexColumnname)};
-    Scanner scanner =
-      table.getScanner(regexCol, HConstants.EMPTY_START_ROW);
+    byte [][] regexCols = new byte[1][];
+    regexCols[0] = Bytes.toBytes(regexColumnname);
+    Scanner scanner = table.getScanner(regexCols, HConstants.EMPTY_START_ROW);
     int count = 0;
     for (RowResult r : scanner) {
-      for (Text c: r.keySet()) {
+      for (byte [] c: r.keySet()) {
         System.out.println(c);
         assertTrue(c.toString().matches(regexColumnname));
         count++;
@@ -222,14 +217,13 @@
     HBaseAdmin admin = new HBaseAdmin(conf);
     
     // Setup colkeys to be inserted
-    Text tableName = new Text(getName());
-    Text [] colKeys = createTable(admin, tableName);
+    byte [][] colKeys = createTable(admin, getName());
     assertTrue("Master is running.", admin.isMasterRunning());
     
     // Enter data
-    HTable table = new HTable(conf, tableName);
+    HTable table = new HTable(conf, getName());
     for (char i = FIRST_ROWKEY; i <= LAST_ROWKEY; i++) {
-      Text rowKey = new Text(new String(new char[] { i }));
+      byte [] rowKey = new byte [] { (byte)i };
       BatchUpdate batchUpdate = new BatchUpdate(rowKey);
       for (char j = 0; j < colKeys.length; j++) {
         batchUpdate.put(colKeys[j], (i >= FIRST_BAD_RANGE_ROWKEY && 
@@ -248,14 +242,13 @@
    * @return Returns column keys used making table.
    * @throws IOException
    */
-  private Text [] createTable(final HBaseAdmin admin, final Text tableName)
+  private byte [][] createTable(final HBaseAdmin admin, final String tableName)
   throws IOException {
     // Setup colkeys to be inserted
     HTableDescriptor htd = new HTableDescriptor(getName());
-    Text[] colKeys = new Text[(LAST_COLKEY - FIRST_COLKEY) + 1];
+    byte [][] colKeys = new byte[(LAST_COLKEY - FIRST_COLKEY) + 1][];
     for (char i = 0; i < colKeys.length; i++) {
-      colKeys[i] = new Text(new String(new char[] { 
-        (char)(FIRST_COLKEY + i), ':' }));
+      colKeys[i] = new byte [] {(byte)(FIRST_COLKEY + i), ':' };
       htd.addFamily(new HColumnDescriptor(colKeys[i].toString()));
     }
     admin.createTable(htd);
@@ -264,37 +257,38 @@
     return colKeys;
   }
   
-  private void regExpFilterTest(HTable table, Text[] colKeys) 
+  private void regExpFilterTest(HTable table, byte [][] colKeys) 
     throws Exception {
     // Get the filter.  The RegExpRowFilter used should filter out vowels.
-    Map<Text, byte[]> colCriteria = new TreeMap<Text, byte[]>();
+    Map<byte [], byte[]> colCriteria =
+      new TreeMap<byte [], byte[]>(Bytes.BYTES_COMPARATOR);
     for (int i = 0; i < colKeys.length; i++) {
       colCriteria.put(colKeys[i], GOOD_BYTES);
     }
     RowFilterInterface filter = new RegExpRowFilter("[^aeiou]", colCriteria);
 
     // Create the scanner from the filter.
-    Scanner scanner = table.getScanner(colKeys, new Text(new 
-      String(new char[] { FIRST_ROWKEY })), filter);
+    Scanner scanner = table.getScanner(colKeys, new byte [] { FIRST_ROWKEY },
+      filter);
 
     // Iterate over the scanner, ensuring that results match the passed regex.
     iterateOnScanner(scanner, "[^aei-qu]");
   }
   
-  private void rowFilterSetTest(HTable table, Text[] colKeys) 
+  private void rowFilterSetTest(HTable table, byte [][] colKeys) 
   throws Exception {
     // Get the filter.  The RegExpRowFilter used should filter out vowels and 
     // the WhileMatchRowFilter(StopRowFilter) should filter out all rows 
     // greater than or equal to 'r'.
     Set<RowFilterInterface> filterSet = new HashSet<RowFilterInterface>();
     filterSet.add(new RegExpRowFilter("[^aeiou]"));
-    filterSet.add(new WhileMatchRowFilter(new StopRowFilter(new Text("r"))));
+    filterSet.add(new WhileMatchRowFilter(new StopRowFilter(Bytes.toBytes("r"))));
     RowFilterInterface filter = 
       new RowFilterSet(RowFilterSet.Operator.MUST_PASS_ALL, filterSet);
     
     // Create the scanner from the filter.
-    Scanner scanner = table.getScanner(colKeys, new Text(new 
-        String(new char[] { FIRST_ROWKEY })), filter);
+    Scanner scanner = table.getScanner(colKeys, new byte [] { FIRST_ROWKEY },
+      filter);
     
     // Iterate over the scanner, ensuring that results match the passed regex.
     iterateOnScanner(scanner, "[^aeior-z]");
@@ -327,8 +321,7 @@
     HTable metaTable = new HTable(conf, HConstants.META_TABLE_NAME);
     // First add a new table.  Its intial region will be added to META region.
     HBaseAdmin admin = new HBaseAdmin(conf);
-    Text tableName = new Text(getName());
-    admin.createTable(new HTableDescriptor(tableName.toString()));
+    admin.createTable(new HTableDescriptor(getName()));
     List<HRegionInfo> regions = scan(metaTable);
     assertEquals("Expected one region", 1, regions.size());
     HRegionInfo region = regions.get(0);
@@ -341,10 +334,10 @@
     Path homedir = new Path(getName());
     List<HRegion> newRegions = new ArrayList<HRegion>(2);
     newRegions.add(HRegion.createHRegion(
-        new HRegionInfo(desc, null, new Text("midway")),
+        new HRegionInfo(desc, null, Bytes.toBytes("midway")),
         homedir, this.conf));
     newRegions.add(HRegion.createHRegion(
-        new HRegionInfo(desc, new Text("midway"), null),
+        new HRegionInfo(desc, Bytes.toBytes("midway"), null),
         homedir, this.conf));
     try {
       for (HRegion r : newRegions) {
@@ -370,8 +363,8 @@
       HRegionLocation rl = t.getRegionLocation(t.getTableName());
       regionServer = t.getConnection().getHRegionConnection(rl.getServerAddress());
       scannerId = regionServer.openScanner(rl.getRegionInfo().getRegionName(),
-          HConstants.COLUMN_FAMILY_ARRAY, new Text(),
-          System.currentTimeMillis(), null);
+          HConstants.COLUMN_FAMILY_ARRAY, HConstants.EMPTY_START_ROW,
+          HConstants.LATEST_TIMESTAMP, null);
       while (true) {
         RowResult values = regionServer.next(scannerId);
         if (values == null || values.size() == 0) {
@@ -414,8 +407,8 @@
     batchUpdate.put(HConstants.COL_REGIONINFO,
       Writables.getBytes(region.getRegionInfo()));
     batchUpdate.put(HConstants.COL_SERVER,
-      Writables.stringToBytes(serverAddress.toString()));
-    batchUpdate.put(HConstants.COL_STARTCODE, Writables.longToBytes(startCode));
+      Bytes.toBytes(serverAddress.toString()));
+    batchUpdate.put(HConstants.COL_STARTCODE, Bytes.toBytes(startCode));
     t.commit(batchUpdate);
     // Assert added.
     byte [] bytes = 
@@ -435,7 +428,7 @@
    * @param regionName Region to remove.
    * @throws IOException
    */
-  private void removeRegionFromMETA(final HTable t, final Text regionName)
+  private void removeRegionFromMETA(final HTable t, final byte [] regionName)
   throws IOException {
     BatchUpdate batchUpdate = new BatchUpdate(regionName);
     batchUpdate.delete(HConstants.COL_REGIONINFO);
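
rowFilterSetTest above composes filters: rows must pass a RegExpRowFilter and
a WhileMatchRowFilter wrapping a StopRowFilter, combined under MUST_PASS_ALL.
A sketch assembling the same stack using only constructors visible in this
diff:

import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.hbase.filter.RegExpRowFilter;
import org.apache.hadoop.hbase.filter.RowFilterInterface;
import org.apache.hadoop.hbase.filter.RowFilterSet;
import org.apache.hadoop.hbase.filter.StopRowFilter;
import org.apache.hadoop.hbase.filter.WhileMatchRowFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FilterStackSketch {
  public static RowFilterInterface build() {
    Set<RowFilterInterface> filters = new HashSet<RowFilterInterface>();
    // Pass only rows whose key matches the regex (no vowels here).
    filters.add(new RegExpRowFilter("[^aeiou]"));
    // Stop the scan entirely once the row key reaches "r".
    filters.add(new WhileMatchRowFilter(
        new StopRowFilter(Bytes.toBytes("r"))));
    // A row must pass every filter in the set.
    return new RowFilterSet(RowFilterSet.Operator.MUST_PASS_ALL, filters);
  }
}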

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java Thu May 15 15:10:47 2008
@@ -30,7 +30,6 @@
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.io.Text;
 
 /**
  * Abstract base class for HBase cluster junit tests.  Spins up an hbase
@@ -80,6 +79,7 @@
    * Run after dfs is ready but before hbase cluster is started up.
    */
   protected void preHBaseClusterSetup() throws Exception {
+    // continue
   } 
 
   /**
@@ -90,13 +90,14 @@
     // start the mini cluster
     this.cluster = new MiniHBaseCluster(conf, regionServers);
     // opening the META table ensures that cluster is running
-    HTable meta = new HTable(conf, new Text(".META."));
+    HTable meta = new HTable(conf, ".META.");
   }
   
   /**
    * Run after hbase cluster is started up.
    */
   protected void postHBaseClusterSetup() throws Exception {
+    // continue
   } 
 
   @Override

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseTestCase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseTestCase.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseTestCase.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseTestCase.java Thu May 15 15:10:47 2008
@@ -34,13 +34,13 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HColumnDescriptor.CompressionType;
 import org.apache.hadoop.hbase.io.BatchUpdate;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Scanner;
 import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.util.Bytes;
 
 /**
  * Abstract base class for test cases. Performs all static initialization
@@ -50,13 +50,13 @@
 
   /** configuration parameter name for test directory */
   public static final String TEST_DIRECTORY_KEY = "test.build.data";
-  
 
-  protected final static String COLFAMILY_NAME1 = "colfamily1:";
-  protected final static String COLFAMILY_NAME2 = "colfamily2:";
-  protected final static String COLFAMILY_NAME3 = "colfamily3:";
-  protected static Text [] COLUMNS = new Text [] {new Text(COLFAMILY_NAME1),
-    new Text(COLFAMILY_NAME2), new Text(COLFAMILY_NAME3)};
+  protected final static byte [] COLFAMILY_NAME1 = Bytes.toBytes("colfamily1:");
+  protected final static byte [] COLFAMILY_NAME2 = Bytes.toBytes("colfamily2:");
+  protected final static byte [] COLFAMILY_NAME3 = Bytes.toBytes("colfamily3:");
+  protected static final byte [][] COLUMNS = {COLFAMILY_NAME1,
+    COLFAMILY_NAME2, COLFAMILY_NAME3};
+
   private boolean localfs = false;
   protected Path testDir = null;
   protected FileSystem fs = null;
@@ -151,9 +151,9 @@
         conf.get(TEST_DIRECTORY_KEY, "test/build/data"), testName);
   }
 
-  protected HRegion createNewHRegion(HTableDescriptor desc, Text startKey,
-      Text endKey) throws IOException {
-    
+  protected HRegion createNewHRegion(HTableDescriptor desc, byte [] startKey,
+      byte [] endKey)
+  throws IOException {
     FileSystem filesystem = FileSystem.get(conf);
     Path rootdir = filesystem.makeQualified(
         new Path(conf.get(HConstants.HBASE_DIR)));
@@ -190,13 +190,13 @@
   protected HTableDescriptor createTableDescriptor(final String name,
       final int versions) {
     HTableDescriptor htd = new HTableDescriptor(name);
-    htd.addFamily(new HColumnDescriptor(new Text(COLFAMILY_NAME1), versions,
+    htd.addFamily(new HColumnDescriptor(COLFAMILY_NAME1, versions,
       CompressionType.NONE, false, false, Integer.MAX_VALUE,
       HConstants.FOREVER, null));
-    htd.addFamily(new HColumnDescriptor(new Text(COLFAMILY_NAME2), versions,
+    htd.addFamily(new HColumnDescriptor(COLFAMILY_NAME2, versions,
       CompressionType.NONE, false, false, Integer.MAX_VALUE,
       HConstants.FOREVER, null));
-    htd.addFamily(new HColumnDescriptor(new Text(COLFAMILY_NAME3), versions,
+    htd.addFamily(new HColumnDescriptor(COLFAMILY_NAME3, versions,
       CompressionType.NONE, false, false, Integer.MAX_VALUE, 
       HConstants.FOREVER, null));
     return htd;
@@ -210,15 +210,16 @@
    * @param column
    * @throws IOException
    */
-  protected static void addContent(final HRegion r, final String column)
+  protected static void addContent(final HRegion r, final byte [] column)
   throws IOException {
-    Text startKey = r.getRegionInfo().getStartKey();
-    Text endKey = r.getRegionInfo().getEndKey();
-    byte [] startKeyBytes = startKey.getBytes();
+    byte [] startKey = r.getRegionInfo().getStartKey();
+    byte [] endKey = r.getRegionInfo().getEndKey();
+    byte [] startKeyBytes = startKey;
     if (startKeyBytes == null || startKeyBytes.length == 0) {
       startKeyBytes = START_KEY_BYTES;
     }
-    addContent(new HRegionIncommon(r), column, startKeyBytes, endKey, -1);
+    addContent(new HRegionIncommon(r), Bytes.toString(column),
+      startKeyBytes, endKey, -1);
   }
 
   /**
@@ -245,7 +246,7 @@
    * @throws IOException
    */
   protected static void addContent(final Incommon updater, final String column,
-      final byte [] startKeyBytes, final Text endKey)
+      final byte [] startKeyBytes, final byte [] endKey)
   throws IOException {
     addContent(updater, column, startKeyBytes, endKey, -1);
   }
@@ -262,7 +263,7 @@
    * @throws IOException
    */
   protected static void addContent(final Incommon updater, final String column,
-      final byte [] startKeyBytes, final Text endKey, final long ts)
+      final byte [] startKeyBytes, final byte [] endKey, final long ts)
   throws IOException {
     // Add rows of three characters.  The first character starts with the
     // 'a' character and runs up to 'z'.  Per first character, we run the
@@ -274,18 +275,17 @@
       for (char d = secondCharStart; d <= LAST_CHAR; d++) {
         for (char e = thirdCharStart; e <= LAST_CHAR; e++) {
           byte [] bytes = new byte [] {(byte)c, (byte)d, (byte)e};
-          String s = new String(bytes, HConstants.UTF8_ENCODING) + PUNCTUATION;
-          bytes = s.getBytes(HConstants.UTF8_ENCODING);
-          Text t = new Text(s);
-          if (endKey != null && endKey.getLength() > 0
-              && endKey.compareTo(t) <= 0) {
+          String s = Bytes.toString(bytes) + PUNCTUATION;
+          byte [] t = Bytes.toBytes(s);
+          if (endKey != null && endKey.length > 0
+              && Bytes.compareTo(endKey, t) <= 0) {
             break EXIT;
           }
           try {
             BatchUpdate batchUpdate = ts == -1 ? 
               new BatchUpdate(t) : new BatchUpdate(t, ts);
             try {
-              batchUpdate.put(new Text(column), bytes);
+              batchUpdate.put(column, t);
               updater.commit(batchUpdate);
             } catch (RuntimeException ex) {
               ex.printStackTrace();
@@ -332,7 +332,7 @@
      * @return value for row/column pair
      * @throws IOException
      */
-    public Cell get(Text row, Text column) throws IOException;
+    public Cell get(byte [] row, byte [] column) throws IOException;
     /**
      * @param row
      * @param column
@@ -340,7 +340,7 @@
      * @return value for row/column pair for number of versions requested
      * @throws IOException
      */
-    public Cell[] get(Text row, Text column, int versions) throws IOException;
+    public Cell[] get(byte [] row, byte [] column, int versions) throws IOException;
     /**
      * @param row
      * @param column
@@ -349,7 +349,7 @@
      * @return value for row/column/timestamp tuple for number of versions
      * @throws IOException
      */
-    public Cell[] get(Text row, Text column, long ts, int versions)
+    public Cell[] get(byte [] row, byte [] column, long ts, int versions)
     throws IOException;
     /**
      * @param row
@@ -357,7 +357,7 @@
      * @param ts
      * @throws IOException
      */
-    public void deleteAll(Text row, Text column, long ts) throws IOException;
+    public void deleteAll(byte [] row, byte [] column, long ts) throws IOException;
 
     /**
      * @param batchUpdate
@@ -372,7 +372,7 @@
      * @return scanner for specified columns, first row and timestamp
      * @throws IOException
      */
-    public ScannerIncommon getScanner(Text [] columns, Text firstRow,
+    public ScannerIncommon getScanner(byte [] [] columns, byte [] firstRow,
       long ts) throws IOException;
   }
   
@@ -403,29 +403,32 @@
     };
     
     /** {@inheritDoc} */
-    public void deleteAll(Text row, Text column, long ts) throws IOException {
+    public void deleteAll(byte [] row, byte [] column, long ts)
+    throws IOException {
       this.region.deleteAll(row, column, ts);
     }
 
     /** {@inheritDoc} */
-    public ScannerIncommon getScanner(Text [] columns, Text firstRow, long ts) 
+    public ScannerIncommon getScanner(byte [][] columns, byte [] firstRow,
+      long ts) 
     throws IOException {
       return new 
         InternalScannerIncommon(region.getScanner(columns, firstRow, ts, null));
     }
 
     /** {@inheritDoc} */
-    public Cell get(Text row, Text column) throws IOException {
+    public Cell get(byte [] row, byte [] column) throws IOException {
       return this.region.get(row, column);
     }
 
     /** {@inheritDoc} */
-    public Cell[] get(Text row, Text column, int versions) throws IOException {
+    public Cell[] get(byte [] row, byte [] column, int versions)
+    throws IOException {
       return this.region.get(row, column, versions);
     }
 
     /** {@inheritDoc} */
-    public Cell[] get(Text row, Text column, long ts, int versions)
+    public Cell[] get(byte [] row, byte [] column, long ts, int versions)
     throws IOException {
       return this.region.get(row, column, ts, versions);
     }
@@ -435,7 +438,7 @@
      * @return values for each column in the specified row
      * @throws IOException
      */
-    public Map<Text, Cell> getFull(Text row) throws IOException {
+    public Map<byte [], Cell> getFull(byte [] row) throws IOException {
       return region.getFull(row, null, HConstants.LATEST_TIMESTAMP);
     }
 
@@ -473,37 +476,39 @@
     };
     
     /** {@inheritDoc} */
-    public void deleteAll(Text row, Text column, long ts) throws IOException {
+    public void deleteAll(byte [] row, byte [] column, long ts)
+    throws IOException {
       this.table.deleteAll(row, column, ts);
     }
     
     /** {@inheritDoc} */
-    public ScannerIncommon getScanner(Text [] columns, Text firstRow, long ts) 
+    public ScannerIncommon getScanner(byte [][] columns, byte [] firstRow, long ts) 
     throws IOException {
       return new 
         ClientScannerIncommon(table.getScanner(columns, firstRow, ts, null));
     }
     
     /** {@inheritDoc} */
-    public Cell get(Text row, Text column) throws IOException {
+    public Cell get(byte [] row, byte [] column) throws IOException {
       return this.table.get(row, column);
     }
     
     /** {@inheritDoc} */
-    public Cell[] get(Text row, Text column, int versions) throws IOException {
+    public Cell[] get(byte [] row, byte [] column, int versions)
+    throws IOException {
       return this.table.get(row, column, versions);
     }
     
     /** {@inheritDoc} */
-    public Cell[] get(Text row, Text column, long ts, int versions)
+    public Cell[] get(byte [] row, byte [] column, long ts, int versions)
     throws IOException {
       return this.table.get(row, column, ts, versions);
     }
   }
   
   public interface ScannerIncommon 
-  extends Iterable<Map.Entry<HStoreKey, SortedMap<Text, byte[]>>> {
-    public boolean next(HStoreKey key, SortedMap<Text, byte[]> values)
+  extends Iterable<Map.Entry<HStoreKey, SortedMap<byte [], byte[]>>> {
+    public boolean next(HStoreKey key, SortedMap<byte [], byte[]> values)
     throws IOException;
     
     public void close() throws IOException;
@@ -515,19 +520,18 @@
       this.scanner = scanner;
     }
     
-    public boolean next(HStoreKey key, SortedMap<Text, byte[]> values)
+    public boolean next(HStoreKey key, SortedMap<byte [], byte[]> values)
     throws IOException {
       RowResult results = scanner.next();
       if (results == null) {
         return false;
-      } else {
-        key.setRow(results.getRow());
-        values.clear();
-        for (Map.Entry<Text, Cell> entry : results.entrySet()) {
-          values.put(entry.getKey(), entry.getValue().getValue());
-        }
-        return true;
       }
+      key.setRow(results.getRow());
+      values.clear();
+      for (Map.Entry<byte [], Cell> entry : results.entrySet()) {
+        values.put(entry.getKey(), entry.getValue().getValue());
+      }
+      return true;
     }
     
     public void close() throws IOException {
@@ -546,7 +550,7 @@
       this.scanner = scanner;
     }
     
-    public boolean next(HStoreKey key, SortedMap<Text, byte[]> values)
+    public boolean next(HStoreKey key, SortedMap<byte [], byte[]> values)
     throws IOException {
       return scanner.next(key, values);
     }
@@ -560,10 +564,10 @@
     }
   }
   
-  protected void assertCellEquals(final HRegion region, final Text row,
-    final Text column, final long timestamp, final String value)
+  protected void assertCellEquals(final HRegion region, final byte [] row,
+    final byte [] column, final long timestamp, final String value)
   throws IOException {
-    Map<Text, Cell> result = region.getFull(row, null, timestamp);
+    Map<byte [], Cell> result = region.getFull(row, null, timestamp);
     Cell cell_value = result.get(column);
     if(value == null){
       assertEquals(column.toString() + " at timestamp " + timestamp, null, cell_value);

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MapFilePerformanceEvaluation.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MapFilePerformanceEvaluation.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MapFilePerformanceEvaluation.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MapFilePerformanceEvaluation.java Thu May 15 15:10:47 2008
@@ -27,8 +27,9 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.MapFile;
-import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 
 /**
@@ -44,10 +45,10 @@
   static final Logger LOG =
     Logger.getLogger(MapFilePerformanceEvaluation.class.getName());
   
-  static Text format(final int i, final Text text) {
+  static ImmutableBytesWritable format(final int i, ImmutableBytesWritable w) {
     String v = Integer.toString(i);
-    text.set("0000000000".substring(v.length()) + v);
-    return text;
+    w.set(Bytes.toBytes("0000000000".substring(v.length()) + v));
+    return w;
   }
 
   private void runBenchmarks() throws Exception {
@@ -84,8 +85,6 @@
     protected final FileSystem fs;
     protected final Path mf;
     protected final int totalRows;
-    protected Text key;
-    protected Text val;
     
     public RowOrientedBenchmark(Configuration conf, FileSystem fs, Path mf,
         int totalRows) {
@@ -93,8 +92,6 @@
       this.fs = fs;
       this.mf = mf;
       this.totalRows = totalRows;
-      this.key = new Text();
-      this.val = new Text();
     }
     
     void setUp() throws Exception {
@@ -141,6 +138,8 @@
     protected MapFile.Writer writer;
     private Random random = new Random();
     private byte[] bytes = new byte[ROW_LENGTH];
+    private ImmutableBytesWritable key = new ImmutableBytesWritable();
+    private ImmutableBytesWritable value = new ImmutableBytesWritable();
     
     public SequentialWriteBenchmark(Configuration conf, FileSystem fs, Path mf,
         int totalRows) {
@@ -150,13 +149,13 @@
     @Override
     void setUp() throws Exception {
       writer = new MapFile.Writer(conf, fs, mf.toString(),
-          Text.class, Text.class);
+        ImmutableBytesWritable.class, ImmutableBytesWritable.class);
     }
     
     @Override
     void doRow(int i) throws Exception {
-      val.set(generateValue());
-      writer.append(format(i, key), val); 
+      value.set(generateValue());
+      writer.append(format(i, key), value); 
     }
     
     private byte[] generateValue() {
@@ -177,6 +176,8 @@
   }
   
   static abstract class ReadBenchmark extends RowOrientedBenchmark {
+    ImmutableBytesWritable key = new ImmutableBytesWritable();
+    ImmutableBytesWritable value = new ImmutableBytesWritable();
     
     protected MapFile.Reader reader;
     
@@ -198,7 +199,7 @@
   }
 
   static class SequentialReadBenchmark extends ReadBenchmark {
-
+    
     public SequentialReadBenchmark(Configuration conf, FileSystem fs,
         Path mf, int totalRows) {
       super(conf, fs, mf, totalRows);
@@ -206,7 +207,7 @@
 
     @Override
     void doRow(@SuppressWarnings("unused") int i) throws Exception {
-      reader.next(key, val);
+      reader.next(key, value);
     }
     
     @Override
@@ -227,10 +228,10 @@
 
     @Override
     void doRow(@SuppressWarnings("unused") int i) throws Exception {
-      reader.get(getRandomRow(), val);
+      reader.get(getRandomRow(), value);
     }
     
-    private Text getRandomRow() {
+    private ImmutableBytesWritable getRandomRow() {
       return format(random.nextInt(totalRows), key);
     }
     
@@ -247,10 +248,10 @@
 
     @Override
     void doRow(@SuppressWarnings("unused") int i) throws Exception {
-      reader.get(getGaussianRandomRow(), val);
+      reader.get(getGaussianRandomRow(), value);
     }
     
-    private Text getGaussianRandomRow() {
+    private ImmutableBytesWritable getGaussianRandomRow() {
       int r = (int) randomData.nextGaussian(totalRows / 2, totalRows / 10);
       return format(r, key);
     }

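[Editor's note] format() above now writes its zero-padded key into an ImmutableBytesWritable instead of a Text, but the padding arithmetic is unchanged. A standalone sketch of just that logic, in plain Java with no HBase types, to show what the benchmark keys look like:

  public class PadDemo {
    // Mirrors the benchmark's format(): left-pad a decimal int to 10 chars.
    // Note it assumes the rendered int fits in 10 characters; an 11-char
    // rendering (e.g. a negative int) would make substring() throw.
    static String pad(int i) {
      String v = Integer.toString(i);
      return "0000000000".substring(v.length()) + v;
    }
    public static void main(String[] args) {
      System.out.println(pad(42));  // prints 0000000042
    }
  }
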
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java Thu May 15 15:10:47 2008
@@ -149,7 +149,7 @@
   public void flushcache() throws IOException {
     for (LocalHBaseCluster.RegionServerThread t:
         this.hbaseCluster.getRegionServers()) {
-      for(HRegion r: t.getRegionServer().getOnlineRegions().values() ) {
+      for(HRegion r: t.getRegionServer().getOnlineRegions()) {
         r.flushcache();
       }
     }

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MultiRegionTable.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MultiRegionTable.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MultiRegionTable.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MultiRegionTable.java Thu May 15 15:10:47 2008
@@ -21,43 +21,42 @@
 
 import java.io.IOException;
 
-import org.apache.hadoop.io.Text;
-
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.util.Bytes;
 
 /**
  * Utility class to build a table of multiple regions.
  */
 public class MultiRegionTable extends HBaseClusterTestCase {
-  private static final Text[] KEYS = {
-    null,
-    new Text("bbb"),
-    new Text("ccc"),
-    new Text("ddd"),
-    new Text("eee"),
-    new Text("fff"),
-    new Text("ggg"),
-    new Text("hhh"),
-    new Text("iii"),
-    new Text("jjj"),
-    new Text("kkk"),
-    new Text("lll"),
-    new Text("mmm"),
-    new Text("nnn"),
-    new Text("ooo"),
-    new Text("ppp"),
-    new Text("qqq"),
-    new Text("rrr"),
-    new Text("sss"),
-    new Text("ttt"),
-    new Text("uuu"),
-    new Text("vvv"),
-    new Text("www"),
-    new Text("xxx"),
-    new Text("yyy")
+  private static final byte [][] KEYS = {
+    HConstants.EMPTY_BYTE_ARRAY,
+    Bytes.toBytes("bbb"),
+    Bytes.toBytes("ccc"),
+    Bytes.toBytes("ddd"),
+    Bytes.toBytes("eee"),
+    Bytes.toBytes("fff"),
+    Bytes.toBytes("ggg"),
+    Bytes.toBytes("hhh"),
+    Bytes.toBytes("iii"),
+    Bytes.toBytes("jjj"),
+    Bytes.toBytes("kkk"),
+    Bytes.toBytes("lll"),
+    Bytes.toBytes("mmm"),
+    Bytes.toBytes("nnn"),
+    Bytes.toBytes("ooo"),
+    Bytes.toBytes("ppp"),
+    Bytes.toBytes("qqq"),
+    Bytes.toBytes("rrr"),
+    Bytes.toBytes("sss"),
+    Bytes.toBytes("ttt"),
+    Bytes.toBytes("uuu"),
+    Bytes.toBytes("vvv"),
+    Bytes.toBytes("www"),
+    Bytes.toBytes("xxx"),
+    Bytes.toBytes("yyy")
   };
   
-  protected final String columnName;
+  protected final byte [] columnName;
   protected HTableDescriptor desc;
 
   /**
@@ -65,7 +64,7 @@
    */
   public MultiRegionTable(final String columnName) {
     super();
-    this.columnName = columnName;
+    this.columnName = Bytes.toBytes(columnName);
     // These are needed for the new and improved Map/Reduce framework
     System.setProperty("hadoop.log.dir", conf.get("hadoop.log.dir"));
     conf.set("mapred.output.dir", conf.get("hadoop.tmp.dir"));
@@ -78,7 +77,6 @@
   protected void preHBaseClusterSetup() throws Exception {
     try {
       // Create a bunch of regions
-
       HRegion[] regions = new HRegion[KEYS.length];
       for (int i = 0; i < regions.length; i++) {
         int j = (i + 1) % regions.length;
@@ -87,17 +85,14 @@
 
       // Now create the root and meta regions and insert the data regions
       // created above into the meta
-
-      HRegion root = HRegion.createHRegion(HRegionInfo.rootRegionInfo,
+      HRegion root = HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO,
           testDir, this.conf);
-      HRegion meta = HRegion.createHRegion(HRegionInfo.firstMetaRegionInfo,
+      HRegion meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO,
           testDir, this.conf);
       HRegion.addRegionToMETA(root, meta);
-
       for(int i = 0; i < regions.length; i++) {
         HRegion.addRegionToMETA(meta, regions[i]);
       }
-
       closeRegionAndDeleteLog(root);
       closeRegionAndDeleteLog(meta);
     } catch (Exception e) {
@@ -106,7 +101,7 @@
     }
   } 
 
-  private HRegion createARegion(Text startKey, Text endKey) throws IOException {
+  private HRegion createARegion(byte [] startKey, byte [] endKey) throws IOException {
     HRegion region = createNewHRegion(desc, startKey, endKey);
     addContent(region, this.columnName);
     closeRegionAndDeleteLog(region);

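[Editor's note] KEYS now starts with HConstants.EMPTY_BYTE_ARRAY rather than null: in the byte [] world a zero-length key conventionally means "no bound". A minimal sketch of the region-containment test that convention implies, assuming Bytes.compareTo does unsigned lexicographic comparison:

  import org.apache.hadoop.hbase.util.Bytes;

  public class RegionRange {
    /**
     * True if row falls in [startKey, endKey), treating a zero-length
     * key as an open bound -- the convention EMPTY_BYTE_ARRAY encodes.
     */
    static boolean contains(byte [] startKey, byte [] endKey, byte [] row) {
      boolean afterStart =
        startKey.length == 0 || Bytes.compareTo(row, startKey) >= 0;
      boolean beforeEnd =
        endKey.length == 0 || Bytes.compareTo(row, endKey) < 0;
      return afterStart && beforeEnd;
    }
  }
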
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java Thu May 15 15:10:47 2008
@@ -26,19 +26,21 @@
 import java.util.Date;
 import java.util.List;
 import java.util.Random;
-import java.util.TreeMap;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Scanner;
+import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapred.JobClient;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.MapReduceBase;
@@ -48,10 +50,6 @@
 import org.apache.hadoop.mapred.TextInputFormat;
 import org.apache.hadoop.mapred.TextOutputFormat;
 import org.apache.log4j.Logger;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.Scanner;
-import org.apache.hadoop.hbase.io.BatchUpdate;
 
 
 /**
@@ -77,12 +75,12 @@
   private static final int ROW_LENGTH = 1000;
   private static final int ONE_GB = 1024 * 1024 * 1000;
   private static final int ROWS_PER_GB = ONE_GB / ROW_LENGTH;
-  static final Text COLUMN_NAME = new Text(COLUMN_FAMILY + "data");
+  static final byte [] COLUMN_NAME = Bytes.toBytes(COLUMN_FAMILY_STR + "data");
   
   protected static HTableDescriptor tableDescriptor;
   static {
     tableDescriptor = new HTableDescriptor("TestTable");
-    tableDescriptor.addFamily(new HColumnDescriptor(COLUMN_FAMILY.toString()));
+    tableDescriptor.addFamily(new HColumnDescriptor(COLUMN_FAMILY));
   }
   
   private static final String RANDOM_READ = "randomRead";
@@ -388,7 +386,7 @@
     
     @Override
     void testRow(@SuppressWarnings("unused") final int i) throws IOException {
-      Text row = getRandomRow(this.rand, this.totalRows);
+      byte [] row = getRandomRow(this.rand, this.totalRows);
       BatchUpdate b = new BatchUpdate(row);
       b.put(COLUMN_NAME, generateValue(this.rand));
       table.commit(b);
@@ -411,7 +409,7 @@
     @Override
     void testSetup() throws IOException {
       super.testSetup();
-      this.testScanner = table.getScanner(new Text[] {COLUMN_NAME},
+      this.testScanner = table.getScanner(new byte [][] {COLUMN_NAME},
         format(this.startRow));
     }
     
@@ -473,14 +471,18 @@
   
   /*
    * Format passed integer.
-   * This method takes some time and is done inline uploading data.  For
-   * example, doing the mapfile test, generation of the key and value
-   * consumes about 30% of CPU time.
-   * @param i
-   * @return Integer as String zero padded.
-   */
-  static Text format(final int i) {
-    return new Text(String.format("%010d", Integer.valueOf(i)));
+   * @param number
+   * @return Returns zero-prefixed 10-byte wide decimal version of passed
+   * number (Does absolute in case number is negative).
+   */
+  static byte [] format(final int number) {
+    byte [] b = new byte[10];
+    int d = Math.abs(number);
+    for (int i = b.length - 1; i > 0; i--) {
+      b[i] = (byte)((d % 10) + '0');
+      d /= 10;
+    }
+    return b;
   }
   
   /*
@@ -495,8 +497,8 @@
     return b;
   }
   
-  static Text getRandomRow(final Random random, final int totalRows) {
-    return new Text(format(random.nextInt(Integer.MAX_VALUE) % totalRows));
+  static byte [] getRandomRow(final Random random, final int totalRows) {
+    return format(random.nextInt(Integer.MAX_VALUE) % totalRows);
   }
   
   long runOneClient(final String cmd, final int startRow,
@@ -695,7 +697,8 @@
    * @param args
    */
   public static void main(final String[] args) {
-    System.exit(new PerformanceEvaluation(new HBaseConfiguration()).
+    HBaseConfiguration c = new HBaseConfiguration();
+    System.exit(new PerformanceEvaluation(c).
       doCommandLine(args));
   }
-}
+}
\ No newline at end of file

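[Editor's note] One thing a reviewer might flag in the rewritten format() above: the loop condition is i > 0, so the loop stops at index 1 and b[0] is left as the zero byte rather than the character '0' (and a full ten-digit value silently loses its leading digit). A corrected sketch, offered as an assumption about the intended behavior rather than as what this commit actually does:

  /**
   * Zero-padded 10-byte decimal encoding of an int.
   * Note i >= 0, so the most-significant slot gets a digit character too.
   */
  static byte [] format(final int number) {
    byte [] b = new byte[10];
    int d = Math.abs(number);
    for (int i = b.length - 1; i >= 0; i--) {
      b[i] = (byte) ((d % 10) + '0');
      d /= 10;
    }
    return b;
  }
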
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestBloomFilters.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestBloomFilters.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestBloomFilters.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestBloomFilters.java Thu May 15 15:10:47 2008
@@ -22,127 +22,127 @@
 import java.io.IOException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 
 import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.util.Bytes;
 
 /** Tests per-column bloom filters */
 public class TestBloomFilters extends HBaseClusterTestCase {
   static final Log LOG = LogFactory.getLog(TestBloomFilters.class);
 
-  private static final Text CONTENTS = new Text("contents:");
+  private static final byte [] CONTENTS = Bytes.toBytes("contents:");
 
-  private static final Text[] rows = {
-    new Text("wmjwjzyv"),
-    new Text("baietibz"),
-    new Text("guhsgxnv"),
-    new Text("mhnqycto"),
-    new Text("xcyqafgz"),
-    new Text("zidoamgb"),
-    new Text("tftfirzd"),
-    new Text("okapqlrg"),
-    new Text("yccwzwsq"),
-    new Text("qmonufqu"),
-    new Text("wlsctews"),
-    new Text("mksdhqri"),
-    new Text("wxxllokj"),
-    new Text("eviuqpls"),
-    new Text("bavotqmj"),
-    new Text("yibqzhdl"),
-    new Text("csfqmsyr"),
-    new Text("guxliyuh"),
-    new Text("pzicietj"),
-    new Text("qdwgrqwo"),
-    new Text("ujfzecmi"),
-    new Text("dzeqfvfi"),
-    new Text("phoegsij"),
-    new Text("bvudfcou"),
-    new Text("dowzmciz"),
-    new Text("etvhkizp"),
-    new Text("rzurqycg"),
-    new Text("krqfxuge"),
-    new Text("gflcohtd"),
-    new Text("fcrcxtps"),
-    new Text("qrtovxdq"),
-    new Text("aypxwrwi"),
-    new Text("dckpyznr"),
-    new Text("mdaawnpz"),
-    new Text("pakdfvca"),
-    new Text("xjglfbez"),
-    new Text("xdsecofi"),
-    new Text("sjlrfcab"),
-    new Text("ebcjawxv"),
-    new Text("hkafkjmy"),
-    new Text("oimmwaxo"),
-    new Text("qcuzrazo"),
-    new Text("nqydfkwk"),
-    new Text("frybvmlb"),
-    new Text("amxmaqws"),
-    new Text("gtkovkgx"),
-    new Text("vgwxrwss"),
-    new Text("xrhzmcep"),
-    new Text("tafwziil"),
-    new Text("erjmncnv"),
-    new Text("heyzqzrn"),
-    new Text("sowvyhtu"),
-    new Text("heeixgzy"),
-    new Text("ktcahcob"),
-    new Text("ljhbybgg"),
-    new Text("jiqfcksl"),
-    new Text("anjdkjhm"),
-    new Text("uzcgcuxp"),
-    new Text("vzdhjqla"),
-    new Text("svhgwwzq"),
-    new Text("zhswvhbp"),
-    new Text("ueceybwy"),
-    new Text("czkqykcw"),
-    new Text("ctisayir"),
-    new Text("hppbgciu"),
-    new Text("nhzgljfk"),
-    new Text("vaziqllf"),
-    new Text("narvrrij"),
-    new Text("kcevbbqi"),
-    new Text("qymuaqnp"),
-    new Text("pwqpfhsr"),
-    new Text("peyeicuk"),
-    new Text("kudlwihi"),
-    new Text("pkmqejlm"),
-    new Text("ylwzjftl"),
-    new Text("rhqrlqar"),
-    new Text("xmftvzsp"),
-    new Text("iaemtihk"),
-    new Text("ymsbrqcu"),
-    new Text("yfnlcxto"),
-    new Text("nluqopqh"),
-    new Text("wmrzhtox"),
-    new Text("qnffhqbl"),
-    new Text("zypqpnbw"),
-    new Text("oiokhatd"),
-    new Text("mdraddiu"),
-    new Text("zqoatltt"),
-    new Text("ewhulbtm"),
-    new Text("nmswpsdf"),
-    new Text("xsjeteqe"),
-    new Text("ufubcbma"),
-    new Text("phyxvrds"),
-    new Text("vhnfldap"),
-    new Text("zrrlycmg"),
-    new Text("becotcjx"),
-    new Text("wvbubokn"),
-    new Text("avkgiopr"),
-    new Text("mbqqxmrv"),
-    new Text("ibplgvuu"),
-    new Text("dghvpkgc")
+  private static final byte [][] rows = {
+    Bytes.toBytes("wmjwjzyv"),
+    Bytes.toBytes("baietibz"),
+    Bytes.toBytes("guhsgxnv"),
+    Bytes.toBytes("mhnqycto"),
+    Bytes.toBytes("xcyqafgz"),
+    Bytes.toBytes("zidoamgb"),
+    Bytes.toBytes("tftfirzd"),
+    Bytes.toBytes("okapqlrg"),
+    Bytes.toBytes("yccwzwsq"),
+    Bytes.toBytes("qmonufqu"),
+    Bytes.toBytes("wlsctews"),
+    Bytes.toBytes("mksdhqri"),
+    Bytes.toBytes("wxxllokj"),
+    Bytes.toBytes("eviuqpls"),
+    Bytes.toBytes("bavotqmj"),
+    Bytes.toBytes("yibqzhdl"),
+    Bytes.toBytes("csfqmsyr"),
+    Bytes.toBytes("guxliyuh"),
+    Bytes.toBytes("pzicietj"),
+    Bytes.toBytes("qdwgrqwo"),
+    Bytes.toBytes("ujfzecmi"),
+    Bytes.toBytes("dzeqfvfi"),
+    Bytes.toBytes("phoegsij"),
+    Bytes.toBytes("bvudfcou"),
+    Bytes.toBytes("dowzmciz"),
+    Bytes.toBytes("etvhkizp"),
+    Bytes.toBytes("rzurqycg"),
+    Bytes.toBytes("krqfxuge"),
+    Bytes.toBytes("gflcohtd"),
+    Bytes.toBytes("fcrcxtps"),
+    Bytes.toBytes("qrtovxdq"),
+    Bytes.toBytes("aypxwrwi"),
+    Bytes.toBytes("dckpyznr"),
+    Bytes.toBytes("mdaawnpz"),
+    Bytes.toBytes("pakdfvca"),
+    Bytes.toBytes("xjglfbez"),
+    Bytes.toBytes("xdsecofi"),
+    Bytes.toBytes("sjlrfcab"),
+    Bytes.toBytes("ebcjawxv"),
+    Bytes.toBytes("hkafkjmy"),
+    Bytes.toBytes("oimmwaxo"),
+    Bytes.toBytes("qcuzrazo"),
+    Bytes.toBytes("nqydfkwk"),
+    Bytes.toBytes("frybvmlb"),
+    Bytes.toBytes("amxmaqws"),
+    Bytes.toBytes("gtkovkgx"),
+    Bytes.toBytes("vgwxrwss"),
+    Bytes.toBytes("xrhzmcep"),
+    Bytes.toBytes("tafwziil"),
+    Bytes.toBytes("erjmncnv"),
+    Bytes.toBytes("heyzqzrn"),
+    Bytes.toBytes("sowvyhtu"),
+    Bytes.toBytes("heeixgzy"),
+    Bytes.toBytes("ktcahcob"),
+    Bytes.toBytes("ljhbybgg"),
+    Bytes.toBytes("jiqfcksl"),
+    Bytes.toBytes("anjdkjhm"),
+    Bytes.toBytes("uzcgcuxp"),
+    Bytes.toBytes("vzdhjqla"),
+    Bytes.toBytes("svhgwwzq"),
+    Bytes.toBytes("zhswvhbp"),
+    Bytes.toBytes("ueceybwy"),
+    Bytes.toBytes("czkqykcw"),
+    Bytes.toBytes("ctisayir"),
+    Bytes.toBytes("hppbgciu"),
+    Bytes.toBytes("nhzgljfk"),
+    Bytes.toBytes("vaziqllf"),
+    Bytes.toBytes("narvrrij"),
+    Bytes.toBytes("kcevbbqi"),
+    Bytes.toBytes("qymuaqnp"),
+    Bytes.toBytes("pwqpfhsr"),
+    Bytes.toBytes("peyeicuk"),
+    Bytes.toBytes("kudlwihi"),
+    Bytes.toBytes("pkmqejlm"),
+    Bytes.toBytes("ylwzjftl"),
+    Bytes.toBytes("rhqrlqar"),
+    Bytes.toBytes("xmftvzsp"),
+    Bytes.toBytes("iaemtihk"),
+    Bytes.toBytes("ymsbrqcu"),
+    Bytes.toBytes("yfnlcxto"),
+    Bytes.toBytes("nluqopqh"),
+    Bytes.toBytes("wmrzhtox"),
+    Bytes.toBytes("qnffhqbl"),
+    Bytes.toBytes("zypqpnbw"),
+    Bytes.toBytes("oiokhatd"),
+    Bytes.toBytes("mdraddiu"),
+    Bytes.toBytes("zqoatltt"),
+    Bytes.toBytes("ewhulbtm"),
+    Bytes.toBytes("nmswpsdf"),
+    Bytes.toBytes("xsjeteqe"),
+    Bytes.toBytes("ufubcbma"),
+    Bytes.toBytes("phyxvrds"),
+    Bytes.toBytes("vhnfldap"),
+    Bytes.toBytes("zrrlycmg"),
+    Bytes.toBytes("becotcjx"),
+    Bytes.toBytes("wvbubokn"),
+    Bytes.toBytes("avkgiopr"),
+    Bytes.toBytes("mbqqxmrv"),
+    Bytes.toBytes("ibplgvuu"),
+    Bytes.toBytes("dghvpkgc")
   };
 
-  private static final Text[] testKeys = {
-      new Text("abcdefgh"),
-      new Text("ijklmnop"),
-      new Text("qrstuvwx"),
-      new Text("yzabcdef")
+  private static final byte [][] testKeys = {
+      Bytes.toBytes("abcdefgh"),
+      Bytes.toBytes("ijklmnop"),
+      Bytes.toBytes("qrstuvwx"),
+      Bytes.toBytes("yzabcdef")
   };
   
   /** constructor */
@@ -191,7 +191,7 @@
     // Store some values
 
     for(int i = 0; i < 100; i++) {
-      Text row = rows[i];
+      byte [] row = rows[i];
       String value = row.toString();
       BatchUpdate b = new BatchUpdate(row);
       b.put(CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
@@ -201,7 +201,6 @@
       // Give cache flusher and log roller a chance to run
       // Otherwise we'll never hit the bloom filter, just the memcache
       Thread.sleep(conf.getLong(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000) * 2);
-      
     } catch (InterruptedException e) {
       // ignore
     }
@@ -258,7 +257,7 @@
     // Store some values
 
     for(int i = 0; i < 100; i++) {
-      Text row = rows[i];
+      byte [] row = rows[i];
       String value = row.toString();
       BatchUpdate b = new BatchUpdate(row);
       b.put(CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));

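[Editor's note] Both hunks above change row from Text to byte [] while keeping the unchanged context line "String value = row.toString();". On a byte [], toString() is Object.toString() and yields something like "[B@1e63e3d", not the row's contents, so the values the test stores are no longer the readable row strings it originally wrote. A hedged sketch of the conversion the test presumably wants:

  import org.apache.hadoop.hbase.util.Bytes;

  public class RowStringDemo {
    public static void main(String[] args) {
      byte [] row = Bytes.toBytes("wmjwjzyv");
      String broken = row.toString();      // "[B@..." -- array identity, not contents
      String value = Bytes.toString(row);  // "wmjwjzyv" -- what Text.toString() gave
      System.out.println(broken + " vs " + value);
    }
  }
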
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestCompare.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestCompare.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestCompare.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestCompare.java Thu May 15 15:10:47 2008
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.hbase;
 
-import org.apache.hadoop.io.Text;
+import org.apache.hadoop.hbase.util.Bytes;
 
 import junit.framework.TestCase;
 
@@ -35,7 +35,7 @@
    */
   public void testHStoreKey() {
     long timestamp = System.currentTimeMillis();
-    Text a = new Text("a");
+    byte [] a = Bytes.toBytes("a");
     HStoreKey past = new HStoreKey(a, a, timestamp - 10);
     HStoreKey now = new HStoreKey(a, a, timestamp);
     HStoreKey future = new HStoreKey(a, a, timestamp + 10);
@@ -52,21 +52,21 @@
     HRegionInfo b = new HRegionInfo(new HTableDescriptor("b"), null, null);
     assertTrue(a.compareTo(b) != 0);
     HTableDescriptor t = new HTableDescriptor("t");
-    Text midway = new Text("midway");
+    byte [] midway = Bytes.toBytes("midway");
     a = new HRegionInfo(t, null, midway);
     b = new HRegionInfo(t, midway, null);
     assertTrue(a.compareTo(b) < 0);
     assertTrue(b.compareTo(a) > 0);
     assertEquals(a, a);
     assertTrue(a.compareTo(a) == 0);
-    a = new HRegionInfo(t, new Text("a"), new Text("d"));
-    b = new HRegionInfo(t, new Text("e"), new Text("g"));
+    a = new HRegionInfo(t, Bytes.toBytes("a"), Bytes.toBytes("d"));
+    b = new HRegionInfo(t, Bytes.toBytes("e"), Bytes.toBytes("g"));
     assertTrue(a.compareTo(b) < 0);
-    a = new HRegionInfo(t, new Text("aaaa"), new Text("dddd"));
-    b = new HRegionInfo(t, new Text("e"), new Text("g"));
+    a = new HRegionInfo(t, Bytes.toBytes("aaaa"), Bytes.toBytes("dddd"));
+    b = new HRegionInfo(t, Bytes.toBytes("e"), Bytes.toBytes("g"));
     assertTrue(a.compareTo(b) < 0);
-    a = new HRegionInfo(t, new Text("aaaa"), new Text("dddd"));
-    b = new HRegionInfo(t, new Text("aaaa"), new Text("eeee"));
+    a = new HRegionInfo(t, Bytes.toBytes("aaaa"), Bytes.toBytes("dddd"));
+    b = new HRegionInfo(t, Bytes.toBytes("aaaa"), Bytes.toBytes("eeee"));
     assertTrue(a.compareTo(b) < 0);
   }
-}
+}
\ No newline at end of file

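[Editor's note] The assertions above depend on byte-wise lexicographic ordering: "aaaa" sorts before "e" because comparison proceeds byte by byte from the left, and 'a' < 'e' decides it at the first position regardless of length. A small self-contained sketch of that ordering rule, assuming the usual unsigned byte comparison:

  public class ByteCompareDemo {
    // Unsigned lexicographic compare: the ordering HStoreKey and
    // HRegionInfo comparisons reduce to after the byte [] migration.
    static int compare(byte [] left, byte [] right) {
      int n = Math.min(left.length, right.length);
      for (int i = 0; i < n; i++) {
        int a = left[i] & 0xff, b = right[i] & 0xff;
        if (a != b) return a - b;
      }
      return left.length - right.length;  // a prefix sorts first
    }
    public static void main(String[] args) {
      System.out.println(compare("aaaa".getBytes(), "e".getBytes()) < 0);  // true
    }
  }
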
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestEmptyMetaInfo.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestEmptyMetaInfo.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestEmptyMetaInfo.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestEmptyMetaInfo.java Thu May 15 15:10:47 2008
@@ -22,16 +22,11 @@
 
 import java.io.IOException;
 
-import java.util.SortedMap;
-import java.util.TreeMap;
-
-import org.apache.hadoop.io.Text;
-
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Scanner;
-import org.apache.hadoop.hbase.io.RowResult;
-import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.util.Bytes;
 
 /**
  * Tests master cleanup of rows in meta table where there is no HRegionInfo
@@ -43,12 +38,14 @@
    */
   public void testEmptyMetaInfo() throws IOException {
     HTable t = new HTable(conf, HConstants.META_TABLE_NAME);
-    for (int i = 0; i < 5; i++) {
-      Text regionName = new Text("tablename," + (i == 0 ? "" : (i +",")) + 
-          System.currentTimeMillis());
+    final int COUNT = 5;
+    final byte [] tableName = Bytes.toBytes(getName());
+    for (int i = 0; i < COUNT; i++) {
+      byte [] regionName = HRegionInfo.createRegionName(tableName,
+        Bytes.toBytes(i == 0? "": Integer.toString(i)),
+        Long.toString(System.currentTimeMillis()));
       BatchUpdate b = new BatchUpdate(regionName);
-      b.put(HConstants.COL_SERVER,
-          "localhost:1234".getBytes(HConstants.UTF8_ENCODING));
+      b.put(HConstants.COL_SERVER, Bytes.toBytes("localhost:1234"));
       t.commit(b);
     }
     long sleepTime =
@@ -62,15 +59,13 @@
       } catch (InterruptedException e) {
         // ignore
       }
-      Scanner scanner =
-        t.getScanner(HConstants.ALL_META_COLUMNS, new Text("tablename"));
-
+      Scanner scanner = t.getScanner(HConstants.ALL_META_COLUMNS, tableName);
       try {
         count = 0;
-        HStoreKey key = new HStoreKey();
-        SortedMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
-        for (RowResult r : scanner) {
-          count += 1;
+        for (RowResult r: scanner) {
+          if (r.size() > 0) {
+            count += 1;
+          }
         }
       } finally {
         scanner.close();

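[Editor's note] The test now builds meta rows with HRegionInfo.createRegionName rather than by string concatenation, encoding the meta row-key layout "tablename,startkey,id". A sketch of that layout with a hypothetical stand-in method, assuming createRegionName simply joins the three parts with commas:

  import org.apache.hadoop.hbase.util.Bytes;

  public class RegionNameDemo {
    // Hypothetical stand-in for HRegionInfo.createRegionName, shown only
    // to illustrate the "tablename,startkey,id" meta row-key layout.
    static byte [] regionName(byte [] table, byte [] startKey, String id) {
      return Bytes.toBytes(Bytes.toString(table) + ","
          + Bytes.toString(startKey) + "," + id);
    }
    public static void main(String[] args) {
      System.out.println(Bytes.toString(regionName(
        Bytes.toBytes("tablename"), Bytes.toBytes(""), "1210888250000")));
    }
  }
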
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestGlobalMemcacheLimit.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestGlobalMemcacheLimit.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestGlobalMemcacheLimit.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestGlobalMemcacheLimit.java Thu May 15 15:10:47 2008
@@ -21,13 +21,13 @@
 
 import java.io.IOException;
 
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.util.Bytes;
 
 /**
  * Test setting the global memcache size for a region server. When it reaches 
@@ -41,7 +41,7 @@
   HTable table2;
   HRegionServer server;
   
-  long keySize = (new Text(COLFAMILY_NAME1)).getLength() + 9 + 8;
+  long keySize =  COLFAMILY_NAME1.length + 9 + 8;
   long rowSize = keySize + ONE_KB.length;
   
   /**
@@ -69,13 +69,13 @@
     HBaseAdmin admin = new HBaseAdmin(conf);
     admin.createTable(desc1);
     admin.createTable(desc2);
-    table1 = new HTable(conf, new Text("testTable1"));
-    table2 = new HTable(conf, new Text("testTable2"));    
+    table1 = new HTable(conf, "testTable1");
+    table2 = new HTable(conf, "testTable2");    
     server = cluster.getRegionServer(0);    
     
     // there is a META region in play, and those are probably still in
     // the memcache for ROOT. flush it out.
-    for (HRegion region : server.getOnlineRegions().values()) {
+    for (HRegion region : server.getOnlineRegions()) {
       region.flushcache();
     }
     // make sure we're starting at 0 so that it's easy to predict what the 
@@ -122,14 +122,15 @@
     assertTrue("Post-flush memcache size", server.getGlobalMemcacheSize() <= 1024 * 1024);
   }
   
-  private long populate(HTable table, int numRows, int startKey) throws IOException {
+  private long populate(HTable table, int numRows, int startKey)
+  throws IOException {
     long total = 0;
     BatchUpdate batchUpdate = null;
-    Text column = new Text(COLFAMILY_NAME1);
+    byte [] column = COLFAMILY_NAME1;
     for (int i = startKey; i < startKey + numRows; i++) {
-      Text key = new Text("row_" + String.format("%1$5d", i));
-      total += key.getLength();
-      total += column.getLength();
+      byte [] key = Bytes.toBytes("row_" + String.format("%1$5d", i));
+      total += key.length;
+      total += column.length;
       total += 8;
       total += ONE_KB.length;
       batchUpdate = new BatchUpdate(key);

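[Editor's note] populate() above estimates the memcache footprint of each row as key bytes + column bytes + 8 (timestamp) + value bytes; the keySize field adds 9 for the row key ("row_" plus a five-character formatted number). A minimal sketch of that accounting, with the constants treated as assumptions carried over from the test:

  // Rough per-row memcache size estimate used by the test:
  // row key + column name + 8-byte timestamp + value.
  static long rowSize(byte [] key, byte [] column, byte [] value) {
    return key.length + column.length + 8 + value.length;
  }
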

