hadoop-common-commits mailing list archives

From: j...@apache.org
Subject: svn commit: r562041 - in /lucene/hadoop/trunk/src/contrib/hbase: ./ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/mapred/ src/test/org/apache/hadoop/hbase/
Date: Thu, 02 Aug 2007 08:15:32 GMT
Author: jimk
Date: Thu Aug  2 01:15:26 2007
New Revision: 562041

URL: http://svn.apache.org/viewvc?view=rev&rev=562041
Log:
HADOOP-1528 HClient for multiple tables (phase 2): all HBase client-side code
(except TestHClient and HBaseShell) has been converted to use the new
client-side objects (HTable/HBaseAdmin/HConnection) instead of HClient.

HBaseAdmin
- Expose connection methods getMaster, isMasterRunning and listTables
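
For readers tracking the API change, here is a minimal sketch of the converted
client-side calls. The class and method names are taken from the hunks below;
the table name "mytable", row "row1", column "contents:", and the class
NewClientSketch itself are hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.hbase.*;

    public class NewClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HBaseConfiguration();

        // Administrative operations move from HClient to HBaseAdmin.
        HBaseAdmin admin = new HBaseAdmin(conf);
        if (!admin.tableExists(new Text("mytable"))) {
          HTableDescriptor desc = new HTableDescriptor("mytable");
          desc.addFamily(new HColumnDescriptor("contents:"));
          admin.createTable(desc);
        }

        // Per-table reads and writes move from HClient.openTable() to HTable.
        HTable table = new HTable(conf, new Text("mytable"));
        long lockid = table.startUpdate(new Text("row1"));
        table.put(lockid, new Text("contents:"), "value".getBytes());
        table.commit(lockid);
      }
    }

One HTable instance per table replaces the single HClient that had to be
re-pointed with openTable(); connection plumbing is shared behind the scenes
by HConnection (see the HMaster and HMerge hunks below).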


Modified:
    lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HBaseAdmin.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMerge.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTable.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseTestCase.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestBatchUpdate.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestBloomFilters.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCleanRegionServerExit.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHClient.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMultipleUpdates.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestRegionServerAbort.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTable.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTableMapReduce.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTimestamp.java

Modified: lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt?view=diff&rev=562041&r1=562040&r2=562041
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt Thu Aug  2 01:15:26 2007
@@ -80,4 +80,7 @@
  50. HADOOP-1468 Add HBase batch update to reduce RPC overhead (restrict batches
      to a single row at a time)
  51. HADOOP-1528 HClient for multiple tables (phase 1)
+ 52. HADOOP-1528 HClient for multiple tables (phase 2): all HBase client-side
+     code (except TestHClient and HBaseShell) has been converted to use the new
+     client-side objects (HTable/HBaseAdmin/HConnection) instead of HClient.
 

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HBaseAdmin.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HBaseAdmin.java?view=diff&rev=562041&r1=562040&r2=562041
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HBaseAdmin.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HBaseAdmin.java Thu Aug  2 01:15:26 2007
@@ -57,6 +57,46 @@
   }
 
   /**
+   * @return proxy connection to master server for this instance
+   * @throws MasterNotRunningException
+   */
+  public HMasterInterface getMaster() throws MasterNotRunningException{
+    return this.connection.getMaster();
+  }
+  
+  /** @return - true if the master server is running */
+  public boolean isMasterRunning() {
+    return this.connection.isMasterRunning();
+  }
+
+  /**
+   * @param tableName Table to check.
+   * @return True if table exists already.
+   * @throws MasterNotRunningException
+   */
+  public boolean tableExists(final Text tableName) throws MasterNotRunningException {
+    if (this.master == null) {
+      throw new MasterNotRunningException("master has been shut down");
+    }
+    
+    return connection.tableExists(tableName);
+  }
+
+  /**
+   * List all the userspace tables.  In other words, scan the META table.
+   *
+   * If we wanted this to be really fast, we could implement a special
+   * catalog table that just contains table names and their descriptors.
+   * Right now, it only exists as part of the META table's region info.
+   *
+   * @return - returns an array of HTableDescriptors 
+   * @throws IOException
+   */
+  public HTableDescriptor[] listTables() throws IOException {
+    return this.connection.listTables();
+  }
+
+  /**
    * Creates a new table
    * 
    * @param desc table descriptor for table
@@ -380,19 +420,6 @@
     LOG.info("Disabled table " + tableName);
   }
   
-  /**
-   * @param tableName Table to check.
-   * @return True if table exists already.
-   * @throws MasterNotRunningException
-   */
-  public boolean tableExists(final Text tableName) throws MasterNotRunningException {
-    if (this.master == null) {
-      throw new MasterNotRunningException("master has been shut down");
-    }
-    
-    return connection.tableExists(tableName);
-  }
-
   /**
    * Add a column to an existing table
    * 
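
A short sketch of the connection methods HBaseAdmin now exposes, per the hunk
above. Exception types follow the signatures in the diff; the method
showTables and its surrounding class are hypothetical scaffolding.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.*;

    class AdminSketch {
      static void showTables(Configuration conf)
      throws IOException, MasterNotRunningException {
        HBaseAdmin admin = new HBaseAdmin(conf);
        if (admin.isMasterRunning()) {
          HMasterInterface master = admin.getMaster(); // RPC proxy to the master
          HTableDescriptor[] tables = admin.listTables(); // reads META region info
          for (HTableDescriptor t : tables) {
            System.out.println(t.getName());
          }
        }
      }
    }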

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java?view=diff&rev=562041&r1=562040&r2=562041
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java Thu Aug  2 01:15:26 2007
@@ -94,7 +94,7 @@
   private Server server;
   private HServerAddress address;
   
-  HClient client;
+  HConnection connection;
  
   long metaRescanInterval;
   
@@ -188,7 +188,7 @@
           region.regionName);
 
       try {
-        regionServer = client.getHRegionConnection(region.server);
+        regionServer = connection.getHRegionConnection(region.server);
         scannerId = regionServer.openScanner(region.regionName, METACOLUMNS,
           FIRST_ROW, System.currentTimeMillis(), null);
 
@@ -681,7 +681,7 @@
     this.address = new HServerAddress(server.getListenerAddress());
     conf.set(MASTER_ADDRESS, address.toString());
     
-    this.client = new HClient(conf);
+    this.connection = HConnectionManager.getConnection(conf);
     
     this.metaRescanInterval
       = conf.getLong("hbase.master.meta.thread.rescanfrequency", 60 * 1000);
@@ -1478,7 +1478,7 @@
             // scanned
             return false;
           }
-          server = client.getHRegionConnection(rootRegionLocation);
+          server = connection.getHRegionConnection(rootRegionLocation);
           scannerId = -1L;
 
           try {
@@ -1523,7 +1523,7 @@
             HRegionInterface server = null;
             long scannerId = -1L;
 
-            server = client.getHRegionConnection(r.server);
+            server = connection.getHRegionConnection(r.server);
           
             scannerId = server.openScanner(r.regionName, columns, startRow,
                 System.currentTimeMillis(), null);
@@ -1595,7 +1595,7 @@
             return false;
           }
           metaRegionName = HGlobals.rootRegionInfo.regionName;
-          server = client.getHRegionConnection(rootRegionLocation);
+          server = connection.getHRegionConnection(rootRegionLocation);
           onlineMetaRegions.remove(regionInfo.getStartKey());
 
         } else {
@@ -1619,7 +1619,7 @@
                 onlineMetaRegions.headMap(regionInfo.getRegionName()).lastKey());
           }
           metaRegionName = r.regionName;
-          server = client.getHRegionConnection(r.server);
+          server = connection.getHRegionConnection(r.server);
         }
 
         long clientId = rand.nextLong();
@@ -1734,7 +1734,7 @@
             return false;
           }
           metaRegionName = HGlobals.rootRegionInfo.regionName;
-          server = client.getHRegionConnection(rootRegionLocation);
+          server = connection.getHRegionConnection(rootRegionLocation);
 
         } else {
           if (!rootScanned
@@ -1762,7 +1762,7 @@
                 onlineMetaRegions.headMap(region.getRegionName()).lastKey());
           }
           metaRegionName = r.regionName;
-          server = client.getHRegionConnection(r.server);
+          server = connection.getHRegionConnection(r.server);
         }
         LOG.info("updating row " + region.getRegionName() + " in table " +
             metaRegionName);
@@ -1898,12 +1898,12 @@
             onlineMetaRegions.get(onlineMetaRegions.
               headMap(newRegion.getTableDesc().getName()).lastKey());
       Text metaRegionName = m.regionName;
-      HRegionInterface connection = client.getHRegionConnection(m.server);
-      long scannerid = connection.openScanner(metaRegionName,
+      HRegionInterface r = connection.getHRegionConnection(m.server);
+      long scannerid = r.openScanner(metaRegionName,
         new Text[] { COL_REGIONINFO }, tableName, System.currentTimeMillis(),
         null);
       try {
-        KeyedData[] data = connection.next(scannerid);
+        KeyedData[] data = r.next(scannerid);
         // Test data and that the row for the data is for our table. If
         // table does not exist, scanner will return row after where our table
         // would be inserted if it exists so look for exact match on table
@@ -1915,30 +1915,29 @@
           throw new TableExistsException(tableName.toString());
         }
       } finally {
-        connection.close(scannerid);
+        r.close(scannerid);
       }
 
       // 2. Create the HRegion
-      HRegion r = HRegion.createHRegion(newRegion.regionId, newRegion.
+      HRegion region = HRegion.createHRegion(newRegion.regionId, newRegion.
         getTableDesc(), this.dir, this.conf);
 
       // 3. Insert into meta
-      HRegionInfo info = r.getRegionInfo();
-      Text regionName = r.getRegionName();
+      HRegionInfo info = region.getRegionInfo();
+      Text regionName = region.getRegionName();
       ByteArrayOutputStream byteValue = new ByteArrayOutputStream();
       DataOutputStream s = new DataOutputStream(byteValue);
       info.write(s);
       long clientId = rand.nextLong();
-      long lockid = connection.
-        startUpdate(metaRegionName, clientId, regionName);
-      connection.put(metaRegionName, clientId, lockid, COL_REGIONINFO,
+      long lockid = r.startUpdate(metaRegionName, clientId, regionName);
+      r.put(metaRegionName, clientId, lockid, COL_REGIONINFO,
         byteValue.toByteArray());
-      connection.commit(metaRegionName, clientId, lockid,
+      r.commit(metaRegionName, clientId, lockid,
         System.currentTimeMillis());
 
       // 4. Close the new region to flush it to disk.  Close its log file too.
-      r.close();
-      r.getLog().closeAndDelete();
+      region.close();
+      region.getLog().closeAndDelete();
 
       // 5. Get it assigned to a server
       unassignedRegions.put(regionName, info);
@@ -2039,7 +2038,7 @@
 
               // Get a connection to a meta server
 
-              HRegionInterface server = client.getHRegionConnection(m.server);
+              HRegionInterface server = connection.getHRegionConnection(m.server);
 
               // Open a scanner on the meta region
               
@@ -2549,8 +2548,8 @@
       
       if (cmd.equals("stop")) {
         try {
-          HClient client = new HClient(conf);
-          client.shutdown();
+          HBaseAdmin adm = new HBaseAdmin(conf);
+          adm.shutdown();
         } catch (Throwable t) {
           LOG.error( "Can not stop master because " +
               StringUtils.stringifyException(t) );
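
The recurring change in HMaster above is that it now borrows a shared
HConnection from HConnectionManager instead of owning an HClient. A hedged
sketch of that pattern follows; the HServerAddress parameter stands in for the
region locations HMaster actually reads from META, and the method name is
hypothetical.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.*;

    class ConnectionSketch {
      static void contactRegionServer(Configuration conf, HServerAddress addr)
      throws IOException {
        // Connections are pooled per configuration rather than per client object.
        HConnection connection = HConnectionManager.getConnection(conf);
        if (connection.isMasterRunning()) {
          // Direct RPC proxy to the region server at 'addr'.
          HRegionInterface server = connection.getHRegionConnection(addr);
          // ... openScanner()/next()/close() against 'server', as HMaster does above.
        }
      }
    }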

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMerge.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMerge.java?view=diff&rev=562041&r1=562040&r2=562041
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMerge.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMerge.java Thu Aug  2 01:15:26 2007
@@ -61,9 +61,9 @@
    */
   public static void merge(Configuration conf, FileSystem fs, Text tableName)
       throws IOException {
-    
-    HClient client = new HClient(conf);
-    boolean masterIsRunning = client.isMasterRunning();
+
+    HConnection connection = HConnectionManager.getConnection(conf);
+    boolean masterIsRunning = connection.isMasterRunning();
     if(tableName.equals(META_TABLE_NAME)) {
         if(masterIsRunning) {
           throw new IllegalStateException(
@@ -76,7 +76,7 @@
         throw new IllegalStateException(
             "HBase instance must be running to merge a normal table");
       }
-      new OnlineMerger(conf, fs, client, tableName).process();
+      new OnlineMerger(conf, fs, tableName).process();
     }
   }
 
@@ -195,17 +195,16 @@
 
   /** Instantiated to compact a normal user table */
   private static class OnlineMerger extends Merger {
-    private HClient client;
+    private HTable table;
     private HScannerInterface metaScanner;
     private HRegionInfo latestRegion;
     
-    OnlineMerger(Configuration conf, FileSystem fs, HClient client,
-        Text tableName) throws IOException {
+    OnlineMerger(Configuration conf, FileSystem fs, Text tableName)
+    throws IOException {
       
       super(conf, fs, tableName);
-      this.client = client;
-      client.openTable(META_TABLE_NAME);
-      this.metaScanner = client.obtainScanner(META_COLS, new Text());
+      this.table = new HTable(conf, META_TABLE_NAME);
+      this.metaScanner = table.obtainScanner(META_COLS, new Text());
       this.latestRegion = null;
     }
     
@@ -269,11 +268,11 @@
         }
         long lockid = -1L;
         try {
-          lockid = client.startUpdate(regionsToDelete[r]);
-          client.delete(lockid, COL_REGIONINFO);
-          client.delete(lockid, COL_SERVER);
-          client.delete(lockid, COL_STARTCODE);
-          client.commit(lockid);
+          lockid = table.startUpdate(regionsToDelete[r]);
+          table.delete(lockid, COL_REGIONINFO);
+          table.delete(lockid, COL_SERVER);
+          table.delete(lockid, COL_STARTCODE);
+          table.commit(lockid);
           lockid = -1L;
 
           if(LOG.isDebugEnabled()) {
@@ -282,7 +281,7 @@
         } finally {
           try {
             if(lockid != -1L) {
-              client.abort(lockid);
+              table.abort(lockid);
             }
 
           } catch(IOException iex) {
@@ -296,9 +295,9 @@
       newRegion.getRegionInfo().write(s);
       long lockid = -1L;
       try {
-        lockid = client.startUpdate(newRegion.getRegionName());
-        client.put(lockid, COL_REGIONINFO, byteValue.toByteArray());
-        client.commit(lockid);
+        lockid = table.startUpdate(newRegion.getRegionName());
+        table.put(lockid, COL_REGIONINFO, byteValue.toByteArray());
+        table.commit(lockid);
         lockid = -1L;
 
         if(LOG.isDebugEnabled()) {
@@ -308,7 +307,7 @@
       } finally {
         try {
           if(lockid != -1L) {
-            client.abort(lockid);
+            table.abort(lockid);
           }
 
         } catch(IOException iex) {
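
The HMerge hunks above all follow the same row-update protocol: take a row
lock with startUpdate, stage deletes or puts, commit, and abort in a finally
block if the commit never happened. A compact sketch of that pattern, assuming
(as elsewhere in this tree) that the COL_* constants live on HConstants:

    import java.io.IOException;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HTable;

    class RowUpdateSketch implements HConstants {
      static void deleteRegionRow(HTable table, Text regionName)
      throws IOException {
        long lockid = -1L;
        try {
          lockid = table.startUpdate(regionName);  // acquires the row lock
          table.delete(lockid, COL_REGIONINFO);
          table.delete(lockid, COL_SERVER);
          table.delete(lockid, COL_STARTCODE);
          table.commit(lockid);
          lockid = -1L;                            // committed; nothing to abort
        } finally {
          if (lockid != -1L) {
            table.abort(lockid);                   // release the lock on failure
          }
        }
      }
    }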

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java?view=diff&rev=562041&r1=562040&r2=562041
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java Thu Aug  2 01:15:26 2007
@@ -1715,22 +1715,22 @@
     meta.commit(writeid, System.currentTimeMillis());
   }
   
-  static void addRegionToMETA(final HClient client,
+  static void addRegionToMETA(final Configuration conf,
       final Text table, final HRegion region,
       final HServerAddress serverAddress,
       final long startCode)
   throws IOException {
-    client.openTable(table);
+    HTable t = new HTable(conf, table);
     ByteArrayOutputStream bytes = new ByteArrayOutputStream();
     DataOutputStream out = new DataOutputStream(bytes);
     region.getRegionInfo().write(out);
-    long lockid = client.startUpdate(region.getRegionName());
-    client.put(lockid, COL_REGIONINFO, bytes.toByteArray());
-    client.put(lockid, COL_SERVER,
+    long lockid = t.startUpdate(region.getRegionName());
+    t.put(lockid, COL_REGIONINFO, bytes.toByteArray());
+    t.put(lockid, COL_SERVER,
       serverAddress.toString().getBytes(UTF8_ENCODING));
-    client.put(lockid, COL_STARTCODE,
+    t.put(lockid, COL_STARTCODE,
       String.valueOf(startCode).getBytes(UTF8_ENCODING));
-    client.commit(lockid);
+    t.commit(lockid);
     if (LOG.isDebugEnabled()) {
       LOG.info("Added region " + region.getRegionName() + " to table " +
         table);
@@ -1739,20 +1739,20 @@
   
   /**
    * Delete <code>region</code> from META <code>table</code>.
-   * @param client Client to use running update.
+   * @param conf Configuration object
    * @param table META table we are to delete region from.
    * @param regionName Region to remove.
    * @throws IOException
    */
-  static void removeRegionFromMETA(final HClient client,
+  static void removeRegionFromMETA(final Configuration conf,
       final Text table, final Text regionName)
   throws IOException {
-    client.openTable(table);
-    long lockid = client.startUpdate(regionName);
-    client.delete(lockid, COL_REGIONINFO);
-    client.delete(lockid, COL_SERVER);
-    client.delete(lockid, COL_STARTCODE);
-    client.commit(lockid);
+    HTable t = new HTable(conf, table);
+    long lockid = t.startUpdate(regionName);
+    t.delete(lockid, COL_REGIONINFO);
+    t.delete(lockid, COL_SERVER);
+    t.delete(lockid, COL_STARTCODE);
+    t.commit(lockid);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Removed " + regionName + " from table " + table);
     }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java?view=diff&rev=562041&r1=562040&r2=562041
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java Thu Aug  2 01:15:26 2007
@@ -102,7 +102,6 @@
   
   /** Runs periodically to determine if regions need to be compacted or split */
   class SplitOrCompactChecker implements Runnable, RegionUnavailableListener {
-    HClient client = new HClient(conf);
   
     /**
      * {@inheritDoc}
@@ -207,7 +206,7 @@
       // Remove old region from META
       for (int tries = 0; tries < numRetries; tries++) {
         try {
-          HRegion.removeRegionFromMETA(client, tableToUpdate,
+          HRegion.removeRegionFromMETA(conf, tableToUpdate,
               region.getRegionName());
           break;
         } catch (IOException e) {
@@ -224,7 +223,7 @@
       for (int i = 0; i < newRegions.length; i++) {
         for (int tries = 0; tries < numRetries; tries ++) {
           try {
-            HRegion.addRegionToMETA(client, tableToUpdate, newRegions[i],
+            HRegion.addRegionToMETA(conf, tableToUpdate, newRegions[i],
                 serverInfo.getServerAddress(), serverInfo.getStartCode());
             break;
           } catch(IOException e) {

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTable.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTable.java?view=diff&rev=562041&r1=562040&r2=562041
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTable.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTable.java Thu Aug  2 01:15:26 2007
@@ -94,6 +94,11 @@
         row : this.tableServers.headMap(row).lastKey();
     return this.tableServers.get(serverKey);
   }
+
+  /** @return the connection */
+  HConnection getConnection() {
+    return connection;
+  }
   
   /**
    * Verifies that no update is in progress

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java?view=diff&rev=562041&r1=562040&r2=562041
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java Thu Aug  2 01:15:26 2007
@@ -37,7 +37,7 @@
 import org.apache.hadoop.mapred.RecordReader;
 import org.apache.hadoop.mapred.Reporter;
 
-import org.apache.hadoop.hbase.HClient;
+import org.apache.hadoop.hbase.HTable;
 import org.apache.hadoop.hbase.HScannerInterface;
 import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.io.KeyedData;
@@ -59,7 +59,7 @@
   
   private Text m_tableName;
   Text[] m_cols;
-  HClient m_client;
+  HTable m_table;
 
   /**
    * Iterate over an HBase table data, return (HStoreKey, KeyedDataArrayWritable) pairs
@@ -78,14 +78,12 @@
     public TableRecordReader(Text startRow, Text endRow) throws IOException {
       LOG.debug("start construct");
       m_row = new TreeMap<Text, byte[]>();
-      m_scanner = m_client.obtainScanner(m_cols, startRow);
+      m_scanner = m_table.obtainScanner(m_cols, startRow);
       m_endRow = endRow;
       LOG.debug("end construct");
     }
 
-    /* (non-Javadoc)
-     * @see org.apache.hadoop.mapred.RecordReader#close()
-     */
+    /** {@inheritDoc} */
     public void close() throws IOException {
       LOG.debug("start close");
       m_scanner.close();
@@ -110,18 +108,14 @@
       return new KeyedDataArrayWritable();
     }
 
-    /* (non-Javadoc)
-     * @see org.apache.hadoop.mapred.RecordReader#getPos()
-     */
+    /** {@inheritDoc} */
     public long getPos() {
       // This should be the ordinal tuple in the range; 
       // not clear how to calculate...
       return 0;
     }
 
-    /* (non-Javadoc)
-     * @see org.apache.hadoop.mapred.RecordReader#getProgress()
-     */
+    /** {@inheritDoc} */
     public float getProgress() {
       // Depends on the total number of tuples and getPos
       return 0;
@@ -165,9 +159,7 @@
 
   }
 
-  /* (non-Javadoc)
-   * @see org.apache.hadoop.mapred.InputFormat#getRecordReader(org.apache.hadoop.mapred.InputSplit, org.apache.hadoop.mapred.JobConf, org.apache.hadoop.mapred.Reporter)
-   */
+  /** {@inheritDoc} */
   public RecordReader getRecordReader(InputSplit split,
       @SuppressWarnings("unused") JobConf job,
       @SuppressWarnings("unused") Reporter reporter) throws IOException {
@@ -185,7 +177,7 @@
   public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
     LOG.debug("start getSplits");
 
-    Text[] startKeys = m_client.getStartKeys();
+    Text[] startKeys = m_table.getStartKeys();
     if(startKeys == null || startKeys.length == 0) {
       throw new IOException("Expecting at least one region");
     }
@@ -199,9 +191,7 @@
     return splits;
   }
 
-  /* (non-Javadoc)
-   * @see org.apache.hadoop.mapred.JobConfigurable#configure(org.apache.hadoop.mapred.JobConf)
-   */
+  /** {@inheritDoc} */
   public void configure(JobConf job) {
     LOG.debug("start configure");
     Path[] tableNames = job.getInputPaths();
@@ -212,18 +202,15 @@
     for(int i = 0; i < m_cols.length; i++) {
       m_cols[i] = new Text(colNames[i]);
     }
-    m_client = new HClient(job);
     try {
-      m_client.openTable(m_tableName);
-    } catch(Exception e) {
+      m_table = new HTable(job, m_tableName);
+    } catch (Exception e) {
       LOG.error(e);
     }
     LOG.debug("end configure");
   }
 
-  /* (non-Javadoc)
-   * @see org.apache.hadoop.mapred.InputFormat#validateInput(org.apache.hadoop.mapred.JobConf)
-   */
+  /** {@inheritDoc} */
   public void validateInput(JobConf job) throws IOException {
 
     // expecting exactly one path
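
Per the configure() hunk above, TableInputFormat now builds an HTable straight
from the JobConf, taking the table name from the job's input path. A hedged
sketch of the job wiring; the column-list configuration does not appear in
this excerpt, so it is deliberately omitted rather than guessed at:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.hbase.mapred.TableInputFormat;

    class InputWiringSketch {
      static JobConf wire() {
        JobConf job = new JobConf();
        job.setInputFormat(TableInputFormat.class);
        job.setInputPath(new Path("mytable")); // table name rides in as the input path
        // Column-list setting omitted: its config key is not shown in this diff.
        return job;
      }
    }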

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java?view=diff&rev=562041&r1=562040&r2=562041
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java Thu Aug  2 01:15:26 2007
@@ -33,7 +33,7 @@
 import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.util.Progressable;
 
-import org.apache.hadoop.hbase.HClient;
+import org.apache.hadoop.hbase.HTable;
 import org.apache.hadoop.hbase.io.KeyedData;
 import org.apache.hadoop.hbase.io.KeyedDataArrayWritable;
 
@@ -57,20 +57,18 @@
    * and write to an HBase table
    */
   protected class TableRecordWriter implements RecordWriter {
-    private HClient m_client;
+    private HTable m_table;
 
     /**
      * Instantiate a TableRecordWriter with the HBase HClient for writing.
      * 
-     * @param client
+     * @param table
      */
-    public TableRecordWriter(HClient client) {
-      m_client = client;
+    public TableRecordWriter(HTable table) {
+      m_table = table;
     }
 
-    /* (non-Javadoc)
-     * @see org.apache.hadoop.mapred.RecordWriter#close(org.apache.hadoop.mapred.Reporter)
-     */
+    /** {@inheritDoc} */
     public void close(@SuppressWarnings("unused") Reporter reporter) {}
 
     /**
@@ -87,16 +85,16 @@
 
       // start transaction
       
-      long xid = m_client.startUpdate(tKey);
+      long xid = m_table.startUpdate(tKey);
       
       for(int i = 0; i < columns.length; i++) {
         KeyedData column = columns[i];
-        m_client.put(xid, column.getKey().getColumn(), column.getData());
+        m_table.put(xid, column.getKey().getColumn(), column.getData());
       }
       
       // end transaction
       
-      m_client.commit(xid);
+      m_table.commit(xid);
 
       LOG.debug("end write");
     }
@@ -105,6 +103,7 @@
   /* (non-Javadoc)
    * @see org.apache.hadoop.mapred.OutputFormatBase#getRecordWriter(org.apache.hadoop.fs.FileSystem, org.apache.hadoop.mapred.JobConf, java.lang.String, org.apache.hadoop.util.Progressable)
    */
+  /** {@inheritDoc} */
   @Override
   @SuppressWarnings("unused")
   public RecordWriter getRecordWriter(FileSystem ignored, JobConf job,
@@ -114,20 +113,17 @@
     
     LOG.debug("start get writer");
     Text tableName = new Text(job.get(OUTPUT_TABLE));
-    HClient client = null;
+    HTable table = null;
     try {
-      client = new HClient(job);
-      client.openTable(tableName);
+      table = new HTable(job, tableName);
     } catch(Exception e) {
       LOG.error(e);
     }
     LOG.debug("end get writer");
-    return new TableRecordWriter(client);
+    return new TableRecordWriter(table);
   }
 
-  /* (non-Javadoc)
-   * @see org.apache.hadoop.mapred.OutputFormatBase#checkOutputSpecs(org.apache.hadoop.fs.FileSystem, org.apache.hadoop.mapred.JobConf)
-   */
+  /** {@inheritDoc} */
   @Override
   @SuppressWarnings("unused")
   public void checkOutputSpecs(FileSystem ignored, JobConf job)
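
Symmetrically, a hedged sketch of the output side: getRecordWriter() above
reads the table name from the OUTPUT_TABLE job property. "mytable" is
hypothetical, and OUTPUT_TABLE is assumed to be accessible to callers:

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.hbase.mapred.TableOutputFormat;

    class OutputWiringSketch {
      static JobConf wire() {
        JobConf job = new JobConf();
        job.setOutputFormat(TableOutputFormat.class);
        job.set(TableOutputFormat.OUTPUT_TABLE, "mytable"); // read back in getRecordWriter()
        return job;
      }
    }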

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseTestCase.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseTestCase.java?view=diff&rev=562041&r1=562040&r2=562041
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseTestCase.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseTestCase.java Thu Aug  2 01:15:26 2007
@@ -36,7 +36,7 @@
     StaticTestEnvironment.initialize();
   }
   
-  protected Configuration conf;
+  protected volatile Configuration conf;
   
   protected HBaseTestCase() {
     super();

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java?view=diff&rev=562041&r1=562040&r2=562041
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java Thu Aug  2 01:15:26 2007
@@ -102,54 +102,69 @@
       SEQUENTIAL_WRITE,
       SCAN});
   
-  private final Configuration conf;
-  private final HClient client;
+  volatile Configuration conf;
   private boolean miniCluster = false;
   private int N = 1;
   private int R = ROWS_PER_GB;
   private static final Path PERF_EVAL_DIR = new Path("performance_evaluation");
   
-  /*
+  /**
    * Regex to parse lines in input file passed to mapreduce task.
    */
   public static final Pattern LINE_PATTERN =
     Pattern.compile("startRow=(\\d+),\\s+" +
     "perClientRunRows=(\\d+),\\s+totalRows=(\\d+),\\s+clients=(\\d+)");
   
-  /*
+  /**
    * Enum for map metrics.  Keep it out here rather than inside in the Map
    * inner-class so we can find associated properties.
    */
-  protected static enum Counter {ELAPSED_TIME, ROWS}
+  protected static enum Counter {
+    /** elapsed time */
+    ELAPSED_TIME,
+    /** number of rows */
+    ROWS}
   
   
-  public PerformanceEvaluation(final HBaseConfiguration c) {
+  /**
+   * Constructor
+   * @param c Configuration object
+   */
+  public PerformanceEvaluation(final Configuration c) {
     this.conf = c;
-    this.client = new HClient(conf);
   }
   
-  /*
+  /**
    * Implementations can have their status set.
    */
   static interface Status {
+    /**
+     * Sets status
+     * @param msg status message
+     * @throws IOException
+     */
     void setStatus(final String msg) throws IOException;
   }
   
-  /*
+  /**
    * MapReduce job that runs a performance evaluation client in each map task.
    */
   public static class EvaluationMapTask extends MapReduceBase
   implements Mapper {
+    /** configuration parameter name that contains the command */
     public final static String CMD_KEY = "EvaluationMapTask.command";
     private String cmd;
     private PerformanceEvaluation pe;
     
+    /** {@inheritDoc} */
     @Override
     public void configure(JobConf j) {
       this.cmd = j.get(CMD_KEY);
-      this.pe = new PerformanceEvaluation(new HBaseConfiguration());
+
+      this.pe = new PerformanceEvaluation(j);
     }
     
+    /** {@inheritDoc} */
     public void map(@SuppressWarnings("unused") final WritableComparable key,
       final Writable value, final OutputCollector output,
       final Reporter reporter)
@@ -160,7 +175,7 @@
         int perClientRunRows = Integer.parseInt(m.group(2));
         int totalRows = Integer.parseInt(m.group(3));
         Status status = new Status() {
-          public void setStatus(String msg) throws IOException {
+          public void setStatus(String msg) {
             reporter.setStatus(msg);
           }
         };
@@ -182,8 +197,8 @@
    * @return True if we created the table.
    * @throws IOException
    */
-  private boolean checkTable(final HClient c) throws IOException {
-    HTableDescriptor [] extantTables = c.listTables();
+  private boolean checkTable(HBaseAdmin admin) throws IOException {
+    HTableDescriptor [] extantTables = admin.listTables();
     boolean tableExists = false;
     if (extantTables.length > 0) {
       // Check to see if our table already exists.  Print warning if it does.
@@ -196,7 +211,7 @@
       }
     }
     if (!tableExists) {
-      c.createTable(tableDescriptor);
+      admin.createTable(tableDescriptor);
       LOG.info("Table " + tableDescriptor + " created");
     }
     return !tableExists;
@@ -210,7 +225,7 @@
    */
   private void runNIsMoreThanOne(final String cmd)
   throws IOException {
-    checkTable(this.client);
+    checkTable(new HBaseAdmin(conf));
     
     // Run a mapreduce job.  Run as many maps as asked-for clients.
     // Before we start up the job, write out an input file with instruction
@@ -269,20 +284,23 @@
    */
   static abstract class Test {
     protected final Random rand = new Random(System.currentTimeMillis());
-    protected final HClient client;
     protected final int startRow;
     protected final int perClientRunRows;
     protected final int totalRows;
     private final Status status;
+    protected HBaseAdmin admin;
+    protected HTable table;
+    protected volatile Configuration conf;
     
-    Test(final HClient c, final int startRow, final int perClientRunRows,
-        final int totalRows, final Status status) {
+    Test(final Configuration conf, final int startRow,
+        final int perClientRunRows, final int totalRows, final Status status) {
       super();
-      this.client = c;
       this.startRow = startRow;
       this.perClientRunRows = perClientRunRows;
       this.totalRows = totalRows;
       this.status = status;
+      this.table = null;
+      this.conf = conf;
     }
     
     /*
@@ -305,9 +323,11 @@
     }
     
     void testSetup() throws IOException {
-      this.client.openTable(tableDescriptor.getName());
+      this.admin = new HBaseAdmin(conf);
+      this.table = new HTable(conf, tableDescriptor.getName());
     }
-    
+
+    @SuppressWarnings("unused")
     void testTakedown()  throws IOException {
       // Empty
     }
@@ -355,16 +375,17 @@
   }
   
   class RandomReadTest extends Test {
-    RandomReadTest(final HClient c, final int startRow,
-      final int perClientRunRows, final int totalRows, final Status status) {
-      super(c, startRow, perClientRunRows, totalRows, status);
+    RandomReadTest(final Configuration conf, final int startRow,
+        final int perClientRunRows, final int totalRows, final Status status) {
+      super(conf, startRow, perClientRunRows, totalRows, status);
     }
     
     @Override
     void testRow(@SuppressWarnings("unused") final int i) throws IOException {
-      this.client.get(getRandomRow(), COLUMN_NAME);
+      this.table.get(getRandomRow(), COLUMN_NAME);
     }
-    
+
+    @Override
     protected int getReportingPeriod() {
       // 
       return this.perClientRunRows / 100;
@@ -377,17 +398,17 @@
   }
   
   class RandomWriteTest extends Test {
-    RandomWriteTest(final HClient c, final int startRow,
-      final int perClientRunRows, final int totalRows, final Status status) {
-      super(c, startRow, perClientRunRows, totalRows, status);
+    RandomWriteTest(final Configuration conf, final int startRow,
+        final int perClientRunRows, final int totalRows, final Status status) {
+      super(conf, startRow, perClientRunRows, totalRows, status);
     }
     
     @Override
     void testRow(@SuppressWarnings("unused") final int i) throws IOException {
       Text row = getRandomRow();
-      long lockid = client.startUpdate(row);
-      client.put(lockid, COLUMN_NAME, generateValue());
-      client.commit(lockid);
+      long lockid = table.startUpdate(row);
+      table.put(lockid, COLUMN_NAME, generateValue());
+      table.commit(lockid);
     }
 
     @Override
@@ -401,15 +422,15 @@
     private HStoreKey key = new HStoreKey();
     private TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
     
-    ScanTest(final HClient c, final int startRow, final int perClientRunRows,
-        final int totalRows, final Status status) {
-      super(c, startRow, perClientRunRows, totalRows, status);
+    ScanTest(final Configuration conf, final int startRow,
+        final int perClientRunRows, final int totalRows, final Status status) {
+      super(conf, startRow, perClientRunRows, totalRows, status);
     }
     
     @Override
     void testSetup() throws IOException {
       super.testSetup();
-      this.testScanner = client.obtainScanner(new Text[] {COLUMN_NAME},
+      this.testScanner = table.obtainScanner(new Text[] {COLUMN_NAME},
           new Text(Integer.toString(this.startRow)));
     }
     
@@ -435,14 +456,14 @@
   }
   
   class SequentialReadTest extends Test {
-    SequentialReadTest(final HClient c, final int startRow,
+    SequentialReadTest(final Configuration conf, final int startRow,
         final int perClientRunRows, final int totalRows, final Status status) {
-      super(c, startRow, perClientRunRows, totalRows, status);
+      super(conf, startRow, perClientRunRows, totalRows, status);
     }
     
     @Override
     void testRow(final int i) throws IOException {
-      client.get(new Text(Integer.toString(i)), COLUMN_NAME);
+      table.get(new Text(Integer.toString(i)), COLUMN_NAME);
     }
 
     @Override
@@ -452,16 +473,16 @@
   }
   
   class SequentialWriteTest extends Test {
-    SequentialWriteTest(final HClient c, final int startRow,
+    SequentialWriteTest(final Configuration conf, final int startRow,
         final int perClientRunRows, final int totalRows, final Status status) {
-      super(c, startRow, perClientRunRows, totalRows, status);
+      super(conf, startRow, perClientRunRows, totalRows, status);
     }
     
     @Override
     void testRow(final int i) throws IOException {
-      long lockid = client.startUpdate(new Text(Integer.toString(i)));
-      client.put(lockid, COLUMN_NAME, generateValue());
-      client.commit(lockid);
+      long lockid = table.startUpdate(new Text(Integer.toString(i)));
+      table.put(lockid, COLUMN_NAME, generateValue());
+      table.commit(lockid);
     }
 
     @Override
@@ -477,25 +498,25 @@
       perClientRunRows + " rows");
     long totalElapsedTime = 0;
     if (cmd.equals(RANDOM_READ)) {
-      Test t = new RandomReadTest(this.client, startRow, perClientRunRows,
+      Test t = new RandomReadTest(this.conf, startRow, perClientRunRows,
         totalRows, status);
       totalElapsedTime = t.test();
     } else if (cmd.equals(RANDOM_READ_MEM)) {
       throw new UnsupportedOperationException("Not yet implemented");
     } else if (cmd.equals(RANDOM_WRITE)) {
-      Test t = new RandomWriteTest(this.client, startRow, perClientRunRows,
+      Test t = new RandomWriteTest(this.conf, startRow, perClientRunRows,
         totalRows, status);
       totalElapsedTime = t.test();
     } else if (cmd.equals(SCAN)) {
-      Test t = new ScanTest(this.client, startRow, perClientRunRows,
+      Test t = new ScanTest(this.conf, startRow, perClientRunRows,
         totalRows, status);
       totalElapsedTime = t.test();
     } else if (cmd.equals(SEQUENTIAL_READ)) {
-      Test t = new SequentialReadTest(this.client, startRow, perClientRunRows,
+      Test t = new SequentialReadTest(this.conf, startRow, perClientRunRows,
         totalRows, status);
       totalElapsedTime = t.test();
     } else if (cmd.equals(SEQUENTIAL_WRITE)) {
-      Test t = new SequentialWriteTest(this.client, startRow, perClientRunRows,
+      Test t = new SequentialWriteTest(this.conf, startRow, perClientRunRows,
         totalRows, status);
       totalElapsedTime = t.test();
     } else {
@@ -513,9 +534,11 @@
         LOG.info(msg);
       }
     };
-    
+
+    HBaseAdmin admin = null;
     try {
-      checkTable(this.client);
+      admin = new HBaseAdmin(this.conf);
+      checkTable(admin);
 
       if (cmd.equals(RANDOM_READ) || cmd.equals(RANDOM_READ_MEM) ||
           cmd.equals(SCAN) || cmd.equals(SEQUENTIAL_READ)) {
@@ -529,7 +552,9 @@
       LOG.error("Failed", e);
     } finally {
       LOG.info("Deleting table " + tableDescriptor.getName());
-      this.client.deleteTable(tableDescriptor.getName());
+      if (admin != null) {
+        admin.deleteTable(tableDescriptor.getName());
+      }
     }
   }
   

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestBatchUpdate.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestBatchUpdate.java?view=diff&rev=562041&r1=562040&r2=562041
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestBatchUpdate.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestBatchUpdate.java Thu Aug  2 01:15:26 2007
@@ -33,7 +33,7 @@
   private byte[] value;
 
   private HTableDescriptor desc = null;
-  private HClient client = null;
+  private HTable table = null;
 
   /** constructor */
   public TestBatchUpdate() {
@@ -51,12 +51,12 @@
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    this.client = new HClient(conf);
     this.desc = new HTableDescriptor("test");
     desc.addFamily(new HColumnDescriptor(CONTENTS_STR));
     try {
-      client.createTable(desc);
-      client.openTable(desc.getName());
+      HBaseAdmin admin = new HBaseAdmin(conf);
+      admin.createTable(desc);
+      table = new HTable(conf, desc.getName());
       
     } catch (Exception e) {
       e.printStackTrace();
@@ -67,7 +67,7 @@
   /** the test case */
   public void testBatchUpdate() {
     try {
-      client.commitBatch(-1L);
+      table.commitBatch(-1L);
       
     } catch (IllegalStateException e) {
       // expected
@@ -76,37 +76,28 @@
       fail();
     }
 
-    long lockid = client.startBatchUpdate(new Text("row1"));
+    long lockid = table.startBatchUpdate(new Text("row1"));
     
     try {
-      client.openTable(HConstants.META_TABLE_NAME);
-      
-    } catch (IllegalStateException e) {
-      // expected
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail();
-    }
-    try {
       try {
         @SuppressWarnings("unused")
-        long dummy = client.startUpdate(new Text("row2"));
+        long dummy = table.startUpdate(new Text("row2"));
       } catch (IllegalStateException e) {
         // expected
       } catch (Exception e) {
         e.printStackTrace();
         fail();
       }
-      client.put(lockid, CONTENTS, value);
-      client.delete(lockid, CONTENTS);
-      client.commitBatch(lockid);
+      table.put(lockid, CONTENTS, value);
+      table.delete(lockid, CONTENTS);
+      table.commitBatch(lockid);
       
-      lockid = client.startBatchUpdate(new Text("row2"));
-      client.put(lockid, CONTENTS, value);
-      client.commit(lockid);
+      lockid = table.startBatchUpdate(new Text("row2"));
+      table.put(lockid, CONTENTS, value);
+      table.commit(lockid);
  
       Text[] columns = { CONTENTS };
-      HScannerInterface scanner = client.obtainScanner(columns, new Text());
+      HScannerInterface scanner = table.obtainScanner(columns, new Text());
       HStoreKey key = new HStoreKey();
       TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
       while(scanner.next(key, results)) {

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestBloomFilters.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestBloomFilters.java?view=diff&rev=562041&r1=562040&r2=562041
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestBloomFilters.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestBloomFilters.java Thu Aug  2 01:15:26 2007
@@ -29,7 +29,7 @@
   private static final Text CONTENTS = new Text("contents:");
 
   private HTableDescriptor desc = null;
-  private HClient client = null;
+  private HTable table = null;
   
   private static final Text[] rows = {
     new Text("wmjwjzyv"),
@@ -150,11 +150,11 @@
     Logger.getLogger(HStore.class).setLevel(Level.DEBUG);
   }
   
+  /** {@inheritDoc} */
   @Override
   public void setUp() {
     try {
       super.setUp();
-      this.client = new HClient(conf);
       this.desc = new HTableDescriptor("test");
       desc.addFamily(
           new HColumnDescriptor(CONTENTS, 1, HColumnDescriptor.CompressionType.NONE,
@@ -164,17 +164,18 @@
                   12499,                              // number of bits
                   4                                   // number of hash functions
               )));                                    // false positive = 0.0000001
-      client.createTable(desc);
-      client.openTable(desc.getName());
+      HBaseAdmin admin = new HBaseAdmin(conf);
+      admin.createTable(desc);
+      table = new HTable(conf, desc.getName());
 
       // Store some values
 
       for(int i = 0; i < 100; i++) {
         Text row = rows[i];
         String value = row.toString();
-        long lockid = client.startUpdate(rows[i]);
-        client.put(lockid, CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
-        client.commit(lockid);
+        long lockid = table.startUpdate(rows[i]);
+        table.put(lockid, CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
+        table.commit(lockid);
       }
     } catch (Exception e) {
       e.printStackTrace();
@@ -195,7 +196,7 @@
     
     try {
       for(int i = 0; i < testKeys.length; i++) {
-        byte[] value = client.get(testKeys[i], CONTENTS);
+        byte[] value = table.get(testKeys[i], CONTENTS);
         if(value != null && value.length != 0) {
           System.err.println("non existant key: " + testKeys[i] +
               " returned value: " + new String(value, HConstants.UTF8_ENCODING));

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCleanRegionServerExit.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCleanRegionServerExit.java?view=diff&rev=562041&r1=562040&r2=562041
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCleanRegionServerExit.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCleanRegionServerExit.java Thu Aug  2 01:15:26 2007
@@ -30,7 +30,7 @@
  * Tests region server failover when a region server exits.
  */
 public class TestCleanRegionServerExit extends HBaseClusterTestCase {
-  private HClient client;
+  private HTable table;
 
   /** constructor */
   public TestCleanRegionServerExit() {
@@ -48,7 +48,6 @@
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    this.client = new HClient(conf);
   }
   
   /**
@@ -57,19 +56,21 @@
    */
   public void testCleanRegionServerExit() throws IOException {
     // When the META table can be opened, the region servers are running
-    this.client.openTable(HConstants.META_TABLE_NAME);
+    @SuppressWarnings("unused")
+    HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
     // Put something into the meta table.
     String tableName = getName();
     HTableDescriptor desc = new HTableDescriptor(tableName);
     desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY.toString()));
-    this.client.createTable(desc);
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    admin.createTable(desc);
     // put some values in the table
-    this.client.openTable(new Text(tableName));
+    this.table = new HTable(conf, new Text(tableName));
     Text row = new Text("row1");
-    long lockid = client.startUpdate(row);
-    client.put(lockid, HConstants.COLUMN_FAMILY,
+    long lockid = table.startUpdate(row);
+    table.put(lockid, HConstants.COLUMN_FAMILY,
         tableName.getBytes(HConstants.UTF8_ENCODING));
-    client.commit(lockid);
+    table.commit(lockid);
     // Start up a new region server to take over serving of root and meta
     // after we shut down the current meta/root host.
     this.cluster.startRegionServer();
@@ -81,7 +82,7 @@
     // to a different server
 
     HScannerInterface scanner =
-      client.obtainScanner(HConstants.COLUMN_FAMILY_ARRAY, new Text());
+      table.obtainScanner(HConstants.COLUMN_FAMILY_ARRAY, new Text());
 
     try {
       HStoreKey key = new HStoreKey();

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java?view=diff&rev=562041&r1=562040&r2=562041
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java Thu Aug  2 01:15:26 2007
@@ -32,13 +32,15 @@
 public class TestHBaseCluster extends HBaseClusterTestCase {
 
   private HTableDescriptor desc;
-  private HClient client;
+  private HBaseAdmin admin;
+  private HTable table;
 
   /** constructor */
   public TestHBaseCluster() {
     super(true);
     this.desc = null;
-    this.client = null;
+    this.admin = null;
+    this.table = null;
   }
 
   /**
@@ -55,6 +57,8 @@
     cleanup();
   }
 
+  /** {@inheritDoc} */
+  @Override
   public void tearDown() throws Exception {
     super.tearDown();
   }
@@ -69,11 +73,12 @@
   private static final String ANCHORSTR = "anchorstr";
 
   private void setup() throws IOException {
-    client = new HClient(conf);
     desc = new HTableDescriptor("test");
     desc.addFamily(new HColumnDescriptor(CONTENTS.toString()));
     desc.addFamily(new HColumnDescriptor(ANCHOR.toString()));
-    client.createTable(desc);
+    admin = new HBaseAdmin(conf);
+    admin.createTable(desc);
+    table = new HTable(conf, desc.getName());
   }
       
   // Test basic functionality. Writes to contents:basic and anchor:anchornum-*
@@ -81,15 +86,13 @@
   private void basic() throws IOException {
     long startTime = System.currentTimeMillis();
 
-    client.openTable(desc.getName());
-
     // Write out a bunch of values
 
     for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
-      long writeid = client.startUpdate(new Text("row_" + k));
-      client.put(writeid, CONTENTS_BASIC, (CONTENTSTR + k).getBytes());
-      client.put(writeid, new Text(ANCHORNUM + k), (ANCHORSTR + k).getBytes());
-      client.commit(writeid);
+      long writeid = table.startUpdate(new Text("row_" + k));
+      table.put(writeid, CONTENTS_BASIC, (CONTENTSTR + k).getBytes());
+      table.put(writeid, new Text(ANCHORNUM + k), (ANCHORSTR + k).getBytes());
+      table.commit(writeid);
     }
     System.out.println("Write " + NUM_VALS + " rows. Elapsed time: "
         + ((System.currentTimeMillis() - startTime) / 1000.0));
@@ -102,7 +105,7 @@
     for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
       Text rowlabel = new Text("row_" + k);
 
-      byte bodydata[] = client.get(rowlabel, CONTENTS_BASIC);
+      byte bodydata[] = table.get(rowlabel, CONTENTS_BASIC);
       assertNotNull(bodydata);
       String bodystr = new String(bodydata).toString().trim();
       String teststr = CONTENTSTR + k;
@@ -110,7 +113,7 @@
           + "), expected: '" + teststr + "' got: '" + bodystr + "'",
           bodystr, teststr);
       collabel = new Text(ANCHORNUM + k);
-      bodydata = client.get(rowlabel, collabel);
+      bodydata = table.get(rowlabel, collabel);
       bodystr = new String(bodydata).toString().trim();
       teststr = ANCHORSTR + k;
       assertEquals("Incorrect value for key: (" + rowlabel + "," + collabel
@@ -130,7 +133,7 @@
     
     long startTime = System.currentTimeMillis();
     
-    HScannerInterface s = client.obtainScanner(cols, new Text());
+    HScannerInterface s = table.obtainScanner(cols, new Text());
     try {
 
       int contentsFetched = 0;
@@ -178,7 +181,7 @@
   }
 
   private void listTables() throws IOException {
-    HTableDescriptor[] tables = client.listTables();
+    HTableDescriptor[] tables = admin.listTables();
     assertEquals(1, tables.length);
     assertEquals(desc.getName(), tables[0].getName());
     Set<Text> families = tables[0].families().keySet();
@@ -191,6 +194,6 @@
 
     // Delete the table we created
 
-    client.deleteTable(desc.getName());
+    admin.deleteTable(desc.getName());
   }
 }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHClient.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHClient.java?view=diff&rev=562041&r1=562040&r2=562041
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHClient.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHClient.java Thu Aug  2 01:15:26 2007
@@ -25,15 +25,21 @@
 /**
  * Test HClient.
  */
+@Deprecated
 public class TestHClient extends HBaseClusterTestCase {
   private Log LOG = LogFactory.getLog(this.getClass().getName());
   private HClient client;
 
+  /** {@inheritDoc} */
+  @Override
   public void setUp() throws Exception {
     super.setUp();
     this.client = new HClient(this.conf);
   }
   
+  /** the test
+   * @throws Exception
+   */
   public void testCommandline() throws Exception {
     final String m =  "--master=" + this.conf.get(HConstants.MASTER_ADDRESS);
     LOG.info("Creating table");

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java?view=diff&rev=562041&r1=562040&r2=562041
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java Thu Aug  2 01:15:26 2007
@@ -21,6 +21,7 @@
 
 import org.apache.hadoop.io.Text;
 
+/** tests administrative functions */
 public class TestMasterAdmin extends HBaseClusterTestCase {
   private static final Text COLUMN_NAME = new Text("col1:");
   private static HTableDescriptor testDesc;
@@ -29,17 +30,20 @@
     testDesc.addFamily(new HColumnDescriptor(COLUMN_NAME.toString()));
   }
   
-  private HClient client;
+  private HBaseAdmin admin;
 
+  /** constructor */
   public TestMasterAdmin() {
     super(true);
-    client = new HClient(conf);
+    admin = null;
   }
   
+  /** the test */
   public void testMasterAdmin() {
     try {
-      client.createTable(testDesc);
-      client.disableTable(testDesc.getName());
+      admin = new HBaseAdmin(conf);
+      admin.createTable(testDesc);
+      admin.disableTable(testDesc.getName());
       
     } catch(Exception e) {
       e.printStackTrace();
@@ -48,23 +52,24 @@
 
     try {
       try {
-        client.openTable(testDesc.getName());
+        @SuppressWarnings("unused")
+        HTable table = new HTable(conf, testDesc.getName());
 
       } catch(IllegalStateException e) {
         // Expected
       }
 
-      client.addColumn(testDesc.getName(), new HColumnDescriptor("col2:"));
-      client.enableTable(testDesc.getName());
+      admin.addColumn(testDesc.getName(), new HColumnDescriptor("col2:"));
+      admin.enableTable(testDesc.getName());
       try {
-        client.deleteColumn(testDesc.getName(), new Text("col2:"));
+        admin.deleteColumn(testDesc.getName(), new Text("col2:"));
         
       } catch(TableNotDisabledException e) {
         // Expected
       }
 
-      client.disableTable(testDesc.getName());
-      client.deleteColumn(testDesc.getName(), new Text("col2:"));
+      admin.disableTable(testDesc.getName());
+      admin.deleteColumn(testDesc.getName(), new Text("col2:"));
       
     } catch(Exception e) {
       e.printStackTrace();
@@ -72,7 +77,7 @@
       
     } finally {
       try {
-        client.deleteTable(testDesc.getName());
+        admin.deleteTable(testDesc.getName());
         
       } catch(Exception e) {
         e.printStackTrace();
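
Taken together, the calls above trace the full DDL lifecycle on HBaseAdmin, and the TableNotDisabledException branch pins down the rule being tested: schema changes are only allowed while the table is disabled. A condensed sketch of that lifecycle against the same API (the surrounding method is illustrative scaffolding, not part of the commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.*;
    import org.apache.hadoop.io.Text;

    public class DdlLifecycleSketch {
      static void demo(Configuration conf, HTableDescriptor desc) throws Exception {
        HBaseAdmin admin = new HBaseAdmin(conf);
        admin.createTable(desc);             // table starts out enabled
        admin.disableTable(desc.getName());  // take it offline for schema changes
        // While disabled, new HTable(conf, name) throws IllegalStateException,
        // which replaces the old client.openTable() check in the test.
        admin.addColumn(desc.getName(), new HColumnDescriptor("col2:"));
        admin.enableTable(desc.getName());
        try {
          admin.deleteColumn(desc.getName(), new Text("col2:"));  // still enabled
        } catch (TableNotDisabledException e) {
          // expected: schema changes require a disabled table
        }
        admin.disableTable(desc.getName());
        admin.deleteColumn(desc.getName(), new Text("col2:"));
        admin.deleteTable(desc.getName());
      }
    }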

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMultipleUpdates.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMultipleUpdates.java?view=diff&rev=562041&r1=562040&r2=562041
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMultipleUpdates.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMultipleUpdates.java Thu Aug  2 01:15:26 2007
@@ -30,7 +30,7 @@
   private static final byte[] value = { 1, 2, 3, 4 };
 
   private HTableDescriptor desc = null;
-  private HClient client = null;
+  private HTable table = null;
 
   /**
    * {@inheritDoc}
@@ -38,12 +38,12 @@
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    this.client = new HClient(conf);
     this.desc = new HTableDescriptor("test");
     desc.addFamily(new HColumnDescriptor(CONTENTS_STR));
     try {
-      client.createTable(desc);
-      client.openTable(desc.getName());
+      HBaseAdmin admin = new HBaseAdmin(conf);
+      admin.createTable(desc);
+      table = new HTable(conf, desc.getName());
       
     } catch (Exception e) {
       e.printStackTrace();
@@ -54,61 +54,47 @@
   /** the test */
   public void testMultipleUpdates() {
     try {
-      long lockid = client.startUpdate(new Text("row1"));
+      long lockid = table.startUpdate(new Text("row1"));
       
       try {
-        long lockid2 = client.startUpdate(new Text("row2"));
+        long lockid2 = table.startUpdate(new Text("row2"));
         throw new Exception("second startUpdate returned lock id " + lockid2);
         
       } catch (IllegalStateException i) {
         // expected
       }
       
-      try {
-        client.openTable(HConstants.META_TABLE_NAME);
-        
-      } catch (IllegalStateException i) {
-        // expected
-      }
-      
       long invalidid = 42;
       
       try {
-        client.put(invalidid, CONTENTS, value);
-        
-      } catch (IllegalArgumentException i) {
-        // expected
-      }
-      
-      try {
-        client.delete(invalidid, CONTENTS);
+        table.put(invalidid, CONTENTS, value);
         
       } catch (IllegalArgumentException i) {
         // expected
       }
       
       try {
-        client.put(invalidid, CONTENTS, value);
+        table.delete(invalidid, CONTENTS);
         
       } catch (IllegalArgumentException i) {
         // expected
       }
       
       try {
-        client.abort(invalidid);
+        table.abort(invalidid);
         
       } catch (IllegalArgumentException i) {
         // expected
       }
       
       try {
-        client.commit(invalidid);
+        table.commit(invalidid);
         
       } catch (IllegalArgumentException i) {
         // expected
       }
       
-      client.abort(lockid);
+      table.abort(lockid);
       
     } catch (Exception e) {
       System.err.println("unexpected exception");
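
The row-update protocol itself carries over from HClient to HTable unchanged: startUpdate hands back a lock id, put and delete stage changes under that id, and commit or abort releases it. The test also pins down two error cases: a second startUpdate while one is open throws IllegalStateException, and any operation against a stale or invented lock id throws IllegalArgumentException. A minimal sketch of the happy path, assuming a table with a "contents:" family:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HTable;
    import org.apache.hadoop.io.Text;

    public class UpdateLifecycleSketch {
      static void demo(Configuration conf) throws Exception {
        HTable table = new HTable(conf, new Text("test"));
        long lockid = table.startUpdate(new Text("row1"));   // one open update per HTable
        table.put(lockid, new Text("contents:"), new byte[] { 1, 2, 3, 4 });
        table.commit(lockid);   // or table.abort(lockid) to discard the staged puts
      }
    }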

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestRegionServerAbort.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestRegionServerAbort.java?view=diff&rev=562041&r1=562040&r2=562041
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestRegionServerAbort.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestRegionServerAbort.java Thu Aug  2 01:15:26 2007
@@ -30,7 +30,7 @@
  * Tests region server failover when a region server exits.
  */
 public class TestRegionServerAbort extends HBaseClusterTestCase {
-  private HClient client;
+  private HTable table;
 
   /** constructor */
   public TestRegionServerAbort() {
@@ -48,7 +48,6 @@
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    this.client = new HClient(conf);
   }
   
   /**
@@ -57,19 +56,21 @@
    */
   public void testRegionServerAbort() throws IOException {
     // When the META table can be opened, the region servers are running
-    this.client.openTable(HConstants.META_TABLE_NAME);
+    @SuppressWarnings("unused")
+    HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
     // Put something into the meta table.
     String tableName = getName();
     HTableDescriptor desc = new HTableDescriptor(tableName);
     desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY.toString()));
-    this.client.createTable(desc);
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    admin.createTable(desc);
     // put some values in the table
-    this.client.openTable(new Text(tableName));
+    this.table = new HTable(conf, new Text(tableName));
     Text row = new Text("row1");
-    long lockid = client.startUpdate(row);
-    client.put(lockid, HConstants.COLUMN_FAMILY,
+    long lockid = table.startUpdate(row);
+    table.put(lockid, HConstants.COLUMN_FAMILY,
         tableName.getBytes(HConstants.UTF8_ENCODING));
-    client.commit(lockid);
+    table.commit(lockid);
     // Start up a new region server to take over serving of root and meta
     // after we shut down the current meta/root host.
     this.cluster.startRegionServer();
@@ -81,7 +82,7 @@
     // to a different server
 
     HScannerInterface scanner =
-      client.obtainScanner(HConstants.COLUMN_FAMILY_ARRAY, new Text());
+      table.obtainScanner(HConstants.COLUMN_FAMILY_ARRAY, new Text());
 
     try {
       HStoreKey key = new HStoreKey();
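
For the failover test, opening the META table doubles as a liveness check: the HTable constructor only succeeds when a region server is actually serving META, which replaces the old client.openTable() probe. The write path it then exercises is the standard one. A sketch of the setup, with the table name and value illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.*;
    import org.apache.hadoop.io.Text;

    public class FailoverSetupSketch {
      static void demo(Configuration conf) throws Exception {
        // Succeeds only if META is being served somewhere.
        HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);

        HBaseAdmin admin = new HBaseAdmin(conf);
        HTableDescriptor desc = new HTableDescriptor("failover");
        desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY.toString()));
        admin.createTable(desc);

        HTable table = new HTable(conf, new Text("failover"));
        long lockid = table.startUpdate(new Text("row1"));
        table.put(lockid, HConstants.COLUMN_FAMILY,
            "value".getBytes(HConstants.UTF8_ENCODING));
        table.commit(lockid);
      }
    }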

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java?view=diff&rev=562041&r1=562040&r2=562041
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java Thu Aug  2 01:15:26 2007
@@ -30,6 +30,7 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.filter.RegExpRowFilter;
 import org.apache.hadoop.hbase.filter.RowFilterInterface;
@@ -64,7 +65,7 @@
    */
   public void testScannerFilter() throws Exception {
     // Set up the admin client and ensure that the master is running correctly
-    HClient client = new HClient(this.conf);
+    HBaseAdmin admin = new HBaseAdmin(conf);
     
     // Setup colkeys to be inserted
     HTableDescriptor htd = new HTableDescriptor(getName());
@@ -75,28 +76,28 @@
         (char)(FIRST_COLKEY + i), ':' }));
       htd.addFamily(new HColumnDescriptor(colKeys[i].toString()));
     }
-    client.createTable(htd);
+    admin.createTable(htd);
     assertTrue("Table with name " + tableName + " created successfully.", 
-        client.tableExists(tableName));
-    assertTrue("Master is running.", client.isMasterRunning());
+        admin.tableExists(tableName));
+    assertTrue("Master is running.", admin.isMasterRunning());
     
     // Enter data
-    client.openTable(tableName);
+    HTable table = new HTable(conf, tableName);
     for (char i = FIRST_ROWKEY; i <= LAST_ROWKEY; i++) {
       Text rowKey = new Text(new String(new char[] { i }));
-      long lockID = client.startUpdate(rowKey);
+      long lockID = table.startUpdate(rowKey);
       for (char j = 0; j < colKeys.length; j++) {
-        client.put(lockID, colKeys[j], (i >= FIRST_BAD_RANGE_ROWKEY && 
+        table.put(lockID, colKeys[j], (i >= FIRST_BAD_RANGE_ROWKEY && 
           i <= LAST_BAD_RANGE_ROWKEY)? BAD_BYTES : GOOD_BYTES);
       }
-      client.commit(lockID);
+      table.commit(lockID);
     }
     
-    regExpFilterTest(client, colKeys);
-    rowFilterSetTest(client, colKeys);
+    regExpFilterTest(table, colKeys);
+    rowFilterSetTest(table, colKeys);
   }
   
-  private void regExpFilterTest(HClient client, Text[] colKeys) 
+  private void regExpFilterTest(HTable table, Text[] colKeys) 
     throws Exception {
     // Get the filter.  The RegExpRowFilter used should filter out vowels.
     Map<Text, byte[]> colCriteria = new TreeMap<Text, byte[]>();
@@ -106,14 +107,14 @@
     RowFilterInterface filter = new RegExpRowFilter("[^aeiou]", colCriteria);
 
     // Create the scanner from the filter.
-    HScannerInterface scanner = client.obtainScanner(colKeys, new Text(new 
+    HScannerInterface scanner = table.obtainScanner(colKeys, new Text(new 
       String(new char[] { FIRST_ROWKEY })), filter);
 
     // Iterate over the scanner, ensuring that results match the passed regex.
     iterateOnScanner(scanner, "[^aei-qu]");
   }
   
-  private void rowFilterSetTest(HClient client, Text[] colKeys) 
+  private void rowFilterSetTest(HTable table, Text[] colKeys) 
     throws Exception {
     // Get the filter.  The RegExpRowFilter used should filter out vowels and 
     // the WhileMatchRowFilter(StopRowFilter) should filter out all rows 
@@ -125,7 +126,7 @@
       new RowFilterSet(RowFilterSet.Operator.MUST_PASS_ALL, filterSet);
     
     // Create the scanner from the filter.
-    HScannerInterface scanner = client.obtainScanner(colKeys, new Text(new 
+    HScannerInterface scanner = table.obtainScanner(colKeys, new Text(new 
         String(new char[] { FIRST_ROWKEY })), filter);
     
     // Iterate over the scanner, ensuring that results match the passed regex.
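
Filtered scans keep the same shape on HTable: build a RowFilterInterface and hand it to the obtainScanner overload that takes a filter. A sketch of the vowel-excluding filter used above, with the column criteria map left empty for brevity (the test populates it) and the start row illustrative:

    import java.util.Map;
    import java.util.TreeMap;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HScannerInterface;
    import org.apache.hadoop.hbase.HTable;
    import org.apache.hadoop.hbase.filter.RegExpRowFilter;
    import org.apache.hadoop.hbase.filter.RowFilterInterface;
    import org.apache.hadoop.io.Text;

    public class FilteredScanSketch {
      static void demo(Configuration conf, Text[] colKeys) throws Exception {
        HTable table = new HTable(conf, new Text("test"));
        // The regex admits only non-vowel row keys, as in the test above.
        Map<Text, byte[]> colCriteria = new TreeMap<Text, byte[]>();
        RowFilterInterface filter = new RegExpRowFilter("[^aeiou]", colCriteria);
        HScannerInterface scanner =
            table.obtainScanner(colKeys, new Text("a"), filter);
        try {
          // iterate over the scanner results here
        } finally {
          scanner.close();
        }
      }
    }
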
@@ -159,16 +160,17 @@
    */
   public void testSplitDeleteOneAddTwoRegions() throws IOException {
     // First add a new table.  Its initial region will be added to META region.
-    HClient client = new HClient(this.conf);
-    client.createTable(new HTableDescriptor(getName()));
-    List<HRegionInfo> regions = scan(client, HConstants.META_TABLE_NAME);
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    Text tableName = new Text(getName());
+    admin.createTable(new HTableDescriptor(tableName.toString()));
+    List<HRegionInfo> regions = scan(conf, HConstants.META_TABLE_NAME);
     assertEquals("Expected one region", regions.size(), 1);
     HRegionInfo region = regions.get(0);
     assertTrue("Expected region named for test",
       region.regionName.toString().startsWith(getName()));
     // Now do what happens at split time; remove old region and then add two
     // new ones in its place.
-    HRegion.removeRegionFromMETA(client, HConstants.META_TABLE_NAME,
+    HRegion.removeRegionFromMETA(conf, HConstants.META_TABLE_NAME,
       region.regionName);
     HTableDescriptor desc = region.tableDesc;
     Path homedir = new Path(getName());
@@ -181,10 +183,10 @@
         homedir, this.conf, null));
     try {
       for (HRegion r : newRegions) {
-        HRegion.addRegionToMETA(client, HConstants.META_TABLE_NAME, r,
+        HRegion.addRegionToMETA(conf, HConstants.META_TABLE_NAME, r,
             this.cluster.getHMasterAddress(), -1L);
       }
-      regions = scan(client, HConstants.META_TABLE_NAME);
+      regions = scan(conf, HConstants.META_TABLE_NAME);
       assertEquals("Should be two regions only", 2, regions.size());
     } finally {
       for (HRegion r : newRegions) {
@@ -194,15 +196,15 @@
     }
   }
   
-  private List<HRegionInfo> scan(final HClient client, final Text table)
+  private List<HRegionInfo> scan(final Configuration conf, final Text table)
   throws IOException {
     List<HRegionInfo> regions = new ArrayList<HRegionInfo>();
     HRegionInterface regionServer = null;
     long scannerId = -1L;
     try {
-      client.openTable(table);
-      HRegionLocation rl = client.getRegionLocation(table);
-      regionServer = client.getHRegionConnection(rl.getServerAddress());
+      HTable t = new HTable(conf, table);
+      HRegionLocation rl = t.getRegionLocation(table);
+      regionServer = t.getConnection().getHRegionConnection(rl.getServerAddress());
       scannerId = regionServer.openScanner(rl.getRegionInfo().getRegionName(),
           HMaster.METACOLUMNS, new Text(), System.currentTimeMillis(), null);
       while (true) {
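
The private scan helper changes signature from HClient to Configuration, and the low-level region-server RPC stub that used to come from client.getHRegionConnection() is now reached through the table's shared connection. A sketch of the relocated lookup, mirroring the hunk above (iteration and cleanup omitted):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.*;
    import org.apache.hadoop.io.Text;

    public class RegionLookupSketch {
      static void demo(Configuration conf, Text tableName) throws Exception {
        HTable t = new HTable(conf, tableName);
        // Locate the region and get the RPC proxy for the server hosting it.
        HRegionLocation rl = t.getRegionLocation(tableName);
        HRegionInterface server =
            t.getConnection().getHRegionConnection(rl.getServerAddress());
        long scannerId = server.openScanner(rl.getRegionInfo().getRegionName(),
            HMaster.METACOLUMNS, new Text(), System.currentTimeMillis(), null);
      }
    }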

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTable.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTable.java?view=diff&rev=562041&r1=562040&r2=562041
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTable.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTable.java Thu Aug  2 01:15:26 2007
@@ -25,15 +25,20 @@
 
 /** Tests table creation restrictions*/
 public class TestTable extends HBaseClusterTestCase {
+  /** constructor */
   public TestTable() {
     super(true);
   }
 
+  /**
+   * the test
+   * @throws IOException
+   */
   public void testCreateTable() throws IOException {
-    final HClient client = new HClient(conf);
+    final HBaseAdmin admin = new HBaseAdmin(conf);
     String msg = null;
     try {
-      client.createTable(HGlobals.rootTableDesc);
+      admin.createTable(HGlobals.rootTableDesc);
     } catch (IllegalArgumentException e) {
       msg = e.toString();
     }
@@ -43,7 +48,7 @@
     
     msg = null;
     try {
-      client.createTable(HGlobals.metaTableDesc);
+      admin.createTable(HGlobals.metaTableDesc);
     } catch(IllegalArgumentException e) {
       msg = e.toString();
     }
@@ -55,9 +60,9 @@
     msg = null;
     HTableDescriptor desc = new HTableDescriptor(getName());
     desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY.toString()));
-    client.createTable(desc);
+    admin.createTable(desc);
     try {
-      client.createTable(desc);
+      admin.createTable(desc);
     } catch (TableExistsException e) {
       msg = e.getMessage();
     }
@@ -78,7 +83,7 @@
         @Override
         public void run() {
           try {
-            client.createTable(threadDesc);
+            admin.createTable(threadDesc);
             successes.incrementAndGet();
           } catch (TableExistsException e) {
             failures.incrementAndGet();
@@ -111,10 +116,11 @@
    * @throws Exception
    */
   public void testTableNameClash() throws Exception {
-    HClient client = new HClient(conf);
-    client.createTable(new HTableDescriptor(getName() + "SOMEUPPERCASE"));
-    client.createTable(new HTableDescriptor(getName()));
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    admin.createTable(new HTableDescriptor(getName() + "SOMEUPPERCASE"));
+    admin.createTable(new HTableDescriptor(getName()));
     // Before fix, below would fail throwing a NoServerForRegionException.
-    client.openTable(new Text(getName()));
+    @SuppressWarnings("unused")
+    HTable table = new HTable(conf, new Text(getName()));
   }
 }
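
All of the creation restrictions surface as exceptions from HBaseAdmin.createTable: the reserved root and meta descriptors are rejected with IllegalArgumentException, and a duplicate name raises TableExistsException. "Opening" a table, meanwhile, is now just constructing the client-side handle. A sketch of the duplicate-name case (table name illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.*;
    import org.apache.hadoop.io.Text;

    public class CreateTableSketch {
      static void demo(Configuration conf) throws Exception {
        HBaseAdmin admin = new HBaseAdmin(conf);
        HTableDescriptor desc = new HTableDescriptor("mytable");
        desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY.toString()));
        admin.createTable(desc);
        try {
          admin.createTable(desc);          // same descriptor again
        } catch (TableExistsException e) {
          // expected, as in the test above
        }
        // The replacement for client.openTable(): constructing the handle.
        HTable table = new HTable(conf, new Text("mytable"));
      }
    }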

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTableMapReduce.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTableMapReduce.java?view=diff&rev=562041&r1=562040&r2=562041
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTableMapReduce.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTableMapReduce.java Thu Aug  2 01:15:26 2007
@@ -225,15 +225,14 @@
   }
   
   private void scanTable(Configuration conf) throws IOException {
-    HClient client = new HClient(conf);
-    client.openTable(new Text(TABLE_NAME));
+    HTable table = new HTable(conf, new Text(TABLE_NAME));
     
     Text[] columns = {
         TEXT_INPUT_COLUMN,
         TEXT_OUTPUT_COLUMN
     };
     HScannerInterface scanner =
-      client.obtainScanner(columns, HClient.EMPTY_START_ROW);
+      table.obtainScanner(columns, HConstants.EMPTY_START_ROW);
     
     try {
       HStoreKey key = new HStoreKey();
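
One constant moves along with the method: the empty start row used to scan from the beginning of a table now lives on HConstants rather than HClient. A minimal full-table scan with the relocated constant, assuming the usual HScannerInterface.next(key, results) iteration these tests use (table and column names illustrative):

    import java.util.TreeMap;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.*;
    import org.apache.hadoop.io.Text;

    public class FullScanSketch {
      static void demo(Configuration conf) throws Exception {
        HTable table = new HTable(conf, new Text("mrtest"));
        Text[] columns = { new Text("contents:"), new Text("text:") };
        HScannerInterface scanner =
            table.obtainScanner(columns, HConstants.EMPTY_START_ROW);
        try {
          HStoreKey key = new HStoreKey();
          TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
          while (scanner.next(key, results)) {
            // consume key/results for each row
            results.clear();
          }
        } finally {
          scanner.close();
        }
      }
    }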

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTimestamp.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTimestamp.java?view=diff&rev=562041&r1=562040&r2=562041
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTimestamp.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTimestamp.java Thu Aug  2 01:15:26 2007
@@ -38,12 +38,11 @@
   private static final Text TABLE = new Text(TABLE_NAME);
   private static final Text ROW = new Text("row");
   
-  private HClient client;
+  private HTable table;
 
   /** constructor */
   public TestTimestamp() {
     super();
-    client = new HClient(conf);
   }
 
   /** {@inheritDoc} */
@@ -55,7 +54,8 @@
     desc.addFamily(new HColumnDescriptor(COLUMN_NAME));
 
     try {
-      client.createTable(desc);
+      HBaseAdmin admin = new HBaseAdmin(conf);
+      admin.createTable(desc);
       
     } catch (Exception e) {
       e.printStackTrace();
@@ -66,42 +66,42 @@
   /** the test */
   public void testTimestamp() {
     try {
-      client.openTable(TABLE);
+      table = new HTable(conf, TABLE);
       
       // store a value specifying an update time
 
-      long lockid = client.startUpdate(ROW);
-      client.put(lockid, COLUMN, VERSION1.getBytes(HConstants.UTF8_ENCODING));
-      client.commit(lockid, T0);
+      long lockid = table.startUpdate(ROW);
+      table.put(lockid, COLUMN, VERSION1.getBytes(HConstants.UTF8_ENCODING));
+      table.commit(lockid, T0);
       
       // store a value specifying 'now' as the update time
       
-      lockid = client.startUpdate(ROW);
-      client.put(lockid, COLUMN, LATEST.getBytes(HConstants.UTF8_ENCODING));
-      client.commit(lockid);
+      lockid = table.startUpdate(ROW);
+      table.put(lockid, COLUMN, LATEST.getBytes(HConstants.UTF8_ENCODING));
+      table.commit(lockid);
       
       // delete values older than T1
       
-      lockid = client.startUpdate(ROW);
-      client.delete(lockid, COLUMN);
-      client.commit(lockid, T1);
+      lockid = table.startUpdate(ROW);
+      table.delete(lockid, COLUMN);
+      table.commit(lockid, T1);
       
       // now retrieve...
       
       // the most recent version:
       
-      byte[] bytes = client.get(ROW, COLUMN);
+      byte[] bytes = table.get(ROW, COLUMN);
       assertTrue(bytes != null && bytes.length != 0);
       assertTrue(LATEST.equals(new String(bytes, HConstants.UTF8_ENCODING)));
       
       // any version <= time T1
       
-      byte[][] values = client.get(ROW, COLUMN, T1, 3);
+      byte[][] values = table.get(ROW, COLUMN, T1, 3);
       assertNull(values);
       
       // the version from T0
       
-      values = client.get(ROW, COLUMN, T0, 3);
+      values = table.get(ROW, COLUMN, T0, 3);
       assertTrue(values.length == 1
           && VERSION1.equals(new String(values[0], HConstants.UTF8_ENCODING)));
 
@@ -116,31 +116,31 @@
       
       // the most recent version:
       
-      bytes = client.get(ROW, COLUMN);
+      bytes = table.get(ROW, COLUMN);
       assertTrue(bytes != null && bytes.length != 0);
       assertTrue(LATEST.equals(new String(bytes, HConstants.UTF8_ENCODING)));
       
       // any version <= time T1
       
-      values = client.get(ROW, COLUMN, T1, 3);
+      values = table.get(ROW, COLUMN, T1, 3);
       assertNull(values);
       
       // the version from T0
       
-      values = client.get(ROW, COLUMN, T0, 3);
+      values = table.get(ROW, COLUMN, T0, 3);
       assertTrue(values.length == 1
           && VERSION1.equals(new String(values[0], HConstants.UTF8_ENCODING)));
 
       // three versions older than now
       
-      values = client.get(ROW, COLUMN, 3);
+      values = table.get(ROW, COLUMN, 3);
       assertTrue(values.length == 1
           && LATEST.equals(new String(values[0], HConstants.UTF8_ENCODING)));
       
       // Test scanners
       
       HScannerInterface scanner =
-        client.obtainScanner(COLUMNS, HClient.EMPTY_START_ROW);
+        table.obtainScanner(COLUMNS, HConstants.EMPTY_START_ROW);
       try {
         HStoreKey key = new HStoreKey();
         TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
@@ -155,7 +155,7 @@
         scanner.close();
       }
       
-      scanner = client.obtainScanner(COLUMNS, HClient.EMPTY_START_ROW, T1);
+      scanner = table.obtainScanner(COLUMNS, HConstants.EMPTY_START_ROW, T1);
       try {
         HStoreKey key = new HStoreKey();
         TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
@@ -170,7 +170,7 @@
         scanner.close();
       }
       
-      scanner = client.obtainScanner(COLUMNS, HClient.EMPTY_START_ROW, T0);
+      scanner = table.obtainScanner(COLUMNS, HConstants.EMPTY_START_ROW, T0);
       try {
         HStoreKey key = new HStoreKey();
         TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
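
Timestamped access follows one pattern throughout: commit(lockid, ts) stamps a write with an explicit time, get(row, column, ts, numVersions) reads versions as of ts, and obtainScanner(columns, startRow, ts) scans the table as of ts. A condensed sketch of the three forms, assuming t0 < t1 as in the test (table and value names illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.*;
    import org.apache.hadoop.io.Text;

    public class TimestampSketch {
      static void demo(Configuration conf, long t0, long t1) throws Exception {
        HTable table = new HTable(conf, new Text("test"));
        Text row = new Text("row");
        Text column = new Text("contents:");

        // Write a value as of an explicit time t0.
        long lockid = table.startUpdate(row);
        table.put(lockid, column, "version1".getBytes(HConstants.UTF8_ENCODING));
        table.commit(lockid, t0);

        // Read back the latest version, then up to 3 versions as of t0.
        byte[] latest = table.get(row, column);
        byte[][] asOfT0 = table.get(row, column, t0, 3);

        // Scan the table as it looked at time t1.
        HScannerInterface scanner =
            table.obtainScanner(new Text[] { column }, HConstants.EMPTY_START_ROW, t1);
        scanner.close();
      }
    }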


