hadoop-common-commits mailing list archives

From: cutt...@apache.org
Subject: svn commit: r532083 [1/4] - in /lucene/hadoop/trunk: ./ src/contrib/hbase/conf/ src/contrib/hbase/src/java/org/apache/hadoop/hbase/ src/contrib/hbase/src/test/org/apache/hadoop/hbase/
Date: Tue, 24 Apr 2007 21:13:10 GMT
Author: cutting
Date: Tue Apr 24 14:13:08 2007
New Revision: 532083

URL: http://svn.apache.org/viewvc?view=rev&rev=532083
Log:
HADOOP-1282.  Omnibus HBase patch.  Improved tests and configuration.  Contributed by Jim Kellerman.

Added:
    lucene/hadoop/trunk/src/contrib/hbase/conf/
    lucene/hadoop/trunk/src/contrib/hbase/conf/hbase-default.xml
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HBaseConfiguration.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/LockException.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHMemcache.java
Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HClient.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConstants.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLocking.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLogEdit.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLogKey.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMasterInterface.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMasterRegionInterface.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMemcache.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMsg.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInterface.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerAddress.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/LabelledData.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/Environment.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?view=diff&rev=532083&r1=532082&r2=532083
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Tue Apr 24 14:13:08 2007
@@ -252,6 +252,9 @@
     jobs run with speculative execution.
     (Arun C Murthy via tomwhite)
 
+76. HADOOP-1282.  Omnibus HBase patch.  Improved tests & configuration.
+    (Jim Kellerman via cutting)
+
 
 Release 0.12.3 - 2007-04-06
 

Added: lucene/hadoop/trunk/src/contrib/hbase/conf/hbase-default.xml
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/conf/hbase-default.xml?view=auto&rev=532083
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/conf/hbase-default.xml (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/conf/hbase-default.xml Tue Apr 24 14:13:08 2007
@@ -0,0 +1,48 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+  <property>
+    <name>hbase.master</name>
+    <value>localhost:60000</value>
+    <description>The host and port that the HBase master runs at.
+        TODO: Support 'local' (All running in single context).
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver</name>
+    <value>localhost:60010</value>
+    <description>The host and port a HBase region server runs at.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regiondir</name>
+    <value>${hadoop.tmp.dir}/hbase</value>
+    <description>The directory shared by region servers.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.timeout.length</name>
+    <value>10000</value>
+    <description>Client timeout in milliseconds</description>
+  </property>
+  <property>
+    <name>hbase.client.timeout.number</name>
+    <value>5</value>
+    <description>Try this many timeouts before giving up.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.retries.number</name>
+    <value>2</value>
+    <description>Count of maximum retries fetching the root region from root
+        region server.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.meta.thread.rescanfrequency</name>
+    <value>60000</value>
+    <description>How long the HMaster sleeps (in milliseconds) between scans of
+    the META table.
+    </description>
+  </property>
+</configuration>

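[The configuration file above is read through the stock Hadoop Configuration API. A minimal sketch of a lookup follows; it is not part of this patch, the ConfDemo class name is hypothetical, and each accessor falls back to the supplied default when the key is absent.]

    import org.apache.hadoop.conf.Configuration;

    public class ConfDemo {  // hypothetical driver class, for illustration only
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Register the defaults file, as the new HBaseConfiguration class below does.
        conf.addDefaultResource("hbase-default.xml");

        String master = conf.get("hbase.master", "localhost:60000");
        long timeoutMillis = conf.getLong("hbase.client.timeout.length", 10 * 1000);
        int retries = conf.getInt("hbase.client.retries.number", 2);
        System.out.println(master + " timeout=" + timeoutMillis + "ms retries=" + retries);
      }
    }
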
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java?view=diff&rev=532083&r1=532082&r2=532083
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java Tue Apr 24 14:13:08 2007
@@ -60,17 +60,17 @@
       String column = col.toString();
       try {
         int colpos = column.indexOf(":") + 1;
-        if (colpos == 0) {
+        if(colpos == 0) {
           throw new IllegalArgumentException("Column name has no family indicator.");
         }
 
         String columnkey = column.substring(colpos);
 
-        if (columnkey == null || columnkey.length() == 0) {
+        if(columnkey == null || columnkey.length() == 0) {
           this.matchType = MATCH_TYPE.FAMILY_ONLY;
           this.family = column.substring(0, colpos);
 
-        } else if (isRegexPattern.matcher(columnkey).matches()) {
+        } else if(isRegexPattern.matcher(columnkey).matches()) {
           this.matchType = MATCH_TYPE.REGEX;
           this.columnMatcher = Pattern.compile(column);
 
@@ -86,13 +86,13 @@
     // Matching method
     
     boolean matches(Text col) throws IOException {
-      if (this.matchType == MATCH_TYPE.SIMPLE) {
+      if(this.matchType == MATCH_TYPE.SIMPLE) {
         return col.equals(this.col);
         
-      } else if (this.matchType == MATCH_TYPE.FAMILY_ONLY) {
+      } else if(this.matchType == MATCH_TYPE.FAMILY_ONLY) {
         return col.toString().startsWith(this.family);
         
-      } else if (this.matchType == MATCH_TYPE.REGEX) {
+      } else if(this.matchType == MATCH_TYPE.REGEX) {
         return this.columnMatcher.matcher(col.toString()).matches();
         
       } else {
@@ -121,7 +121,7 @@
     for(int i = 0; i < targetCols.length; i++) {
       Text family = HStoreKey.extractFamily(targetCols[i]);
       Vector<ColumnMatcher> matchers = okCols.get(family);
-      if (matchers == null) {
+      if(matchers == null) {
         matchers = new Vector<ColumnMatcher>();
       }
       matchers.add(new ColumnMatcher(targetCols[i]));
@@ -144,11 +144,11 @@
     Text column = keys[i].getColumn();
     Text family = HStoreKey.extractFamily(column);
     Vector<ColumnMatcher> matchers = okCols.get(family);
-    if (matchers == null) {
+    if(matchers == null) {
       return false;
     }
     for(int m = 0; m < matchers.size(); m++) {
-      if (matchers.get(m).matches(column)) {
+      if(matchers.get(m).matches(column)) {
         return true;
       }
     }
@@ -180,7 +180,7 @@
   * @see org.apache.hadoop.hbase.HScannerInterface#next(org.apache.hadoop.hbase.HStoreKey, java.util.TreeMap)
    */
   public boolean next(HStoreKey key, TreeMap<Text, byte[]> results)
-    throws IOException {
+      throws IOException {
  
     // Find the next row label (and timestamp)
  
@@ -188,12 +188,12 @@
     long chosenTimestamp = -1;
     for(int i = 0; i < keys.length; i++) {
       while((keys[i] != null)
-            && (columnMatch(i))
-            && (keys[i].getTimestamp() <= this.timestamp)
-            && ((chosenRow == null)
-                || (keys[i].getRow().compareTo(chosenRow) < 0)
-                || ((keys[i].getRow().compareTo(chosenRow) == 0)
-                    && (keys[i].getTimestamp() > chosenTimestamp)))) {
+          && (columnMatch(i))
+          && (keys[i].getTimestamp() <= this.timestamp)
+          && ((chosenRow == null)
+              || (keys[i].getRow().compareTo(chosenRow) < 0)
+              || ((keys[i].getRow().compareTo(chosenRow) == 0)
+                  && (keys[i].getTimestamp() > chosenTimestamp)))) {
 
         chosenRow = new Text(keys[i].getRow());
         chosenTimestamp = keys[i].getTimestamp();
@@ -203,7 +203,7 @@
     // Grab all the values that match this row/timestamp
 
     boolean insertedItem = false;
-    if (chosenRow != null) {
+    if(chosenRow != null) {
       key.setRow(chosenRow);
       key.setVersion(chosenTimestamp);
       key.setColumn(new Text(""));
@@ -212,10 +212,10 @@
         // Fetch the data
         
         while((keys[i] != null)
-              && (keys[i].getRow().compareTo(chosenRow) == 0)
-              && (keys[i].getTimestamp() == chosenTimestamp)) {
+            && (keys[i].getRow().compareTo(chosenRow) == 0)
+            && (keys[i].getTimestamp() == chosenTimestamp)) {
 
-          if (columnMatch(i)) {
+          if(columnMatch(i)) {
             outbuf.reset();
             vals[i].write(outbuf);
             byte byteresults[] = outbuf.getData();
@@ -226,7 +226,7 @@
             insertedItem = true;
           }
 
-          if (!getNext(i)) {
+          if (! getNext(i)) {
             closeSubScanner(i);
           }
         }
@@ -235,9 +235,9 @@
         // a valid timestamp, so we're ready next time.
         
         while((keys[i] != null)
-              && ((keys[i].getRow().compareTo(chosenRow) <= 0)
-                  || (keys[i].getTimestamp() > this.timestamp)
-                  || (!columnMatch(i)))) {
+            && ((keys[i].getRow().compareTo(chosenRow) <= 0)
+                || (keys[i].getTimestamp() > this.timestamp)
+                || (! columnMatch(i)))) {
 
           getNext(i);
         }

Added: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HBaseConfiguration.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HBaseConfiguration.java?view=auto&rev=532083
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HBaseConfiguration.java (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HBaseConfiguration.java Tue Apr 24 14:13:08 2007
@@ -0,0 +1,25 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import org.apache.hadoop.conf.Configuration;
+
+public class HBaseConfiguration extends Configuration {
+  public HBaseConfiguration() {
+    super();
+    addDefaultResource("hbase-default.xml");
+  }
+}

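[The new class simply front-loads hbase-default.xml onto the resource list, so it drops in anywhere a plain Configuration is expected. A brief usage sketch under that assumption, mirroring HClient.main() further down; the ClientDemo class name and table name are illustrative.]

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.Text;

    public class ClientDemo {  // hypothetical driver class
      public static void main(String[] args) throws Exception {
        Configuration conf = new HBaseConfiguration();  // picks up hbase-default.xml
        HClient client = new HClient(conf);
        client.openTable(new Text("mytable"));          // illustrative table name
        client.close();
      }
    }
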
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HClient.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HClient.java?view=diff&rev=532083&r1=532082&r2=532083
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HClient.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HClient.java Tue Apr 24 14:13:08 2007
@@ -15,29 +15,39 @@
  */
 package org.apache.hadoop.hbase;
 
-import org.apache.hadoop.io.*;
-import org.apache.hadoop.ipc.*;
-import org.apache.hadoop.conf.*;
-
-import java.io.*;
-import java.util.*;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Random;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.log4j.Logger;
 
 /*******************************************************************************
  * HClient manages a connection to a single HRegionServer.
  ******************************************************************************/
-public class HClient extends HGlobals implements HConstants {
+public class HClient implements HConstants {
+  private final Logger LOG =
+    Logger.getLogger(this.getClass().getName());
+  
   private static final Text[] metaColumns = {
     META_COLUMN_FAMILY
   };
   private static final Text startRow = new Text();
   
   private boolean closed;
-  private Configuration conf;
-  private HServerAddress masterLocation;
   private long clientTimeout;
   private int numTimeouts;
   private int numRetries;
   private HMasterInterface master;
+  private final Configuration conf;
   
   private class TableInfo {
     public HRegionInfo regionInfo;
@@ -72,16 +82,11 @@
   public HClient(Configuration conf) {
     this.closed = false;
     this.conf = conf;
-    
-    // Load config settings
-    
-    this.masterLocation = new HServerAddress(this.conf.get(MASTER_DEFAULT_NAME));
-    this.clientTimeout = this.conf.getLong("hbase.client.timeout.length", 10 * 1000);
-    this.numTimeouts = this.conf.getInt("hbase.client.timeout.number", 5);
-    this.numRetries = this.conf.getInt("hbase.client.retries.number", 2);
-    
-    // Finish initialization
 
+    this.clientTimeout = conf.getLong("hbase.client.timeout.length", 10 * 1000);
+    this.numTimeouts = conf.getInt("hbase.client.timeout.number", 5);
+    this.numRetries =  conf.getInt("hbase.client.retries.number", 2);
+    
     this.master = null;
     this.tablesToServers = new TreeMap<Text, TreeMap<Text, TableInfo>>();
     this.tableServers = null;
@@ -94,13 +99,33 @@
     this.rand = new Random();
   }
 
+  public synchronized void createTable(HTableDescriptor desc) throws IOException {
+    if(closed) {
+      throw new IllegalStateException("client is not open");
+    }
+    if(master == null) {
+      locateRootRegion();
+    }
+    master.createTable(desc);
+  }
+
+  public synchronized void deleteTable(Text tableName) throws IOException {
+    if(closed) {
+      throw new IllegalStateException("client is not open");
+    }
+    if(master == null) {
+      locateRootRegion();
+    }
+    master.deleteTable(tableName);
+  }
+  
   public synchronized void openTable(Text tableName) throws IOException {
-    if (closed) {
+    if(closed) {
       throw new IllegalStateException("client is not open");
     }
 
     tableServers = tablesToServers.get(tableName);
-    if (tableServers == null) {                 // We don't know where the table is
+    if(tableServers == null ) {                 // We don't know where the table is
       findTableInMeta(tableName);               // Load the information from meta
     }
   }
@@ -108,9 +133,9 @@
   private void findTableInMeta(Text tableName) throws IOException {
     TreeMap<Text, TableInfo> metaServers = tablesToServers.get(META_TABLE_NAME);
     
-    if (metaServers == null) {                   // Don't know where the meta is
+    if(metaServers == null) {                   // Don't know where the meta is
       loadMetaFromRoot(tableName);
-      if (tableName.equals(META_TABLE_NAME) || tableName.equals(ROOT_TABLE_NAME)) {
+      if(tableName.equals(META_TABLE_NAME) || tableName.equals(ROOT_TABLE_NAME)) {
         // All we really wanted was the meta or root table
         return;
       }
@@ -119,7 +144,7 @@
 
     tableServers = new TreeMap<Text, TableInfo>();
     for(Iterator<TableInfo> i = metaServers.tailMap(tableName).values().iterator();
-        i.hasNext();) {
+        i.hasNext(); ) {
       
       TableInfo t = i.next();
       
@@ -133,7 +158,7 @@
    */
   private void loadMetaFromRoot(Text tableName) throws IOException {
     locateRootRegion();
-    if (tableName.equals(ROOT_TABLE_NAME)) {   // All we really wanted was the root
+    if(tableName.equals(ROOT_TABLE_NAME)) {   // All we really wanted was the root
       return;
     }
     scanRoot();
@@ -144,10 +169,12 @@
    * could be.
    */
   private void locateRootRegion() throws IOException {
-    if (master == null) {
+    if(master == null) {
+      HServerAddress masterLocation =
+        new HServerAddress(this.conf.get(MASTER_ADDRESS));
       master = (HMasterInterface)RPC.getProxy(HMasterInterface.class, 
-                                              HMasterInterface.versionID,
-                                              masterLocation.getInetSocketAddress(), conf);
+          HMasterInterface.versionID,
+          masterLocation.getInetSocketAddress(), conf);
     }
     
     int tries = 0;
@@ -157,16 +184,15 @@
       while(rootRegionLocation == null && localTimeouts < numTimeouts) {
         rootRegionLocation = master.findRootRegion();
 
-        if (rootRegionLocation == null) {
+        if(rootRegionLocation == null) {
           try {
             Thread.sleep(clientTimeout);
-
           } catch(InterruptedException iex) {
           }
           localTimeouts++;
         }
       }
-      if (rootRegionLocation == null) {
+      if(rootRegionLocation == null) {
         throw new IOException("Timed out trying to locate root region");
       }
       
@@ -174,9 +200,9 @@
       
       HRegionInterface rootRegion = getHRegionConnection(rootRegionLocation);
 
-      if (rootRegion.getRegionInfo(rootRegionInfo.regionName) != null) {
+      if(rootRegion.getRegionInfo(HGlobals.rootRegionInfo.regionName) != null) {
         tableServers = new TreeMap<Text, TableInfo>();
-        tableServers.put(startRow, new TableInfo(rootRegionInfo, rootRegionLocation));
+        tableServers.put(startRow, new TableInfo(HGlobals.rootRegionInfo, rootRegionLocation));
         tablesToServers.put(ROOT_TABLE_NAME, tableServers);
         break;
       }
@@ -184,7 +210,7 @@
       
     } while(rootRegionLocation == null && tries++ < numRetries);
     
-    if (rootRegionLocation == null) {
+    if(rootRegionLocation == null) {
       closed = true;
       throw new IOException("unable to locate root region server");
     }
@@ -202,53 +228,64 @@
 
   /*
    * Scans a single meta region
-   * @param t           - the table we're going to scan
-   * @param tableName   - the name of the table we're looking for
+   * @param t the table we're going to scan
+   * @param tableName the name of the table we're looking for
    */
   private void scanOneMetaRegion(TableInfo t, Text tableName) throws IOException {
     HRegionInterface server = getHRegionConnection(t.serverAddress);
-    HScannerInterface scanner = null;
+    long scannerId = -1L;
     try {
-      scanner = server.openScanner(t.regionInfo.regionName, metaColumns, tableName);
-      HStoreKey key = new HStoreKey();
-      TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
+      scannerId = server.openScanner(t.regionInfo.regionName, metaColumns, tableName);
+
       DataInputBuffer inbuf = new DataInputBuffer();
+      while(true) {
+        HStoreKey key = new HStoreKey();
 
-      while(scanner.next(key, results)) {
-        byte hRegionInfoBytes[] = results.get(META_COL_REGIONINFO);
-        inbuf.reset(hRegionInfoBytes, hRegionInfoBytes.length);
+        LabelledData[] values = server.next(scannerId, key);
+        if(values.length == 0) {
+          break;
+        }
+
+        TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
+        for(int i = 0; i < values.length; i++) {
+          results.put(values[i].getLabel(), values[i].getData().get());
+        }
         HRegionInfo regionInfo = new HRegionInfo();
+        byte[] bytes = results.get(META_COL_REGIONINFO);
+        inbuf.reset(bytes, bytes.length);
         regionInfo.readFields(inbuf);
-        
-        if (!regionInfo.tableDesc.getName().equals(tableName)) {
+
+        if(!regionInfo.tableDesc.getName().equals(tableName)) {
           // We're done
           break;
         }
-                    
-        byte serverBytes[] = results.get(META_COL_SERVER);
-        String serverName = new String(serverBytes, UTF8_ENCODING);
+        
+        bytes = results.get(META_COL_SERVER);
+        String serverName = new String(bytes, UTF8_ENCODING);
           
         tableServers.put(regionInfo.startKey, 
-                         new TableInfo(regionInfo, new HServerAddress(serverName)));
+            new TableInfo(regionInfo, new HServerAddress(serverName)));
 
-        results.clear();
       }
+
     } finally {
-      scanner.close();
+      if(scannerId != -1L) {
+        server.close(scannerId);
+      }
     }
   }
 
-  public synchronized HRegionInterface getHRegionConnection(HServerAddress regionServer)
-    throws IOException {
+  synchronized HRegionInterface getHRegionConnection(HServerAddress regionServer)
+      throws IOException {
 
-    // See if we already have a connection
+      // See if we already have a connection
 
     HRegionInterface server = servers.get(regionServer.toString());
     
-    if (server == null) {                                // Get a connection
+    if(server == null) {                                // Get a connection
       
       server = (HRegionInterface)RPC.waitForProxy(HRegionInterface.class, 
-                                                  HRegionInterface.versionID, regionServer.getInetSocketAddress(), conf);
+          HRegionInterface.versionID, regionServer.getInetSocketAddress(), conf);
       
       servers.put(regionServer.toString(), server);
     }
@@ -257,7 +294,7 @@
 
   /** Close the connection to the HRegionServer */
   public synchronized void close() throws IOException {
-    if (!closed) {
+    if(! closed) {
       RPC.stopClient();
       closed = true;
     }
@@ -270,48 +307,58 @@
    * catalog table that just contains table names and their descriptors.
    * Right now, it only exists as part of the META table's region info.
    */
-  public HTableDescriptor[] listTables() throws IOException {
+  public synchronized HTableDescriptor[] listTables() throws IOException {
     TreeSet<HTableDescriptor> uniqueTables = new TreeSet<HTableDescriptor>();
     
     TreeMap<Text, TableInfo> metaTables = tablesToServers.get(META_TABLE_NAME);
-    if (metaTables == null) {
+    if(metaTables == null) {
       // Meta is not loaded yet so go do that
       loadMetaFromRoot(META_TABLE_NAME);
       metaTables = tablesToServers.get(META_TABLE_NAME);
     }
 
-    for(Iterator<TableInfo>i = metaTables.values().iterator(); i.hasNext();) {
-      TableInfo t = i.next();
+    for(Iterator<TableInfo>it = metaTables.values().iterator(); it.hasNext(); ) {
+      TableInfo t = it.next();
       HRegionInterface server = getHRegionConnection(t.serverAddress);
-      HScannerInterface scanner = null;
+      long scannerId = -1L;
       try {
-        scanner = server.openScanner(t.regionInfo.regionName, metaColumns, startRow);
+        scannerId = server.openScanner(t.regionInfo.regionName, metaColumns, startRow);
         HStoreKey key = new HStoreKey();
-        TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
+        
         DataInputBuffer inbuf = new DataInputBuffer();
-        while(scanner.next(key, results)) {
-          byte infoBytes[] = (byte[]) results.get(ROOT_COL_REGIONINFO);
-          inbuf.reset(infoBytes, infoBytes.length);
-          HRegionInfo info = new HRegionInfo();
-          info.readFields(inbuf);
+        while(true) {
+          LabelledData[] values = server.next(scannerId, key);
+          if(values.length == 0) {
+            break;
+          }
 
-          // Only examine the rows where the startKey is zero length
-          
-          if (info.startKey.getLength() == 0) {
-            uniqueTables.add(info.tableDesc);
+          for(int i = 0; i < values.length; i++) {
+            if(values[i].getLabel().equals(META_COL_REGIONINFO)) {
+              byte[] bytes = values[i].getData().get();
+              inbuf.reset(bytes, bytes.length);
+              HRegionInfo info = new HRegionInfo();
+              info.readFields(inbuf);
+
+              // Only examine the rows where the startKey is zero length
+              
+              if(info.startKey.getLength() == 0) {
+                uniqueTables.add(info.tableDesc);
+              }
+            }
           }
-          results.clear();
         }
         
       } finally {
-        scanner.close();
+        if(scannerId != -1L) {
+          server.close(scannerId);
+        }
       }
     }
     return (HTableDescriptor[]) uniqueTables.toArray(new HTableDescriptor[uniqueTables.size()]);
   }
 
-  private TableInfo getTableInfo(Text row) {
-    if (tableServers == null) {
+  private synchronized TableInfo getTableInfo(Text row) {
+    if(tableServers == null) {
       throw new IllegalStateException("Must open table first");
     }
     
@@ -325,17 +372,17 @@
   public byte[] get(Text row, Text column) throws IOException {
     TableInfo info = getTableInfo(row);
     return getHRegionConnection(info.serverAddress).get(
-                                                        info.regionInfo.regionName, row, column).get();
+        info.regionInfo.regionName, row, column).get();
   }
  
   /** Get the specified number of versions of the specified row and column */
   public byte[][] get(Text row, Text column, int numVersions) throws IOException {
     TableInfo info = getTableInfo(row);
     BytesWritable[] values = getHRegionConnection(info.serverAddress).get(
-                                                                          info.regionInfo.regionName, row, column, numVersions);
+        info.regionInfo.regionName, row, column, numVersions);
     
     ArrayList<byte[]> bytes = new ArrayList<byte[]>();
-    for(int i = 0; i < values.length; i++) {
+    for(int i = 0 ; i < values.length; i++) {
       bytes.add(values[i].get());
     }
     return bytes.toArray(new byte[values.length][]);
@@ -348,10 +395,10 @@
   public byte[][] get(Text row, Text column, long timestamp, int numVersions) throws IOException {
     TableInfo info = getTableInfo(row);
     BytesWritable[] values = getHRegionConnection(info.serverAddress).get(
-                                                                          info.regionInfo.regionName, row, column, timestamp, numVersions);
+        info.regionInfo.regionName, row, column, timestamp, numVersions);
     
     ArrayList<byte[]> bytes = new ArrayList<byte[]>();
-    for(int i = 0; i < values.length; i++) {
+    for(int i = 0 ; i < values.length; i++) {
       bytes.add(values[i].get());
     }
     return bytes.toArray(new byte[values.length][]);
@@ -361,15 +408,15 @@
   public LabelledData[] getRow(Text row) throws IOException {
     TableInfo info = getTableInfo(row);
     return getHRegionConnection(info.serverAddress).getRow(
-                                                           info.regionInfo.regionName, row);
+        info.regionInfo.regionName, row);
   }
 
   /** 
    * Get a scanner on the current table starting at the specified row.
    * Return the specified columns.
    */
-  public HScannerInterface obtainScanner(Text[] columns, Text startRow) throws IOException {
-    if (tableServers == null) {
+  public synchronized HScannerInterface obtainScanner(Text[] columns, Text startRow) throws IOException {
+    if(tableServers == null) {
       throw new IllegalStateException("Must open table first");
     }
     return new ClientScanner(columns, startRow);
@@ -462,7 +509,7 @@
     private TableInfo[] regions;
     private int currentRegion;
     private HRegionInterface server;
-    private HScannerInterface scanner;
+    private long scannerId;
     
     public ClientScanner(Text[] columns, Text startRow) throws IOException {
       this.columns = columns;
@@ -472,7 +519,7 @@
       this.regions = info.toArray(new TableInfo[info.size()]);
       this.currentRegion = -1;
       this.server = null;
-      this.scanner = null;
+      this.scannerId = -1L;
       nextScanner();
     }
     
@@ -481,18 +528,19 @@
      * Returns false if there are no more scanners.
      */
     private boolean nextScanner() throws IOException {
-      if (scanner != null) {
-        scanner.close();
+      if(scannerId != -1L) {
+        server.close(scannerId);
+        scannerId = -1L;
       }
       currentRegion += 1;
-      if (currentRegion == regions.length) {
+      if(currentRegion == regions.length) {
         close();
         return false;
       }
       try {
         server = getHRegionConnection(regions[currentRegion].serverAddress);
-        scanner = server.openScanner(regions[currentRegion].regionInfo.regionName,
-                                     columns, startRow);
+        scannerId = server.openScanner(regions[currentRegion].regionInfo.regionName,
+            columns, startRow);
         
       } catch(IOException e) {
         close();
@@ -505,29 +553,66 @@
     * @see org.apache.hadoop.hbase.HScannerInterface#next(org.apache.hadoop.hbase.HStoreKey, java.util.TreeMap)
      */
    public boolean next(HStoreKey key, TreeMap<Text, byte[]> results) throws IOException {
-      if (closed) {
+      if(closed) {
         return false;
       }
-      boolean status = scanner.next(key, results);
-      if (!status) {
-        status = nextScanner();
-        if (status) {
-          status = scanner.next(key, results);
-        }
+      LabelledData[] values = null;
+      do {
+        values = server.next(scannerId, key);
+      } while(values.length == 0 && nextScanner());
+
+      for(int i = 0; i < values.length; i++) {
+        results.put(values[i].getLabel(), values[i].getData().get());
       }
-      return status;
+      return values.length != 0;
     }
 
     /* (non-Javadoc)
      * @see org.apache.hadoop.hbase.HScannerInterface#close()
      */
     public void close() throws IOException {
-      if (scanner != null) {
-        scanner.close();
+      if(scannerId != -1L) {
+        server.close(scannerId);
       }
       server = null;
       closed = true;
     }
   }
-
-}
+  
+  private void printUsage() {
+    System.err.println("Usage: java " + this.getClass().getName() +
+        " [--master=hostname:port]");
+  }
+  
+  private int doCommandLine(final String args[]) {
+    // Process command-line args. TODO: Better cmd-line processing
+    // (but hopefully something not as painful as cli options).
+    for (String cmd: args) {
+      if (cmd.equals("-h") || cmd.startsWith("--h")) {
+        printUsage();
+        return 0;
+      }
+      
+      final String masterArgKey = "--master=";
+      if (cmd.startsWith(masterArgKey)) {
+        this.conf.set(MASTER_ADDRESS,
+            cmd.substring(masterArgKey.length()));
+      }
+    }
+    
+    int errCode = -1;
+    try {
+      locateRootRegion();
+    } catch (Exception e) {
+      e.printStackTrace();
+    }
+    
+    return errCode;
+  }
+  
+  public static void main(final String args[]) {
+    Configuration c = new HBaseConfiguration();
+    int errCode = (new HClient(c)).doCommandLine(args);
+    System.exit(errCode);
+  }
+}
\ No newline at end of file

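[The HClient rewrite above replaces the remote HScannerInterface handle with a numeric scanner id: openScanner() now returns a long, rows are pulled with next(scannerId, key) as LabelledData label/value pairs, an empty array marks exhaustion, and close(scannerId) releases the server-side state. A condensed sketch of that calling convention, assuming server, regionName, columns, and startRow are in scope as in the diff.]

    long scannerId = -1L;
    try {
      scannerId = server.openScanner(regionName, columns, startRow);
      HStoreKey key = new HStoreKey();
      while (true) {
        LabelledData[] values = server.next(scannerId, key);
        if (values.length == 0) {
          break;                             // empty batch: scanner exhausted
        }
        for (int i = 0; i < values.length; i++) {
          Text column = values[i].getLabel();
          byte[] value = values[i].getData().get();
          // ... consume column/value for the row carried in 'key' ...
        }
      }
    } finally {
      if (scannerId != -1L) {
        server.close(scannerId);             // always release the server-side scanner
      }
    }
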
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConstants.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConstants.java?view=diff&rev=532083&r1=532082&r2=532083
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConstants.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConstants.java Tue Apr 24 14:13:08 2007
@@ -24,7 +24,18 @@
   
   // Configuration parameters
   
-  static final String MASTER_DEFAULT_NAME = "hbase.master.default.name";
+  // TODO: URL for hbase master, like hdfs URLs with host and port.
+  // Or, like jdbc URLs:
+  // jdbc:mysql://[host][,failoverhost...][:port]/[database]
+  // jdbc:mysql://[host][,failoverhost...][:port]/[database][?propertyName1][=propertyValue1][&propertyName2][=propertyValue2]...
+  
+  static final String MASTER_ADDRESS = "hbase.master";
+  // TODO: Support 'local': i.e. default of all running in single
+  // process.  Same for regionserver.
+  static final String DEFAULT_MASTER_ADDRESS = "localhost:60000";
+  static final String REGIONSERVER_ADDRESS = "hbase.regionserver";
+  static final String DEFAULT_REGIONSERVER_ADDRESS =
+    "localhost:60010";
   static final String HREGION_DIR = "hbase.regiondir";
   static final String DEFAULT_HREGION_DIR = "/hbase";
   static final String HREGIONDIR_PREFIX = "hregion_";
@@ -37,10 +48,10 @@
   // Do we ever need to know all the information that we are storing?
   
   static final Text ROOT_TABLE_NAME = new Text("--ROOT--");
-  static final Text ROOT_COLUMN_FAMILY = new Text("info");
-  static final Text ROOT_COL_REGIONINFO = new Text(ROOT_COLUMN_FAMILY + ":" + "regioninfo");
-  static final Text ROOT_COL_SERVER = new Text(ROOT_COLUMN_FAMILY + ":" + "server");
-  static final Text ROOT_COL_STARTCODE = new Text(ROOT_COLUMN_FAMILY + ":" + "serverstartcode");
+  static final Text ROOT_COLUMN_FAMILY = new Text("info:");
+  static final Text ROOT_COL_REGIONINFO = new Text(ROOT_COLUMN_FAMILY + "regioninfo");
+  static final Text ROOT_COL_SERVER = new Text(ROOT_COLUMN_FAMILY + "server");
+  static final Text ROOT_COL_STARTCODE = new Text(ROOT_COLUMN_FAMILY + "serverstartcode");
 
   static final Text META_TABLE_NAME = new Text("--META--");
   static final Text META_COLUMN_FAMILY = new Text(ROOT_COLUMN_FAMILY);

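[A detail worth noting in the HConstants hunk above: the trailing colon moved from the call sites into the family constant itself, so the fully qualified column names are unchanged. Illustrated below; the variable names are hypothetical.]

    // Before this patch: family "info", separator added at each call site.
    Text oldCol = new Text(new Text("info") + ":" + "regioninfo");
    // After this patch: family "info:" already carries the separator.
    Text newCol = new Text(new Text("info:") + "regioninfo");
    // Both spell the same column name, "info:regioninfo".
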
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLocking.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLocking.java?view=diff&rev=532083&r1=532082&r2=532083
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLocking.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLocking.java Tue Apr 24 14:13:08 2007
@@ -1,90 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-/*******************************************************************************
- * HLocking is a set of lock primitives that are pretty helpful in a few places
- * around the HBase code.  For each independent entity that needs locking, create
- * a new HLocking instance.
- ******************************************************************************/
-public class HLocking {
-  Integer readerLock = new Integer(0);
-  Integer writerLock = new Integer(0);
-  int numReaders = 0;
-  int numWriters = 0;
-
-  public HLocking() {
-  }
-
-  /** Caller needs the nonexclusive read-lock */
-  public void obtainReadLock() {
-    synchronized(readerLock) {
-      synchronized(writerLock) {
-        while(numWriters > 0) {
-          try {
-            writerLock.wait();
-          } catch (InterruptedException ie) {
-          }
-        }
-        numReaders++;
-        readerLock.notifyAll();
-      }
-    }
-  }
-
-  /** Caller is finished with the nonexclusive read-lock */
-  public void releaseReadLock() {
-    synchronized(readerLock) {
-      synchronized(writerLock) {
-        numReaders--;
-        readerLock.notifyAll();
-      }
-    }
-  }
-
-  /** Caller needs the exclusive write-lock */
-  public void obtainWriteLock() {
-    synchronized(readerLock) {
-      synchronized(writerLock) {
-        while(numReaders > 0) {
-          try {
-            readerLock.wait();
-          } catch (InterruptedException ie) {
-          }
-        }
-        while(numWriters > 0) {
-          try {
-            writerLock.wait();
-          } catch (InterruptedException ie) {
-          }
-        }
-        numWriters++;
-        writerLock.notifyAll();
-      }
-    }
-  }
-
-  /** Caller is finished with the write lock */
-  public void releaseWriteLock() {
-    synchronized(readerLock) {
-      synchronized(writerLock) {
-        numWriters--;
-        writerLock.notifyAll();
-      }
-    }
-  }
-}
-

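[The deletion above removes HBase's hand-rolled reader-writer primitive. For readers following the diff, its obtain/release pairs correspond roughly to java.util.concurrent's ReentrantReadWriteLock; a sketch of that equivalence follows — it is not something this patch introduces, and the LockDemo class is hypothetical.]

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class LockDemo {  // hypothetical
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

      public void readSide() {
        lock.readLock().lock();        // ~ obtainReadLock()
        try {
          // shared, non-exclusive work
        } finally {
          lock.readLock().unlock();    // ~ releaseReadLock()
        }
      }

      public void writeSide() {
        lock.writeLock().lock();       // ~ obtainWriteLock()
        try {
          // exclusive work
        } finally {
          lock.writeLock().unlock();   // ~ releaseWriteLock()
        }
      }
    }
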
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java?view=diff&rev=532083&r1=532082&r2=532083
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java Tue Apr 24 14:13:08 2007
@@ -101,12 +101,12 @@
       newlog.close();
     }
     
-    if (fs.exists(srcDir)) {
+    if(fs.exists(srcDir)) {
       
-      if (!fs.delete(srcDir)) {
+      if(! fs.delete(srcDir)) {
         LOG.error("Cannot delete: " + srcDir);
         
-        if (!FileUtil.fullyDelete(new File(srcDir.toString()))) {
+        if(! FileUtil.fullyDelete(new File(srcDir.toString()))) {
           throw new IOException("Cannot delete: " + srcDir);
         }
       }
@@ -127,7 +127,7 @@
     this.conf = conf;
     this.logSeqNum = 0;
 
-    if (fs.exists(dir)) {
+    if(fs.exists(dir)) {
       throw new IOException("Target HLog directory already exists: " + dir);
     }
     fs.mkdirs(dir);
@@ -154,7 +154,7 @@
 
       Vector<Path> toDeleteList = new Vector<Path>();
       synchronized(this) {
-        if (closed) {
+        if(closed) {
           throw new IOException("Cannot roll log; log is closed");
         }
 
@@ -174,10 +174,10 @@
 
         // Close the current writer (if any), and grab a new one.
         
-        if (writer != null) {
+        if(writer != null) {
           writer.close();
           
-          if (filenum > 0) {
+          if(filenum > 0) {
             outputfiles.put(logSeqNum-1, computeFilename(filenum-1));
           }
         }
@@ -192,10 +192,10 @@
         // over all the regions.
 
         long oldestOutstandingSeqNum = Long.MAX_VALUE;
-        for(Iterator<Long> it = regionToLastFlush.values().iterator(); it.hasNext();) {
+        for(Iterator<Long> it = regionToLastFlush.values().iterator(); it.hasNext(); ) {
           long curSeqNum = it.next().longValue();
           
-          if (curSeqNum < oldestOutstandingSeqNum) {
+          if(curSeqNum < oldestOutstandingSeqNum) {
             oldestOutstandingSeqNum = curSeqNum;
           }
         }
@@ -205,10 +205,10 @@
 
         LOG.debug("removing old log files");
         
-        for(Iterator<Long> it = outputfiles.keySet().iterator(); it.hasNext();) {
+        for(Iterator<Long> it = outputfiles.keySet().iterator(); it.hasNext(); ) {
           long maxSeqNum = it.next().longValue();
           
-          if (maxSeqNum < oldestOutstandingSeqNum) {
+          if(maxSeqNum < oldestOutstandingSeqNum) {
             Path p = outputfiles.get(maxSeqNum);
             it.remove();
             toDeleteList.add(p);
@@ -221,7 +221,7 @@
 
       // Actually delete them, if any!
 
-      for(Iterator<Path> it = toDeleteList.iterator(); it.hasNext();) {
+      for(Iterator<Path> it = toDeleteList.iterator(); it.hasNext(); ) {
         Path p = it.next();
         fs.delete(p);
       }
@@ -262,7 +262,7 @@
    * We need to seize a lock on the writer so that writes are atomic.
    */
   public synchronized void append(Text regionName, Text tableName, Text row, TreeMap<Text, byte[]> columns, long timestamp) throws IOException {
-    if (closed) {
+    if(closed) {
       throw new IOException("Cannot append; log is closed");
     }
     
@@ -273,12 +273,12 @@
     // that don't have any flush yet, the relevant operation is the
     // first one that's been added.
     
-    if (regionToLastFlush.get(regionName) == null) {
+    if(regionToLastFlush.get(regionName) == null) {
       regionToLastFlush.put(regionName, seqNum[0]);
     }
 
     int counter = 0;
-    for(Iterator<Text> it = columns.keySet().iterator(); it.hasNext();) {
+    for(Iterator<Text> it = columns.keySet().iterator(); it.hasNext(); ) {
       Text column = it.next();
       byte[] val = columns.get(column);
       HLogKey logKey = new HLogKey(regionName, tableName, row, seqNum[counter++]);
@@ -333,16 +333,16 @@
 
   /** Complete the cache flush */
   public synchronized void completeCacheFlush(Text regionName, Text tableName, long logSeqId) throws IOException {
-    if (closed) {
+    if(closed) {
       return;
     }
     
-    if (!insideCacheFlush) {
+    if(! insideCacheFlush) {
      throw new IOException("Impossible situation: inside completeCacheFlush(), but 'insideCacheFlush' flag is false");
     }
     
     writer.append(new HLogKey(regionName, tableName, HLog.METAROW, logSeqId),
-                  new HLogEdit(HLog.METACOLUMN, HStoreKey.COMPLETE_CACHEFLUSH, System.currentTimeMillis()));
+        new HLogEdit(HLog.METACOLUMN, HStoreKey.COMPLETE_CACHEFLUSH, System.currentTimeMillis()));
     numEntries++;
 
     // Remember the most-recent flush for each region.
@@ -353,4 +353,4 @@
     insideCacheFlush = false;
     notifyAll();
   }
-}
+}
\ No newline at end of file

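[The rollWriter() hunks above encode a simple retention rule: a closed log file may be deleted once its highest sequence number is older than the most recent cache flush of every region, because no region could still need it for recovery. The core computation, condensed as a sketch, with regionToLastFlush, outputfiles, and toDeleteList assumed in scope as declared in the class.]

    // Oldest sequence number still unflushed by any region.
    long oldestOutstandingSeqNum = Long.MAX_VALUE;
    for (Long last : regionToLastFlush.values()) {
      oldestOutstandingSeqNum = Math.min(oldestOutstandingSeqNum, last.longValue());
    }

    // Any log file whose newest entry predates that point is safe to drop.
    for (Iterator<Long> it = outputfiles.keySet().iterator(); it.hasNext(); ) {
      long maxSeqNum = it.next().longValue();
      if (maxSeqNum < oldestOutstandingSeqNum) {
        toDeleteList.add(outputfiles.get(maxSeqNum));  // look up before removing the key
        it.remove();
      }
    }
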
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLogEdit.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLogEdit.java?view=diff&rev=532083&r1=532082&r2=532083
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLogEdit.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLogEdit.java Tue Apr 24 14:13:08 2007
@@ -67,5 +67,4 @@
     this.val.readFields(in);
     this.timestamp = in.readLong();
   }
-}
-
+}
\ No newline at end of file

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLogKey.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLogKey.java?view=diff&rev=532083&r1=532082&r2=532083
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLogKey.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLogKey.java Tue Apr 24 14:13:08 2007
@@ -80,10 +80,10 @@
     HLogKey other = (HLogKey) o;
     int result = this.regionName.compareTo(other.regionName);
     
-    if (result == 0) {
+    if(result == 0) {
       result = this.row.compareTo(other.row);
       
-      if (result == 0) {
+      if(result == 0) {
         
         if (this.logSeqNum < other.logSeqNum) {
           result = -1;
@@ -113,5 +113,4 @@
     this.row.readFields(in);
     this.logSeqNum = in.readLong();
   }
-}
-
+}
\ No newline at end of file


