hadoop-common-commits mailing list archives

From: cutt...@apache.org
Subject: svn commit: r535970 [4/4] - in /lucene/hadoop/trunk: ./ src/contrib/hbase/bin/ src/contrib/hbase/conf/ src/contrib/hbase/src/java/org/apache/hadoop/hbase/ src/contrib/hbase/src/test/org/apache/hadoop/hbase/
Date: Mon, 07 May 2007 19:58:57 GMT
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java?view=diff&rev=535970&r1=535969&r2=535970
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java Mon May  7 12:58:53 2007
@@ -23,9 +23,6 @@
  * A Key for a stored row
  ******************************************************************************/
 public class HStoreKey implements WritableComparable {
-  public static final byte[] DELETE_BYTES = "HSTOREKEY::DELETEVAL".getBytes();
-  public static final byte[] COMPLETE_CACHEFLUSH = "HSTOREKEY::CACHEFLUSH".getBytes();
-
   public static Text extractFamily(Text col) throws IOException {
     String column = col.toString();
     int colpos = column.indexOf(":");
@@ -97,32 +94,42 @@
    * @param other Key to compare against. Compares row and column.
    * @return True if same row and column.
    * @see {@link #matchesWithoutColumn(HStoreKey)}
+   * @see {@link #matchesRowFamily(HStoreKey)}
    */ 
   public boolean matchesRowCol(HStoreKey other) {
-    if(this.row.compareTo(other.row) == 0 &&
-        this.column.compareTo(other.column) == 0) {
-      return true;
-      
-    } else {
-      return false;
-    }
+    return this.row.compareTo(other.row) == 0
+      && this.column.compareTo(other.column) == 0;
   }
   
   /**
-   * @param other Key to copmare against. Compares row and
-   * timestamp.
-   * @return True if same row and timestamp is greater than
-   * <code>other</code>
+   * @param other Key to compare against. Compares row and timestamp.
+   * 
+   * @return True if same row and timestamp is greater than or equal to that of <code>other</code>
    * @see {@link #matchesRowCol(HStoreKey)}
+   * @see {@link #matchesRowFamily(HStoreKey)}
    */
   public boolean matchesWithoutColumn(HStoreKey other) {
-    if((this.row.compareTo(other.row) == 0) &&
-        (this.timestamp >= other.getTimestamp())) {
-      return true;
+    return this.row.compareTo(other.row) == 0
+      && this.timestamp >= other.getTimestamp();
+  }
+  
+  /**
+   * @param other Key to compare against. Compares row and column family.
+   * 
+   * @return True if same row and column family
+   * @see {@link #matchesRowCol(HStoreKey)}
+   * @see {@link #matchesWithoutColumn(HStoreKey)}
+   */
+  public boolean matchesRowFamily(HStoreKey other) {
+    boolean status = false;
+    try {
+      status = this.row.compareTo(other.row) == 0
+        && extractFamily(this.column).compareTo(
+            extractFamily(other.getColumn())) == 0;
       
-    } else {
-      return false;
+    } catch(IOException e) {
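+      // extractFamily throws an IOException for a malformed column name
+      // (no family delimiter); status stays false, i.e. no match.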
     }
+    return status;
   }
   
   public String toString() {

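The three matchers above compare different slices of the key: row plus column, row plus timestamp, and now row plus column family. A minimal sketch of the distinction (assuming an HStoreKey constructor taking row, column, and timestamp, which the hunks above do not show):

    import org.apache.hadoop.hbase.HStoreKey;
    import org.apache.hadoop.io.Text;

    public class KeyMatchExample {
      public static void main(String[] args) {
        HStoreKey a = new HStoreKey(new Text("row1"), new Text("anchor:num-1"), 10L);
        HStoreKey b = new HStoreKey(new Text("row1"), new Text("anchor:num-2"), 5L);

        // Rows match but the columns differ.
        System.out.println(a.matchesRowCol(b));        // false
        // Rows match and a's timestamp (10) >= b's (5).
        System.out.println(a.matchesWithoutColumn(b)); // true
        // Rows match and both columns are in the "anchor:" family.
        System.out.println(a.matchesRowFamily(b));     // true
      }
    }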
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java?view=diff&rev=535970&r1=535969&r2=535970
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java Mon May  7 12:58:53 2007
@@ -15,26 +15,68 @@
  */
 package org.apache.hadoop.hbase;
 
-import org.apache.hadoop.io.*;
-
-import java.io.*;
-import java.util.*;
-
-/*******************************************************************************
- * HTableDescriptor contains various facts about an HTable, like its columns, 
- * column families, etc.
- ******************************************************************************/
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.TreeSet;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.WritableComparable;
+
+/**
+ * HTableDescriptor contains various facts about an HTable, like
+ * column families, maximum number of column versions, etc.
+ */
 public class HTableDescriptor implements WritableComparable {
   Text name;
   int maxVersions;
   TreeSet<Text> families = new TreeSet<Text>();
+  
+  /**
+   * Legal table names can only contain 'word characters'
+   * (i.e. <code>[a-zA-Z_0-9]</code>) and hyphens.
+   * 
+   * Let's be restrictive until there is a reason to be otherwise.
+   */
+  private static final Pattern LEGAL_TABLE_NAME =
+    Pattern.compile("[\\w-]+");
+  
+  /**
+   * Legal family names can only contain 'word characters' and
+   * end in a colon.
+   */
+  private static final Pattern LEGAL_FAMILY_NAME =
+    Pattern.compile("\\w+:");
 
   public HTableDescriptor() {
     this.name = new Text();
     this.families.clear();
   }
 
+  /**
+   * Constructor.
+   * @param name Table name.
+   * @param maxVersions Number of versions of a column to keep.
+   * @throws IllegalArgumentException if passed a table name
+   * that is made of other than 'word' characters or hyphens:
+   * i.e. <code>[a-zA-Z_0-9-]</code>
+   */
   public HTableDescriptor(String name, int maxVersions) {
+    Matcher m = LEGAL_TABLE_NAME.matcher(name);
+    if (m == null || !m.matches()) {
+      throw new IllegalArgumentException("Table names can only " +
+          "contain 'word characters': i.e. [a-zA-Z_0-9");
+    }
+    if (maxVersions <= 0) {
+      // TODO: Allow maxVersion of 0 to be the way you say
+      // "Keep all versions".  Until there is support, consider
+      // 0 -- or < 0 -- a configuration error.
+      throw new IllegalArgumentException("Maximum versions " +
+        "must be positive");
+    }
     this.name = new Text(name);
     this.maxVersions = maxVersions;
   }
@@ -47,19 +89,28 @@
     return maxVersions;
   }
 
-  /** Add a column */
+  /**
+   * Add a column family.
+   * @param family Column family name to add.  Column family names
+   * must end in a <code>:</code>
+   * @throws IllegalArgumentException if passed a family name
+   * that is made of other than 'word' characters (i.e.
+   * <code>[a-zA-Z_0-9]</code>) or that does not end in a colon
+   */
   public void addFamily(Text family) {
+    String familyStr = family.toString();
+    Matcher m = LEGAL_FAMILY_NAME.matcher(familyStr);
+    if (m == null || !m.matches()) {
+      throw new IllegalArgumentException("Family names can " +
+          "only contain 'word characters' and must end with a " +
+          "':'");
+    }
     families.add(family);
   }
 
   /** Do we contain a given column? */
   public boolean hasFamily(Text family) {
-    if(families.contains(family)) {
-      return true;
-      
-    } else {
-      return false;
-    }
+    return families.contains(family);
   }
 
   /** All the column families in this table. */
@@ -67,6 +118,12 @@
     return families;
   }
 
+  @Override
+  public String toString() {
+    return "name: " + this.name.toString() +
+      ", maxVersions: " + this.maxVersions + ", families: " + this.families;
+  }
+  
   //////////////////////////////////////////////////////////////////////////////
   // Writable
   //////////////////////////////////////////////////////////////////////////////
@@ -120,4 +177,4 @@
     }
     return result;
   }
-}
+}
\ No newline at end of file

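The new LEGAL_TABLE_NAME and LEGAL_FAMILY_NAME patterns turn bad names into immediate IllegalArgumentExceptions. A short sketch of what now passes and fails (the names are illustrative only):

    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.io.Text;

    public class DescriptorValidationExample {
      public static void main(String[] args) {
        // Legal: the name matches [\w-]+ and maxVersions is positive.
        HTableDescriptor desc = new HTableDescriptor("web-table_1", 3);
        // Legal family: word characters ending in a colon (\w+:).
        desc.addFamily(new Text("contents:"));

        try {
          new HTableDescriptor("bad name!", 3);  // space and '!' rejected
        } catch (IllegalArgumentException e) {
          System.out.println("table name rejected: " + e.getMessage());
        }
        try {
          desc.addFamily(new Text("contents"));  // missing trailing ':'
        } catch (IllegalArgumentException e) {
          System.out.println("family name rejected: " + e.getMessage());
        }
        try {
          new HTableDescriptor("test", 0);       // maxVersions must be > 0
        } catch (IllegalArgumentException e) {
          System.out.println("maxVersions rejected: " + e.getMessage());
        }
      }
    }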
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/LabelledData.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/LabelledData.java?view=diff&rev=535970&r1=535969&r2=535970
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/LabelledData.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/LabelledData.java Mon May  7 12:58:53 2007
@@ -31,9 +31,9 @@
     this.data = new BytesWritable();
   }
 
-  public LabelledData(Text label, byte[] data) {
+  public LabelledData(Text label, BytesWritable data) {
     this.label = new Text(label);
-    this.data = new BytesWritable(data);
+    this.data = data;
   }
 
   public Text getLabel() {

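Note the behavioral shift here: the old constructor copied the passed byte[] into a fresh BytesWritable, while the new one stores the BytesWritable reference it is given. Callers that reuse a buffer must now copy it themselves. A sketch (the label and value are illustrative):

    import org.apache.hadoop.hbase.LabelledData;
    import org.apache.hadoop.io.BytesWritable;
    import org.apache.hadoop.io.Text;

    public class LabelledDataExample {
      public static void main(String[] args) {
        byte[] value = "some value".getBytes();
        // The BytesWritable built here is shared with the LabelledData,
        // not copied, so it must not be mutated afterwards.
        LabelledData d = new LabelledData(new Text("contents:basic"),
            new BytesWritable(value));
        System.out.println(d.getLabel());
      }
    }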
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java?view=diff&rev=535970&r1=535969&r2=535970
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java Mon May  7 12:58:53 2007
@@ -50,6 +50,7 @@
 
     this.leaseMonitor = new LeaseMonitor();
     this.leaseMonitorThread = new Thread(leaseMonitor);
+    this.leaseMonitorThread.setName("Lease.monitor");
     leaseMonitorThread.start();
   }
 
@@ -60,6 +61,7 @@
   public void close() {
     this.running = false;
     try {
+      this.leaseMonitorThread.interrupt();
       this.leaseMonitorThread.join();
     } catch (InterruptedException iex) {
     }

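Naming the monitor thread helps in stack dumps, and the interrupt added to close() wakes a monitor that is sleeping between lease checks; without it, join() would wait out the remainder of the current sleep. A generic sketch of the interrupt-then-join shutdown pattern (not the Leases class itself):

    public class MonitorShutdownExample implements Runnable {
      private volatile boolean running = true;
      private final Thread thread = new Thread(this, "Lease.monitor");

      public void start() { thread.start(); }

      public void run() {
        while (running) {
          try {
            Thread.sleep(10 * 1000);     // periodic work would go here
          } catch (InterruptedException e) {
            // woken by close(); the loop re-checks running and exits
          }
        }
      }

      public void close() {
        running = false;
        thread.interrupt();              // wake a sleeping monitor now
        try {
          thread.join();                 // wait for run() to return
        } catch (InterruptedException e) {
          // ignored: shutdown is already in progress
        }
      }
    }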
Added: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/package.html
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/package.html?view=auto&rev=535970
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/package.html (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/package.html Mon May  7 12:58:53 2007
@@ -0,0 +1,28 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
+<html>
+<head />
+<body bgcolor="white">
+Provides HBase, the <a href="http://lucene.apache.org/hadoop">Hadoop</a> simple database.
+
+<h2>Requirements</h2>
+<ul>
+<li><a href="http://lucene.apache.org/hadoop">Hadoop</a>  It has its own set of <a href="http://lucene.apache.org/hadoop/api/overview-summary.html">requirements</a> (Scroll down the page).</li>
+<li>Java 1.5.x, preferably from <a href="http://lucene.apache.org/hadoop/api/index.html">Sun</a> Set JAVA_HOME to the root of your Java installation</li>
+
+<h2>Getting Started</h2>
+<p>First, you need a working instance of Hadoop.  Download releases at <a href="http://www.apache.org/dyn/closer.cgi/lucene/hadoop/">Hadoop downloads</a>. 
+Unpack the release and connect to its top-level directory.  Edit the file conf/hadoop-env.sh to define at least JAVA_HOME.  Try the following command:
+<pre>bin/hadoop
+</pre>
+This will display the documentation for the Hadoop command script.
+</p>
+<p>TODO</p>
+
+<h2>Related Documentation</h2>
+
+<ul>
+  <li><a href="http://wiki.apache.org/lucene-hadoop/Hbase/HbaseArchitecture">Hbase/HbaseArchitecture</a>
+</ul>
+
+</body>
+</html>

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java?view=diff&rev=535970&r1=535969&r2=535970
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java Mon May  7 12:58:53 2007
@@ -22,22 +22,38 @@
 import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.log4j.Logger;
 
 /**
  * This class creates a single process HBase cluster for junit testing.
  * One thread is created for each server.
  */
 public class MiniHBaseCluster implements HConstants {
+  private static final Logger LOG =
+    Logger.getLogger(MiniHBaseCluster.class.getName());
   private Configuration conf;
   private MiniDFSCluster cluster;
   private FileSystem fs;
   private Path parentdir;
-  private HMasterRunner master;
-  private Thread masterThread;
+  private HMasterRunner masterRunner;
+  private Thread masterRunnerThread;
   private HRegionServerRunner[] regionServers;
   private Thread[] regionThreads;
   
   public MiniHBaseCluster(Configuration conf, int nRegionNodes) {
+    this(conf, nRegionNodes, true);
+  }
+  
+  /**
+   * Constructor.
+   * @param conf
+   * @param nRegionNodes
+   * @param miniHdfsFilesystem If true, set the hbase mini
+   * cluster atop a mini hdfs cluster.  Otherwise, use the
+   * filesystem configured in <code>conf</code>.
+   */
+  public MiniHBaseCluster(Configuration conf, int nRegionNodes,
+      final boolean miniHdfsFilesystem) {
     this.conf = conf;
 
     try {
@@ -47,21 +63,20 @@
               "build/contrib/hbase/test");
 
           String dir = testDir.getAbsolutePath();
-          System.out.println(dir);
+          LOG.info("Setting test.build.data to " + dir);
           System.setProperty("test.build.data", dir);
         }
 
-        // To run using configured filesystem, comment out this
-        // line below that starts up the MiniDFSCluster.
-        this.cluster = new MiniDFSCluster(this.conf, 2, true, (String[])null);
+        if (miniHdfsFilesystem) {
+          this.cluster =
+            new MiniDFSCluster(this.conf, 2, true, (String[])null);
+        }
         this.fs = FileSystem.get(conf);
-        this.parentdir =
-          new Path(conf.get(HREGION_DIR, DEFAULT_HREGION_DIR));
+        this.parentdir = new Path(conf.get(HREGION_DIR, DEFAULT_HREGION_DIR));
         fs.mkdirs(parentdir);
 
       } catch(Throwable e) {
-        System.err.println("Mini DFS cluster failed to start");
-        e.printStackTrace();
+        LOG.error("Failed setup of FileSystem", e);
         throw e;
       }
 
@@ -70,28 +85,27 @@
       }
       
       // Create the master
-
-      this.master = new HMasterRunner();
-      this.masterThread = new Thread(master, "HMaster");
+      this.masterRunner = new HMasterRunner();
+      this.masterRunnerThread = new Thread(masterRunner, "masterRunner");
 
       // Start up the master
-
-      masterThread.start();
-      while(! master.isCrashed() && ! master.isInitialized()) {
+      LOG.info("Starting HMaster");
+      masterRunnerThread.start();
+      while(! masterRunner.isCrashed() && ! masterRunner.isInitialized()) {
         try {
-          System.err.println("Waiting for HMaster to initialize...");
+          LOG.info("...waiting for HMaster to initialize...");
           Thread.sleep(1000);
-
         } catch(InterruptedException e) {
         }
-        if(master.isCrashed()) {
+        if(masterRunner.isCrashed()) {
           throw new RuntimeException("HMaster crashed");
         }
       }
-
+      LOG.info("HMaster started.");
+      
       // Set the master's port for the HRegionServers
-
-      this.conf.set(MASTER_ADDRESS, master.getHMasterAddress().toString());
+      String address = masterRunner.getHMasterAddress().toString();
+      this.conf.set(MASTER_ADDRESS, address);
 
       // Start the HRegionServers
 
@@ -99,28 +113,26 @@
         this.conf.set(REGIONSERVER_ADDRESS, "localhost:0");
       }
       
+      LOG.info("Starting HRegionServers");
       startRegionServers(this.conf, nRegionNodes);
+      LOG.info("HRegionServers running");
 
       // Wait for things to get started
 
-      while(! master.isCrashed() && ! master.isUp()) {
+      while(! masterRunner.isCrashed() && ! masterRunner.isUp()) {
         try {
-          System.err.println("Waiting for Mini HBase cluster to start...");
+          LOG.info("Waiting for Mini HBase cluster to start...");
           Thread.sleep(1000);
-
         } catch(InterruptedException e) {
         }
-        if(master.isCrashed()) {
+        if(masterRunner.isCrashed()) {
           throw new RuntimeException("HMaster crashed");
         }
       }
       
     } catch(Throwable e) {
-
       // Delete all DFS files
-
       deleteFile(new File(System.getProperty("test.build.data"), "dfs"));
-
       throw new RuntimeException("Mini HBase cluster did not start");
     }
   }
@@ -141,39 +153,35 @@
    * supplied port is not necessarily the actual port used.
    */
   public HServerAddress getHMasterAddress() {
-    return master.getHMasterAddress();
+    return masterRunner.getHMasterAddress();
   }
   
   /** Shut down the HBase cluster */
   public void shutdown() {
-    System.out.println("Shutting down the HBase Cluster");
+    LOG.info("Shutting down the HBase Cluster");
     for(int i = 0; i < regionServers.length; i++) {
       regionServers[i].shutdown();
     }
-    master.shutdown();
-    
+    masterRunner.shutdown();
     for(int i = 0; i < regionServers.length; i++) {
       try {
         regionThreads[i].join();
-        
-      } catch(InterruptedException e) {
+      } catch (InterruptedException e) {
+        e.printStackTrace();
       }
     }
     try {
-      masterThread.join();
-      
-    } catch(InterruptedException e) {
+      masterRunnerThread.join();
+    } catch (InterruptedException e) {
+      e.printStackTrace();
     }
-    
-    System.out.println("Shutting down Mini DFS cluster");
     if (cluster != null) {
+      LOG.info("Shutting down Mini DFS cluster");
       cluster.shutdown();
     }
     
     // Delete all DFS files
-
     deleteFile(new File(System.getProperty("test.build.data"), "dfs"));
-
   }
   
   private void deleteFile(File f) {
@@ -188,12 +196,14 @@
   
   private class HMasterRunner implements Runnable {
     private HMaster master = null;
+    private Thread masterThread = null;
     private volatile boolean isInitialized = false;
     private boolean isCrashed = false;
     private boolean isRunning = true;
+    private long threadSleepTime = conf.getLong(THREAD_WAKE_FREQUENCY, 10 * 1000);
     
     public HServerAddress getHMasterAddress() {
-      return master.getMasterAddress();
+      return this.master.getMasterAddress();
     }
     
     public synchronized boolean isInitialized() {
@@ -218,33 +228,46 @@
       try {
         synchronized(this) {
           if(isRunning) {
-            master = new HMaster(conf);
+            this.master = new HMaster(conf);
+            masterThread = new Thread(this.master);
+            masterThread.start();
           }
           isInitialized = true;
         }
       } catch(Throwable e) {
         shutdown();
-        System.err.println("HMaster crashed:");
-        e.printStackTrace();
+        LOG.error("HMaster crashed:", e);
         synchronized(this) {
           isCrashed = true;
         }
       }
+
+      while(this.master != null && this.master.isMasterRunning()) {
+        try {
+          Thread.sleep(threadSleepTime);
+          
+        } catch(InterruptedException e) {
+        }
+      }
+      synchronized(this) {
+        isCrashed = true;
+      }
+      shutdown();
     }
     
     /** Shut down the HMaster and wait for it to finish */
     public synchronized void shutdown() {
       isRunning = false;
-      if(master != null) {
+      if (this.master != null) {
         try {
-          master.stop();
-          
+          this.master.shutdown();
         } catch(IOException e) {
-          System.err.println("Master crashed during stop");
-          e.printStackTrace();
-          
+          LOG.error("Master crashed during stop", e);
         } finally {
-          master.join();
+          try {
+            masterThread.join();
+          } catch(InterruptedException e) {
+          }
           master = null;
         }
       }
@@ -272,8 +295,7 @@
         
       } catch(Throwable e) {
         shutdown();
-        System.err.println("HRegionServer crashed:");
-        e.printStackTrace();
+        LOG.error("HRegionServer crashed:", e);
       }
     }
     
@@ -285,9 +307,7 @@
           server.stop();
           
         } catch(IOException e) {
-          System.err.println("HRegionServer crashed during stop");
-          e.printStackTrace();
-          
+          LOG.error("HRegionServer crashed during stop", e);
         } finally {
           server.join();
           server = null;

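The new three-argument constructor lets a test skip the MiniDFSCluster and run against whatever filesystem the configuration names. A sketch of the new mode (HBaseConfiguration is the Configuration subclass the tests below use):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.MiniHBaseCluster;

    public class MiniClusterExample {
      public static void main(String[] args) {
        Configuration conf = new HBaseConfiguration();
        // Pass false to use the filesystem configured in conf; the
        // two-argument form keeps the old embedded mini-HDFS behavior.
        MiniHBaseCluster cluster = new MiniHBaseCluster(conf, 1, false);
        try {
          // ... exercise the cluster ...
        } finally {
          cluster.shutdown();
        }
      }
    }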
Added: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java?view=auto&rev=535970
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java Mon May  7 12:58:53 2007
@@ -0,0 +1,231 @@
+/**
+ * Copyright 2006 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.util.Enumeration;
+import java.util.Iterator;
+import java.util.TreeMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.dfs.MiniDFSCluster;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.Text;
+
+import org.apache.log4j.Appender;
+import org.apache.log4j.ConsoleAppender;
+import org.apache.log4j.Layout;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.log4j.PatternLayout;
+
+import junit.framework.TestCase;
+
+public class TestGet extends TestCase {
+  private static final Text CONTENTS = new Text("contents:");
+  private static final Text ROW_KEY = new Text(HGlobals.rootRegionInfo.regionName);
+
+  
+  private void dumpRegion(HRegion r) throws IOException {
+    for(Iterator<HStore> i = r.stores.values().iterator(); i.hasNext(); ) {
+      i.next().dumpMaps();
+    }
+  }
+  
+  private void verifyGet(HRegion r) throws IOException {
+    // This should return a value because there is only one family member
+    
+    BytesWritable value = r.get(ROW_KEY, CONTENTS);
+    assertNotNull(value);
+    
+    // This should not return a value because there are multiple family members
+    
+    value = r.get(ROW_KEY, HConstants.COLUMN_FAMILY);
+    assertNull(value);
+    
+    // Find out what getFull returns
+    
+    TreeMap<Text, BytesWritable> values = r.getFull(ROW_KEY);
+    //assertEquals(4, values.keySet().size());
+    for(Iterator<Text> i = values.keySet().iterator(); i.hasNext(); ) {
+      Text column = i.next();
+      System.out.println(column);
+      if(column.equals(HConstants.COL_SERVER)) {
+        BytesWritable val = values.get(column);
+        byte[] bytes = new byte[val.getSize()];
+        System.arraycopy(val.get(), 0, bytes, 0, bytes.length);
+        System.out.println("  " + new String(bytes, HConstants.UTF8_ENCODING));
+      }
+    }
+  }
+  
+  @SuppressWarnings("unchecked")
+  public void testGet() throws IOException {
+    MiniDFSCluster cluster = null;
+
+    try {
+      
+      // Initialization
+      
+      if(System.getProperty("test.build.data") == null) {
+        String dir = new File(new File("").getAbsolutePath(), "build/contrib/hbase/test").getAbsolutePath();
+        System.out.println(dir);
+        System.setProperty("test.build.data", dir);
+      }
+      Configuration conf = new HBaseConfiguration();
+    
+      Environment.getenv();
+      if(Environment.debugging) {
+        Logger rootLogger = Logger.getRootLogger();
+        rootLogger.setLevel(Level.WARN);
+
+        ConsoleAppender consoleAppender = null;
+        for(Enumeration<Appender> e = (Enumeration<Appender>)rootLogger.getAllAppenders();
+            e.hasMoreElements();) {
+        
+          Appender a = e.nextElement();
+          if(a instanceof ConsoleAppender) {
+            consoleAppender = (ConsoleAppender)a;
+            break;
+          }
+        }
+        if(consoleAppender != null) {
+          Layout layout = consoleAppender.getLayout();
+          if(layout instanceof PatternLayout) {
+            PatternLayout consoleLayout = (PatternLayout)layout;
+            consoleLayout.setConversionPattern("%d %-5p [%t] %l: %m%n");
+          }
+        }
+        Logger.getLogger("org.apache.hadoop.hbase").setLevel(Environment.logLevel);
+      }
+      cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+      FileSystem fs = cluster.getFileSystem();
+      Path dir = new Path("/hbase");
+      fs.mkdirs(dir);
+      
+      HTableDescriptor desc = new HTableDescriptor("test", 1);
+      desc.addFamily(CONTENTS);
+      desc.addFamily(HConstants.COLUMN_FAMILY);
+      
+      HRegionInfo info = new HRegionInfo(0L, desc, null, null);
+      Path regionDir = HStoreFile.getHRegionDir(dir, info.regionName);
+      fs.mkdirs(regionDir);
+      
+      HLog log = new HLog(fs, new Path(regionDir, "log"), conf);
+
+      HRegion r = new HRegion(dir, log, fs, conf, info, null, null);
+      
+      // Write information to the table
+      
+      long lockid = r.startUpdate(ROW_KEY);
+      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+      DataOutputStream s = new DataOutputStream(bytes);
+      CONTENTS.write(s);
+      r.put(lockid, CONTENTS, new BytesWritable(bytes.toByteArray()));
+
+      bytes.reset();
+      HGlobals.rootRegionInfo.write(s);
+      
+      r.put(lockid, HConstants.COL_REGIONINFO, new BytesWritable(bytes.toByteArray()));
+      
+      r.commit(lockid);
+      
+      lockid = r.startUpdate(ROW_KEY);
+
+      r.put(lockid, HConstants.COL_SERVER, 
+          new BytesWritable(
+              new HServerAddress("foo.bar.com:1234").toString().getBytes(HConstants.UTF8_ENCODING)
+              )
+      );
+      
+      r.put(lockid, HConstants.COL_STARTCODE, 
+          new BytesWritable(
+              String.valueOf(lockid).getBytes(HConstants.UTF8_ENCODING)
+              )
+      );
+      
+      r.put(lockid, new Text(HConstants.COLUMN_FAMILY + "region"), 
+          new BytesWritable("region".getBytes(HConstants.UTF8_ENCODING)));
+
+      r.commit(lockid);
+      
+      // Verify that get works the same from memcache as when reading from disk
+      // NOTE dumpRegion won't work here because it only reads from disk.
+      
+      verifyGet(r);
+      
+      // Close and re-open region, forcing updates to disk
+      
+      r.close();
+      log.rollWriter();
+      r = new HRegion(dir, log, fs, conf, info, null, null);
+      
+      // Read it back
+      
+      dumpRegion(r);
+      verifyGet(r);
+      
+      // Update one family member and add a new one
+      
+      lockid = r.startUpdate(ROW_KEY);
+
+      r.put(lockid, new Text(HConstants.COLUMN_FAMILY + "region"),
+          new BytesWritable("region2".getBytes()));
+
+      r.put(lockid, HConstants.COL_SERVER, 
+          new BytesWritable(
+              new HServerAddress("bar.foo.com:4321").toString().getBytes(HConstants.UTF8_ENCODING)
+              )
+      );
+      
+      r.put(lockid, new Text(HConstants.COLUMN_FAMILY + "junk"),
+          new BytesWritable("junk".getBytes()));
+      
+      r.commit(lockid);
+
+      verifyGet(r);
+      
+      // Close region and re-open it
+      
+      r.close();
+      log.rollWriter();
+      r = new HRegion(dir, log, fs, conf, info, null, null);
+
+      // Read it back
+      
+      dumpRegion(r);
+      verifyGet(r);
+
+      // Close region once and for all
+      
+      r.close();
+      
+    } catch(IOException e) {
+      e.printStackTrace();
+      throw e;
+      
+    } finally {
+      if(cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+}

Added: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java?view=auto&rev=535970
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java Mon May  7 12:58:53 2007
@@ -0,0 +1,289 @@
+/**
+ * Copyright 2006 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Enumeration;
+import java.util.Iterator;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+import junit.framework.TestCase;
+import junit.framework.Test;
+import junit.framework.TestSuite;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+
+import org.apache.log4j.Appender;
+import org.apache.log4j.ConsoleAppender;
+import org.apache.log4j.Layout;
+import org.apache.log4j.Logger;
+import org.apache.log4j.Level;
+import org.apache.log4j.PatternLayout;
+
+/**
+ * Test HBase Master and Region servers, client API 
+ */
+public class TestHBaseCluster extends TestCase {
+
+  /** constructor */
+  public TestHBaseCluster(String name) {
+    super(name);
+  }
+
+  /** Test suite so that all tests get run */
+  public static Test suite() {
+    TestSuite suite = new TestSuite();
+    suite.addTest(new TestHBaseCluster("testSetup"));
+    suite.addTest(new TestHBaseCluster("testBasic"));
+    suite.addTest(new TestHBaseCluster("testScanner"));
+    suite.addTest(new TestHBaseCluster("testCleanup"));
+    return suite;
+  }
+
+  private static final int FIRST_ROW = 1;
+  private static final int NUM_VALS = 1000;
+  private static final Text CONTENTS = new Text("contents:");
+  private static final Text CONTENTS_BASIC = new Text("contents:basic");
+  private static final String CONTENTSTR = "contentstr";
+  private static final Text ANCHOR = new Text("anchor:");
+  private static final String ANCHORNUM = "anchor:anchornum-";
+  private static final String ANCHORSTR = "anchorstr";
+
+  private static Configuration conf = null;
+  private static boolean failures = false;
+  private static boolean initialized = false;
+  private static MiniHBaseCluster cluster = null;
+  private static HTableDescriptor desc = null;
+  private static HClient client = null;
+
+  // Set up environment, start mini cluster, etc.
+  
+  @SuppressWarnings("unchecked")
+  public void testSetup() throws Exception {
+    try {
+      if(System.getProperty("test.build.data") == null) {
+        String dir = new File(new File("").getAbsolutePath(), "build/contrib/hbase/test").getAbsolutePath();
+        System.out.println(dir);
+        System.setProperty("test.build.data", dir);
+      }
+      conf = new HBaseConfiguration();
+      
+      Environment.getenv();
+      if(Environment.debugging) {
+        Logger rootLogger = Logger.getRootLogger();
+        rootLogger.setLevel(Level.WARN);
+
+        ConsoleAppender consoleAppender = null;
+        for(Enumeration<Appender> e = (Enumeration<Appender>)rootLogger.getAllAppenders();
+            e.hasMoreElements();) {
+        
+          Appender a = e.nextElement();
+          if(a instanceof ConsoleAppender) {
+            consoleAppender = (ConsoleAppender)a;
+            break;
+          }
+        }
+        if(consoleAppender != null) {
+          Layout layout = consoleAppender.getLayout();
+          if(layout instanceof PatternLayout) {
+            PatternLayout consoleLayout = (PatternLayout)layout;
+            consoleLayout.setConversionPattern("%d %-5p [%t] %l: %m%n");
+          }
+        }
+        Logger.getLogger("org.apache.hadoop.hbase").setLevel(Environment.logLevel);
+      }
+      cluster = new MiniHBaseCluster(conf, 1);
+      client = new HClient(conf);
+
+      desc = new HTableDescriptor("test", 3);
+      desc.addFamily(new Text(CONTENTS));
+      desc.addFamily(new Text(ANCHOR));
+      client.createTable(desc);
+      
+    } catch(Exception e) {
+      failures = true;
+      throw e;
+    }
+    initialized = true;
+  }
+      
+  // Test basic functionality. Writes to contents:basic and anchor:anchornum-*
+
+  public void testBasic() throws IOException {
+    if(!initialized) {
+      throw new IllegalStateException();
+    }
+
+    try {
+      long startTime = System.currentTimeMillis();
+      
+      client.openTable(desc.getName());
+      
+      // Write out a bunch of values
+      
+      for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
+        long writeid = client.startUpdate(new Text("row_" + k));
+        client.put(writeid, CONTENTS_BASIC, (CONTENTSTR + k).getBytes());
+        client.put(writeid, new Text(ANCHORNUM + k), (ANCHORSTR + k).getBytes());
+        client.commit(writeid);
+      }
+      System.out.println("Write " + NUM_VALS + " rows. Elapsed time: "
+          + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+      // Read them back in
+
+      startTime = System.currentTimeMillis();
+      
+      Text collabel = null;
+      for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
+        Text rowlabel = new Text("row_" + k);
+
+        byte bodydata[] = client.get(rowlabel, CONTENTS_BASIC);
+        assertNotNull(bodydata);
+        String bodystr = new String(bodydata).toString().trim();
+        String teststr = CONTENTSTR + k;
+        assertEquals("Incorrect value for key: (" + rowlabel + "," + CONTENTS_BASIC
+            + "), expected: '" + teststr + "' got: '" + bodystr + "'",
+            bodystr, teststr);
+        collabel = new Text(ANCHORNUM + k);
+        bodydata = client.get(rowlabel, collabel);
+        bodystr = new String(bodydata).toString().trim();
+        teststr = ANCHORSTR + k;
+        assertEquals("Incorrect value for key: (" + rowlabel + "," + collabel
+            + "), expected: '" + teststr + "' got: '" + bodystr + "'",
+            bodystr, teststr);
+      }
+      
+      System.out.println("Read " + NUM_VALS + " rows. Elapsed time: "
+          + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+    } catch(IOException e) {
+      failures = true;
+      throw e;
+    }
+  }
+  
+  public void testScanner() throws IOException {
+    if(!initialized || failures) {
+      throw new IllegalStateException();
+    }
+
+    Text[] cols = new Text[] {
+        new Text(ANCHORNUM + "[0-9]+"),
+        new Text(CONTENTS_BASIC)
+    };
+    
+    long startTime = System.currentTimeMillis();
+    
+    HScannerInterface s = client.obtainScanner(cols, new Text());
+    try {
+
+      int contentsFetched = 0;
+      int anchorFetched = 0;
+      HStoreKey curKey = new HStoreKey();
+      TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
+      int k = 0;
+      while(s.next(curKey, curVals)) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+          Text col = it.next();
+          byte val[] = curVals.get(col);
+          String curval = new String(val).trim();
+
+          if(col.compareTo(CONTENTS_BASIC) == 0) {
+            assertTrue("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
+                + ", Value for " + col + " should start with: " + CONTENTSTR
+                + ", but was fetched as: " + curval,
+                curval.startsWith(CONTENTSTR));
+            contentsFetched++;
+            
+          } else if(col.toString().startsWith(ANCHORNUM)) {
+            assertTrue("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
+                + ", Value for " + col + " should start with: " + ANCHORSTR
+                + ", but was fetched as: " + curval,
+                curval.startsWith(ANCHORSTR));
+            anchorFetched++;
+            
+          } else {
+            System.out.println(col);
+          }
+        }
+        curVals.clear();
+        k++;
+      }
+      assertEquals("Expected " + NUM_VALS + " " + CONTENTS_BASIC + " values, but fetched " + contentsFetched, NUM_VALS, contentsFetched);
+      assertEquals("Expected " + NUM_VALS + " " + ANCHORNUM + " values, but fetched " + anchorFetched, NUM_VALS, anchorFetched);
+
+      System.out.println("Scanned " + NUM_VALS
+          + " rows. Elapsed time: "
+          + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+    } catch(IOException e) {
+      failures = true;
+      throw e;
+      
+    } finally {
+      s.close();
+    }
+  }
+
+  public void testListTables() throws IOException {
+    if(!initialized || failures) {
+      throw new IllegalStateException();
+    }
+    
+    try {
+      HTableDescriptor[] tables = client.listTables();
+      assertEquals(1, tables.length);
+      assertEquals(desc.getName(), tables[0].getName());
+      TreeSet<Text> families = tables[0].families();
+      assertEquals(2, families.size());
+      assertTrue(families.contains(new Text(CONTENTS)));
+      assertTrue(families.contains(new Text(ANCHOR)));
+      
+    } catch(IOException e) {
+      failures = true;
+      throw e;
+    }
+  }
+  
+  public void testCleanup() throws IOException {
+    if(!initialized) {
+      throw new IllegalStateException();
+    }
+    
+    try {
+      if(!failures) {
+        // Delete the table we created
+
+        client.deleteTable(desc.getName());
+        try {
+          Thread.sleep(60000);                  // Wait for table to be deleted
+          
+        } catch(InterruptedException e) {
+        }
+      }
+      
+    } finally {
+      // Shut down the cluster
+    
+      cluster.shutdown();
+      client.close();
+    }
+  }
+}

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHMemcache.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHMemcache.java?view=diff&rev=535970&r1=535969&r2=535970
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHMemcache.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHMemcache.java Mon May  7 12:58:53 2007
@@ -16,6 +16,8 @@
 package org.apache.hadoop.hbase;
 
 import java.io.IOException;
+import java.util.Iterator;
+import java.util.Map;
 import java.util.TreeMap;
 
 import junit.framework.TestCase;
@@ -26,11 +28,8 @@
 import org.apache.hadoop.hbase.HMemcache.Snapshot;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.log4j.Logger;
 
 public class TestHMemcache extends TestCase {
-  private final Logger LOG =
-    Logger.getLogger(this.getClass().getName());
   
   private HMemcache hmemcache;
 
@@ -77,10 +76,10 @@
    */
   private void addRows(final HMemcache hmc) {
     for (int i = 0; i < ROW_COUNT; i++) {
-      TreeMap<Text, byte[]> columns = new TreeMap<Text, byte[]>();
+      TreeMap<Text, BytesWritable> columns = new TreeMap<Text, BytesWritable>();
       for (int ii = 0; ii < COLUMNS_COUNT; ii++) {
         Text k = getColumnName(i, ii);
-        columns.put(k, k.toString().getBytes());
+        columns.put(k, new BytesWritable(k.toString().getBytes()));
       }
       hmc.add(getRowName(i), columns, System.currentTimeMillis());
     }
@@ -139,7 +138,7 @@
   }
   
   private void isExpectedRow(final int rowIndex,
-      TreeMap<Text, byte[]> row) {
+      TreeMap<Text, BytesWritable> row) {
     int i = 0;
     for (Text colname: row.keySet()) {
       String expectedColname =
@@ -150,8 +149,10 @@
       // 100 bytes in size at least. This is the default size
      // for BytesWritable.  For comparison, convert bytes to
       // String and trim to remove trailing null bytes.
-      String colvalueStr =
-        new String(row.get(colname)).trim();
+      BytesWritable value = row.get(colname);
+      byte[] bytes = new byte[value.getSize()];
+      System.arraycopy(value.get(), 0, bytes, 0, bytes.length);
+      String colvalueStr = new String(bytes).trim();
       assertEquals("Content", colnameStr, colvalueStr);
     }
   }
@@ -160,7 +161,7 @@
     addRows(this.hmemcache);
     for (int i = 0; i < ROW_COUNT; i++) {
       HStoreKey hsk = new HStoreKey(getRowName(i));
-      TreeMap<Text, byte[]> all = this.hmemcache.getFull(hsk);
+      TreeMap<Text, BytesWritable> all = this.hmemcache.getFull(hsk);
       isExpectedRow(i, all);
     }
   }
@@ -174,16 +175,22 @@
         cols[(ii + (i * COLUMNS_COUNT))] = getColumnName(i, ii);
       }
     }
-    HScannerInterface scanner =
+    HInternalScannerInterface scanner =
       this.hmemcache.getScanner(timestamp, cols, new Text());
     HStoreKey key = new HStoreKey();
-    TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
+    TreeMap<Text, BytesWritable> results = new TreeMap<Text, BytesWritable>();
     for (int i = 0; scanner.next(key, results); i++) {
       assertTrue("Row name",
           key.toString().startsWith(getRowName(i).toString()));
       assertEquals("Count of columns", COLUMNS_COUNT,
           results.size());
-      isExpectedRow(i, results);
+      TreeMap<Text, BytesWritable> row = new TreeMap<Text, BytesWritable>();
+      for(Iterator<Map.Entry<Text, BytesWritable>> it = results.entrySet().iterator();
+          it.hasNext(); ) {
+        Map.Entry<Text, BytesWritable> e = it.next();
+        row.put(e.getKey(), e.getValue());
+      }
+      isExpectedRow(i, row);
       // Clear out set.  Otherwise row results accumulate.
       results.clear();
     }

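The getSize()/System.arraycopy idiom above (and repeated throughout the tests below) exists because BytesWritable.get() returns the backing array padded to its capacity, so only the first getSize() bytes are valid. A hypothetical helper, not part of this commit, that captures the idiom:

    import org.apache.hadoop.io.BytesWritable;

    public class BytesWritableUtil {
      /** Copy out only the valid prefix of the backing array. */
      public static byte[] toBytes(BytesWritable w) {
        byte[] b = new byte[w.getSize()];
        System.arraycopy(w.get(), 0, b, 0, b.length);
        return b;
      }

      /** Decode the valid bytes as a trimmed String, as the tests do. */
      public static String toTrimmedString(BytesWritable w) {
        return new String(toBytes(w)).trim();
      }
    }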
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java?view=diff&rev=535970&r1=535969&r2=535970
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java Mon May  7 12:58:53 2007
@@ -31,6 +31,7 @@
 import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Text;
 
 import org.apache.log4j.Appender;
@@ -167,8 +168,11 @@
       
       for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
         long writeid = region.startUpdate(new Text("row_" + k));
-        region.put(writeid, CONTENTS_BASIC, (CONTENTSTR + k).getBytes());
-        region.put(writeid, new Text(ANCHORNUM + k), (ANCHORSTR + k).getBytes());
+        region.put(writeid, CONTENTS_BASIC,
+            new BytesWritable((CONTENTSTR + k).getBytes()));
+        
+        region.put(writeid, new Text(ANCHORNUM + k),
+            new BytesWritable((ANCHORSTR + k).getBytes()));
         region.commit(writeid);
       }
       System.out.println("Write " + NUM_VALS + " rows. Elapsed time: "
@@ -191,16 +195,20 @@
       for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
         Text rowlabel = new Text("row_" + k);
 
-        byte bodydata[] = region.get(rowlabel, CONTENTS_BASIC);
+        BytesWritable bodydata = region.get(rowlabel, CONTENTS_BASIC);
         assertNotNull(bodydata);
-        String bodystr = new String(bodydata).toString().trim();
+        byte[] bytes = new byte[bodydata.getSize()];
+        System.arraycopy(bodydata.get(), 0, bytes, 0, bytes.length);
+        String bodystr = new String(bytes).toString().trim();
         String teststr = CONTENTSTR + k;
         assertEquals("Incorrect value for key: (" + rowlabel + "," + CONTENTS_BASIC
             + "), expected: '" + teststr + "' got: '" + bodystr + "'",
             bodystr, teststr);
         collabel = new Text(ANCHORNUM + k);
         bodydata = region.get(rowlabel, collabel);
-        bodystr = new String(bodydata).toString().trim();
+        bytes = new byte[bodydata.getSize()];
+        System.arraycopy(bodydata.get(), 0, bytes, 0, bytes.length);
+        bodystr = new String(bytes).toString().trim();
         teststr = ANCHORSTR + k;
         assertEquals("Incorrect value for key: (" + rowlabel + "," + collabel
             + "), expected: '" + teststr + "' got: '" + bodystr + "'",
@@ -224,7 +232,7 @@
     // Try put with bad lockid.
     boolean exceptionThrown = false;
     try {
-      region.put(-1, CONTENTS_BASIC, "bad input".getBytes());
+      region.put(-1, CONTENTS_BASIC, new BytesWritable("bad input".getBytes()));
     } catch (LockException e) {
       exceptionThrown = true;
     }
@@ -237,7 +245,7 @@
       lockid = region.startUpdate(new Text("Some old key"));
       String unregisteredColName = "FamilyGroup:FamilyLabel";
       region.put(lockid, new Text(unregisteredColName),
-          unregisteredColName.getBytes());
+          new BytesWritable(unregisteredColName.getBytes()));
     } catch (IOException e) {
       exceptionThrown = true;
     } finally {
@@ -333,8 +341,8 @@
       String kLabel = String.format("%1$03d", k);
 
       long lockid = region.startUpdate(new Text("row_vals1_" + kLabel));
-      region.put(lockid, cols[0], vals1[k].getBytes());
-      region.put(lockid, cols[1], vals1[k].getBytes());
+      region.put(lockid, cols[0], new BytesWritable(vals1[k].getBytes()));
+      region.put(lockid, cols[1], new BytesWritable(vals1[k].getBytes()));
       region.commit(lockid);
       numInserted += 2;
     }
@@ -346,17 +354,19 @@
     
     startTime = System.currentTimeMillis();
 
-    HScannerInterface s = region.getScanner(cols, new Text());
+    HInternalScannerInterface s = region.getScanner(cols, new Text());
     int numFetched = 0;
     try {
       HStoreKey curKey = new HStoreKey();
-      TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
+      TreeMap<Text, BytesWritable> curVals = new TreeMap<Text, BytesWritable>();
       int k = 0;
       while(s.next(curKey, curVals)) {
         for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
-          byte val[] = curVals.get(col);
-          int curval = Integer.parseInt(new String(val).trim());
+          BytesWritable val = curVals.get(col);
+          byte[] bytes = new byte[val.getSize()];
+          System.arraycopy(val.get(), 0, bytes, 0, bytes.length);
+          int curval = Integer.parseInt(new String(bytes).trim());
 
           for(int j = 0; j < cols.length; j++) {
             if(col.compareTo(cols[j]) == 0) {
@@ -396,13 +406,15 @@
     numFetched = 0;
     try {
       HStoreKey curKey = new HStoreKey();
-      TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
+      TreeMap<Text, BytesWritable> curVals = new TreeMap<Text, BytesWritable>();
       int k = 0;
       while(s.next(curKey, curVals)) {
         for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
-          byte val[] = curVals.get(col);
-          int curval = Integer.parseInt(new String(val).trim());
+          BytesWritable val = curVals.get(col);
+          byte[] bytes = new byte[val.getSize()];
+          System.arraycopy(val.get(), 0, bytes, 0, bytes.length);
+          int curval = Integer.parseInt(new String(bytes).trim());
 
           for(int j = 0; j < cols.length; j++) {
             if(col.compareTo(cols[j]) == 0) {
@@ -433,8 +445,8 @@
       String kLabel = String.format("%1$03d", k);
       
       long lockid = region.startUpdate(new Text("row_vals1_" + kLabel));
-      region.put(lockid, cols[0], vals1[k].getBytes());
-      region.put(lockid, cols[1], vals1[k].getBytes());
+      region.put(lockid, cols[0], new BytesWritable(vals1[k].getBytes()));
+      region.put(lockid, cols[1], new BytesWritable(vals1[k].getBytes()));
       region.commit(lockid);
       numInserted += 2;
     }
@@ -450,13 +462,15 @@
     numFetched = 0;
     try {
       HStoreKey curKey = new HStoreKey();
-      TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
+      TreeMap<Text, BytesWritable> curVals = new TreeMap<Text, BytesWritable>();
       int k = 0;
       while(s.next(curKey, curVals)) {
         for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
-          byte val[] = curVals.get(col);
-          int curval = Integer.parseInt(new String(val).trim());
+          BytesWritable val = curVals.get(col);
+          byte[] bytes = new byte[val.getSize()];
+          System.arraycopy(val.get(), 0, bytes, 0, bytes.length);
+          int curval = Integer.parseInt(new String(bytes).trim());
 
           for(int j = 0; j < cols.length; j++) {
             if(col.compareTo(cols[j]) == 0) {
@@ -496,13 +510,15 @@
     numFetched = 0;
     try {
       HStoreKey curKey = new HStoreKey();
-      TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
+      TreeMap<Text, BytesWritable> curVals = new TreeMap<Text, BytesWritable>();
       int k = 0;
       while(s.next(curKey, curVals)) {
         for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
-          byte val[] = curVals.get(col);
-          int curval = Integer.parseInt(new String(val).trim());
+          BytesWritable val = curVals.get(col);
+          byte[] bytes = new byte[val.getSize()];
+          System.arraycopy(val.get(), 0, bytes, 0, bytes.length);
+          int curval = Integer.parseInt(new String(bytes).trim());
 
           for (int j = 0; j < cols.length; j++) {
             if (col.compareTo(cols[j]) == 0) {
@@ -532,13 +548,15 @@
     numFetched = 0;
     try {
       HStoreKey curKey = new HStoreKey();
-      TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
+      TreeMap<Text, BytesWritable> curVals = new TreeMap<Text, BytesWritable>();
       int k = 500;
       while(s.next(curKey, curVals)) {
         for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
-          byte val[] = curVals.get(col);
-          int curval = Integer.parseInt(new String(val).trim());
+          BytesWritable val = curVals.get(col);
+          byte[] bytes = new byte[val.getSize()];
+          System.arraycopy(val.get(), 0, bytes, 0, bytes.length);
+          int curval = Integer.parseInt(new String(bytes).trim());
 
           for (int j = 0; j < cols.length; j++) {
             if (col.compareTo(cols[j]) == 0) {
@@ -592,7 +610,7 @@
 
         // Write to the HRegion
         long writeid = region.startUpdate(new Text("row_" + k));
-        region.put(writeid, CONTENTS_BODY, buf1.toString().getBytes());
+        region.put(writeid, CONTENTS_BODY, new BytesWritable(buf1.toString().getBytes()));
         region.commit(writeid);
         if (k > 0 && k % (N_ROWS / 100) == 0) {
           System.out.println("Flushing write #" + k);
@@ -707,20 +725,22 @@
     
     long startTime = System.currentTimeMillis();
     
-    HScannerInterface s = region.getScanner(cols, new Text());
+    HInternalScannerInterface s = region.getScanner(cols, new Text());
 
     try {
 
       int contentsFetched = 0;
       int anchorFetched = 0;
       HStoreKey curKey = new HStoreKey();
-      TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
+      TreeMap<Text, BytesWritable> curVals = new TreeMap<Text, BytesWritable>();
       int k = 0;
       while(s.next(curKey, curVals)) {
         for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
-          byte val[] = curVals.get(col);
-          String curval = new String(val).trim();
+          BytesWritable val = curVals.get(col);
+          byte[] bytes = new byte[val.getSize()];
+          System.arraycopy(val.get(), 0, bytes, 0, bytes.length);
+          String curval = new String(bytes).trim();
 
           if(col.compareTo(CONTENTS_BASIC) == 0) {
             assertTrue("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
@@ -767,13 +787,15 @@
     try {
       int numFetched = 0;
       HStoreKey curKey = new HStoreKey();
-      TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
+      TreeMap<Text, BytesWritable> curVals = new TreeMap<Text, BytesWritable>();
       int k = 0;
       while(s.next(curKey, curVals)) {
         for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
-          byte val[] = curVals.get(col);
-          int curval = Integer.parseInt(new String(val).trim());
+          BytesWritable val = curVals.get(col);
+          byte[] bytes = new byte[val.getSize()];
+          System.arraycopy(val.get(), 0, bytes, 0, bytes.length);
+          int curval = Integer.parseInt(new String(bytes).trim());
 
           for (int j = 0; j < cols.length; j++) {
             if (col.compareTo(cols[j]) == 0) {
@@ -805,12 +827,12 @@
       try {
         int numFetched = 0;
         HStoreKey curKey = new HStoreKey();
-        TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
+        TreeMap<Text, BytesWritable> curVals = new TreeMap<Text, BytesWritable>();
         int k = 0;
         while(s.next(curKey, curVals)) {
           for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
             Text col = it.next();
-            byte val[] = curVals.get(col);
+            BytesWritable val = curVals.get(col);
 
             assertTrue(col.compareTo(CONTENTS_BODY) == 0);
             assertNotNull(val);
@@ -843,7 +865,7 @@
     try {
       int fetched = 0;
       HStoreKey curKey = new HStoreKey();
-      TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
+      TreeMap<Text, BytesWritable> curVals = new TreeMap<Text, BytesWritable>();
       while(s.next(curKey, curVals)) {
         for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           it.next();

Added: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner.java?view=auto&rev=535970
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner.java (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner.java Mon May  7 12:58:53 2007
@@ -0,0 +1,319 @@
+/**
+ * Copyright 2006 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.util.Enumeration;
+import java.util.TreeMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.dfs.MiniDFSCluster;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.Text;
+
+import org.apache.log4j.Appender;
+import org.apache.log4j.ConsoleAppender;
+import org.apache.log4j.Layout;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.log4j.PatternLayout;
+
+import junit.framework.TestCase;
+
+public class TestScanner extends TestCase {
+  private static final Text FIRST_ROW = new Text();
+  private static final Text[] COLS = {
+      HConstants.COLUMN_FAMILY
+  };
+  private static final Text[] EXPLICIT_COLS = {
+    HConstants.COL_REGIONINFO,
+    HConstants.COL_SERVER,
+    HConstants.COL_STARTCODE
+  };
+  
+  private static final Text ROW_KEY = new Text(HGlobals.rootRegionInfo.regionName);
+  private static final HRegionInfo REGION_INFO = 
+    new HRegionInfo(0L, HGlobals.rootTableDesc, null, null);
+  
+  private static final long START_CODE = Long.MAX_VALUE;
+
+  private HRegion region;
+  private DataInputBuffer in = new DataInputBuffer();
+
+  /** Compare the HRegionInfo we read from HBase to what we stored */
+  private void validateRegionInfo(BytesWritable regionBytes) throws IOException {
+    in.reset(regionBytes.get(), regionBytes.getSize());
+    HRegionInfo info = new HRegionInfo();
+    info.readFields(in);
+    
+    assertEquals(REGION_INFO.regionId, info.regionId);
+    assertEquals(0, info.startKey.getLength());
+    assertEquals(0, info.endKey.getLength());
+    assertEquals(0, info.regionName.compareTo(REGION_INFO.regionName));
+    assertEquals(0, info.tableDesc.compareTo(REGION_INFO.tableDesc));
+  }
+  
+  /** Use a scanner to get the region info and then validate the results */
+  private void scan(boolean validateStartcode, String serverName)
+      throws IOException {
+    
+    HInternalScannerInterface scanner = null;
+    TreeMap<Text, BytesWritable> results = new TreeMap<Text, BytesWritable>();
+    HStoreKey key = new HStoreKey();
+
+    Text[][] scanColumns = {
+        COLS,
+        EXPLICIT_COLS
+    };
+    
+    for(int i = 0; i < scanColumns.length; i++) {
+      try {
+        scanner = region.getScanner(scanColumns[i], FIRST_ROW);
+        while(scanner.next(key, results)) {
+          assertTrue(results.containsKey(HConstants.COL_REGIONINFO));
+          BytesWritable val = results.get(HConstants.COL_REGIONINFO); 
+          byte[] bytes = new byte[val.getSize()];
+          System.arraycopy(val.get(), 0, bytes, 0, bytes.length);
+          
+          validateRegionInfo(new BytesWritable(bytes));
+          
+          if(validateStartcode) {
+            assertTrue(results.containsKey(HConstants.COL_STARTCODE));
+            val = results.get(HConstants.COL_STARTCODE);
+            assertNotNull(val);
+            bytes = new byte[val.getSize()];
+            System.arraycopy(val.get(), 0, bytes, 0, bytes.length);
+            assertFalse(bytes.length == 0);
+            long startCode =
+              Long.parseLong(new String(bytes, HConstants.UTF8_ENCODING));
+            assertEquals(START_CODE, startCode);
+          }
+          
+          if(serverName != null) {
+            assertTrue(results.containsKey(HConstants.COL_SERVER));
+            val = results.get(HConstants.COL_SERVER);
+            assertNotNull(val);
+            bytes = new byte[val.getSize()];
+            System.arraycopy(val.get(), 0, bytes, 0, bytes.length);
+            assertFalse(bytes.length == 0);
+            String server = new String(bytes, HConstants.UTF8_ENCODING);
+            assertEquals(0, server.compareTo(serverName));
+          }
+          results.clear();
+        }
+
+      } catch(IOException e) {
+        e.printStackTrace();
+        throw e;
+      
+      } finally {
+        if(scanner != null) {
+          try {
+            scanner.close();
+          
+          } catch(IOException e) {
+            e.printStackTrace();
+          }
+          scanner = null;
+        }
+      }
+    }
+  }
+
+  /** Use get to retrieve the HRegionInfo and validate it */
+  private void getRegionInfo() throws IOException {
+    BytesWritable bytes = region.get(ROW_KEY, HConstants.COL_REGIONINFO);
+    validateRegionInfo(bytes);  
+  }
+ 
+  /** The test! */
+  @SuppressWarnings("unchecked")
+  public void testScanner() throws IOException {
+    MiniDFSCluster cluster = null;
+    FileSystem fs = null;
+    
+    try {
+      
+      // Initialization
+      
+      if(System.getProperty("test.build.data") == null) {
+        String dir = new File(new File("").getAbsolutePath(),
+            "build/contrib/hbase/test").getAbsolutePath();
+        System.out.println(dir);
+        System.setProperty("test.build.data", dir);
+      }
+      Configuration conf = new HBaseConfiguration();
+    
+      Environment.getenv();
+      if(Environment.debugging) {
+        Logger rootLogger = Logger.getRootLogger();
+        rootLogger.setLevel(Level.WARN);
+
+        ConsoleAppender consoleAppender = null;
+        for(Enumeration<Appender> e = (Enumeration<Appender>)rootLogger.getAllAppenders();
+            e.hasMoreElements();) {
+        
+          Appender a = e.nextElement();
+          if(a instanceof ConsoleAppender) {
+            consoleAppender = (ConsoleAppender)a;
+            break;
+          }
+        }
+        if(consoleAppender != null) {
+          Layout layout = consoleAppender.getLayout();
+          if(layout instanceof PatternLayout) {
+            PatternLayout consoleLayout = (PatternLayout)layout;
+            consoleLayout.setConversionPattern("%d %-5p [%t] %l: %m%n");
+          }
+        }
+        Logger.getLogger("org.apache.hadoop.hbase").setLevel(Environment.logLevel);
+      }
+      cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+      fs = cluster.getFileSystem();
+      Path dir = new Path("/hbase");
+      fs.mkdirs(dir);
+      
+      Path regionDir = HStoreFile.getHRegionDir(dir, REGION_INFO.regionName);
+      fs.mkdirs(regionDir);
+      
+      HLog log = new HLog(fs, new Path(regionDir, "log"), conf);
+
+      region = new HRegion(dir, log, fs, conf, REGION_INFO, null, null);
+      
+      // Write information to the meta table
+      
+      long lockid = region.startUpdate(ROW_KEY);
+
+      ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
+      DataOutputStream s = new DataOutputStream(byteStream);
+      HGlobals.rootRegionInfo.write(s);
+      region.put(lockid, HConstants.COL_REGIONINFO,
+          new BytesWritable(byteStream.toByteArray()));
+      region.commit(lockid);
+
+      // What we just committed is in the memcache. Verify that we can get
+      // it back both with scanning and get
+      
+      scan(false, null);
+      getRegionInfo();
+      
+      // Close and re-open
+      
+      region.close();
+      log.rollWriter();
+      region = new HRegion(dir, log, fs, conf, REGION_INFO, null, null);
+
+      // Verify we can get the data back now that it is on disk.
+      
+      scan(false, null);
+      getRegionInfo();
+      
+      // Store some new information
+ 
+      HServerAddress address = new HServerAddress("foo.bar.com:1234");
+
+      lockid = region.startUpdate(ROW_KEY);
+
+      region.put(lockid, HConstants.COL_SERVER, 
+          new BytesWritable(address.toString().getBytes(HConstants.UTF8_ENCODING)));
+
+      region.put(lockid, HConstants.COL_STARTCODE, 
+          new BytesWritable(
+              String.valueOf(START_CODE).getBytes(HConstants.UTF8_ENCODING)));
+
+      region.commit(lockid);
+      
+      // Validate that we can still get the HRegionInfo, even though it is in
+      // an older row on disk and there is a newer row in the memcache
+      
+      scan(true, address.toString());
+      getRegionInfo();
+      
+      // flush cache
+
+      region.flushcache(false);
+
+      // Validate again
+      
+      scan(true, address.toString());
+      getRegionInfo();
+
+      // Close and reopen
+      
+      region.close();
+      log.rollWriter();
+      region = new HRegion(dir, log, fs, conf, REGION_INFO, null, null);
+
+      // Validate again
+      
+      scan(true, address.toString());
+      getRegionInfo();
+
+      // Now update the information again
+
+      address = new HServerAddress("bar.foo.com:4321");
+      
+      lockid = region.startUpdate(ROW_KEY);
+
+      region.put(lockid, HConstants.COL_SERVER, 
+          new BytesWritable(address.toString().getBytes(HConstants.UTF8_ENCODING)));
+
+      region.commit(lockid);
+      
+      // Validate again
+      
+      scan(true, address.toString());
+      getRegionInfo();
+
+      // flush cache
+
+      region.flushcache(false);
+
+      // Validate again
+      
+      scan(true, address.toString());
+      getRegionInfo();
+
+      // Close and reopen
+      
+      region.close();
+      log.rollWriter();
+      region = new HRegion(dir, log, fs, conf, REGION_INFO, null, null);
+
+      // Validate again
+      
+      scan(true, address.toString());
+      getRegionInfo();
+
+    } catch(IOException e) {
+      e.printStackTrace();
+      throw e;
+      
+    } finally {
+      if(fs != null) {
+        fs.close();
+      }
+      if(cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+}

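For context, validateRegionInfo() is the read half of the standard
Hadoop Writable round-trip that testScanner() performs when it writes
HGlobals.rootRegionInfo into the region. A minimal sketch of the idiom,
using only classes the test already imports:

    // Serialize a Writable to a byte array...
    ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(byteStream);
    HGlobals.rootRegionInfo.write(out);
    byte[] serialized = byteStream.toByteArray();

    // ...then read it back through a DataInputBuffer.
    DataInputBuffer in = new DataInputBuffer();
    in.reset(serialized, serialized.length);
    HRegionInfo copy = new HRegionInfo();
    copy.readFields(in);

The same pattern, with BytesWritable.get()/getSize() supplying the byte
array, is what scan() and getRegionInfo() exercise above.
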
Added: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestToString.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestToString.java?view=auto&rev=535970
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestToString.java (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestToString.java Mon May  7 12:58:53 2007
@@ -0,0 +1,29 @@
+package org.apache.hadoop.hbase;
+
+import org.apache.hadoop.io.Text;
+
+import junit.framework.TestCase;
+
+public class TestToString extends TestCase {
+  public void testServerInfo() throws Exception {
+    final String hostport = "127.0.0.1:9999";
+    HServerAddress address = new HServerAddress(hostport);
+    assertEquals("HServerAddress toString", address.toString(), hostport);
+    HServerInfo info = new HServerInfo(address, -1);
+    assertEquals("HServerInfo", info.toString(),
+        "address: " + hostport + ", startcode: " + -1);
+  }
+  
+  public void testHRegionInfo() throws Exception {
+    HTableDescriptor htd = new HTableDescriptor("hank", 10);
+    htd.addFamily(new Text("hankfamily:"));
+    htd.addFamily(new Text("hankotherfamily:"));
+    assertEquals("Table descriptor", htd.toString(),
+     "name: hank, maxVersions: 10, families: [hankfamily:, hankotherfamily:]");
+    HRegionInfo hri = new HRegionInfo(-1, htd, new Text(), new Text("10"));
+    assertEquals("HRegionInfo", 
+        "regionname: hank__-1, startKey: <>, tableDesc: {name: hank, " +
+        "maxVersions: 10, families: [hankfamily:, hankotherfamily:]}",
+        hri.toString());
+  }
+}

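Both added files are plain JUnit 3 TestCases, so they can be driven
outside the build with the stock text runner. A minimal sketch (the
RunHBaseTests driver class is hypothetical, not part of this commit):

    package org.apache.hadoop.hbase;

    import junit.framework.TestSuite;
    import junit.textui.TestRunner;

    // Hypothetical driver; collects the two new tests into one suite.
    public class RunHBaseTests {
      public static void main(String[] args) {
        TestSuite suite = new TestSuite();
        suite.addTestSuite(TestScanner.class);  // scanner/get round-trip
        suite.addTestSuite(TestToString.class); // toString formatting
        TestRunner.run(suite);                  // results print to stdout
      }
    }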