hbase-commits mailing list archives

From apurt...@apache.org
Subject svn commit: r782445 [14/17] - in /hadoop/hbase/trunk_on_hadoop-0.18.3: ./ bin/ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/ src/java/org/apache/hadoop/hbase/client/tableindexed/ src/java/org/apache/hadoop/hbase/client/tran...
Date Sun, 07 Jun 2009 19:57:43 GMT
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java?rev=782445&r1=782444&r2=782445&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java Sun Jun  7 19:57:37 2009
@@ -114,7 +114,7 @@
     Path ncTFile = new Path(ROOT_DIR, "basic.hfile");
     FSDataOutputStream fout = createFSOutput(ncTFile);
     Writer writer = new Writer(fout, minBlockSize,
-      Compression.getCompressionAlgorithmByName(codec), null, false);
+      Compression.getCompressionAlgorithmByName(codec), null);
     LOG.info(writer);
     writeRecords(writer);
     fout.close();
@@ -178,7 +178,7 @@
     Path mFile = new Path(ROOT_DIR, "meta.hfile");
     FSDataOutputStream fout = createFSOutput(mFile);
     Writer writer = new Writer(fout, minBlockSize,
-      Compression.getCompressionAlgorithmByName(compress), null, false);
+      Compression.getCompressionAlgorithmByName(compress), null);
     someTestingWithMetaBlock(writer);
     writer.close();
     fout.close();
@@ -204,7 +204,7 @@
     Path mFile = new Path(ROOT_DIR, "nometa.hfile");
     FSDataOutputStream fout = createFSOutput(mFile);
     Writer writer = new Writer(fout, minBlockSize,
-        Compression.Algorithm.NONE, null, false);
+        Compression.Algorithm.NONE, null);
     writer.append("foo".getBytes(), "value".getBytes());
     writer.close();
     fout.close();
@@ -226,7 +226,7 @@
   public void testComparator() throws IOException {
     Path mFile = new Path(ROOT_DIR, "meta.tfile");
     FSDataOutputStream fout = createFSOutput(mFile);
-    Writer writer = new Writer(fout, minBlockSize, null,
+    Writer writer = new Writer(fout, minBlockSize, (Compression.Algorithm) null,
       new RawComparator<byte []>() {
         @Override
         public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2,
@@ -238,7 +238,7 @@
         public int compare(byte[] o1, byte[] o2) {
           return compare(o1, 0, o1.length, o2, 0, o2.length);
         }
-      }, false);
+      });
     writer.append("3".getBytes(), "0".getBytes());
     writer.append("2".getBytes(), "0".getBytes());
     writer.append("1".getBytes(), "0".getBytes());

Added: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/mapred/DisabledTestTableIndex.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/mapred/DisabledTestTableIndex.java?rev=782445&view=auto
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/mapred/DisabledTestTableIndex.java (added)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/mapred/DisabledTestTableIndex.java Sun Jun  7 19:57:37 2009
@@ -0,0 +1,266 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.mapred;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Map;
+import java.util.Random;
+
+import junit.framework.TestSuite;
+import junit.textui.TestRunner;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MultiRegionTable;
+import org.apache.hadoop.mapred.FileOutputFormat;
+import org.apache.hadoop.mapred.JobClient;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.MiniMRCluster;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MultiSearcher;
+import org.apache.lucene.search.Searchable;
+import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.TermQuery;
+
+/**
+ * Test Map/Reduce job to build index over HBase table
+ */
+public class DisabledTestTableIndex extends MultiRegionTable {
+  private static final Log LOG = LogFactory.getLog(DisabledTestTableIndex.class);
+
+  static final String TABLE_NAME = "moretest";
+  static final String INPUT_COLUMN = "contents:";
+  static final byte [] TEXT_INPUT_COLUMN = Bytes.toBytes(INPUT_COLUMN);
+  static final String OUTPUT_COLUMN = "text:";
+  static final byte [] TEXT_OUTPUT_COLUMN = Bytes.toBytes(OUTPUT_COLUMN);
+  static final String ROWKEY_NAME = "key";
+  static final String INDEX_DIR = "testindex";
+  private static final byte [][] columns = new byte [][] {
+    TEXT_INPUT_COLUMN,
+    TEXT_OUTPUT_COLUMN
+  };
+
+  private JobConf jobConf = null;
+
+  /** default constructor */
+  public DisabledTestTableIndex() {
+    super(INPUT_COLUMN);
+    desc = new HTableDescriptor(TABLE_NAME);
+    desc.addFamily(new HColumnDescriptor(INPUT_COLUMN));
+    desc.addFamily(new HColumnDescriptor(OUTPUT_COLUMN));
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+    if (jobConf != null) {
+      FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir")));
+    }
+  }
+
+  /**
+   * Test HBase map/reduce
+   * 
+   * @throws IOException
+   */
+  public void testTableIndex() throws IOException {
+    boolean printResults = false;
+    if (printResults) {
+      LOG.info("Print table contents before map/reduce");
+    }
+    scanTable(printResults);
+
+    MiniMRCluster mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 1);
+
+    // set configuration parameter for index build
+    conf.set("hbase.index.conf", createIndexConfContent());
+
+    try {
+      jobConf = new JobConf(conf, DisabledTestTableIndex.class);
+      jobConf.setJobName("index column contents");
+      jobConf.setNumMapTasks(2);
+      // number of indexes to partition into
+      jobConf.setNumReduceTasks(1);
+
+      // use identity map (a waste, but just as an example)
+      IdentityTableMap.initJob(TABLE_NAME, INPUT_COLUMN,
+          IdentityTableMap.class, jobConf);
+
+      // use IndexTableReduce to build a Lucene index
+      jobConf.setReducerClass(IndexTableReduce.class);
+      FileOutputFormat.setOutputPath(jobConf, new Path(INDEX_DIR));
+      jobConf.setOutputFormat(IndexOutputFormat.class);
+
+      JobClient.runJob(jobConf);
+
+    } finally {
+      mrCluster.shutdown();
+    }
+
+    if (printResults) {
+      LOG.info("Print table contents after map/reduce");
+    }
+    scanTable(printResults);
+
+    // verify index results
+    verify();
+  }
+
+  private String createIndexConfContent() {
+    StringBuffer buffer = new StringBuffer();
+    buffer.append("<configuration><column><property>" +
+      "<name>hbase.column.name</name><value>" + INPUT_COLUMN +
+      "</value></property>");
+    buffer.append("<property><name>hbase.column.store</name> " +
+      "<value>true</value></property>");
+    buffer.append("<property><name>hbase.column.index</name>" +
+      "<value>true</value></property>");
+    buffer.append("<property><name>hbase.column.tokenize</name>" +
+      "<value>false</value></property>");
+    buffer.append("<property><name>hbase.column.boost</name>" +
+      "<value>3</value></property>");
+    buffer.append("<property><name>hbase.column.omit.norms</name>" +
+      "<value>false</value></property></column>");
+    buffer.append("<property><name>hbase.index.rowkey.name</name><value>" +
+      ROWKEY_NAME + "</value></property>");
+    buffer.append("<property><name>hbase.index.max.buffered.docs</name>" +
+      "<value>500</value></property>");
+    buffer.append("<property><name>hbase.index.max.field.length</name>" +
+      "<value>10000</value></property>");
+    buffer.append("<property><name>hbase.index.merge.factor</name>" +
+      "<value>10</value></property>");
+    buffer.append("<property><name>hbase.index.use.compound.file</name>" +
+      "<value>true</value></property>");
+    buffer.append("<property><name>hbase.index.optimize</name>" +
+      "<value>true</value></property></configuration>");
+
+    IndexConfiguration c = new IndexConfiguration();
+    c.addFromXML(buffer.toString());
+    return c.toString();
+  }
+
+  private void scanTable(boolean printResults)
+  throws IOException {
+    HTable table = new HTable(conf, TABLE_NAME);
+    Scan scan = new Scan();
+    scan.addColumns(columns);
+    ResultScanner scanner = table.getScanner(scan);
+    try {
+      for (Result r : scanner) {
+        if (printResults) {
+          LOG.info("row: " + r.getRow());
+        }
+        for (Map.Entry<byte [], Cell> e : r.getRowResult().entrySet()) {
+          if (printResults) {
+            LOG.info(" column: " + e.getKey() + " value: "
+                + new String(e.getValue().getValue(), HConstants.UTF8_ENCODING));
+          }
+        }
+      }
+    } finally {
+      scanner.close();
+    }
+  }
+
+  private void verify() throws IOException {
+    // Force a cache flush for every online region to ensure that when the
+    // scanner takes its snapshot, all the updates have made it into the cache.
+    for (HRegion r : cluster.getRegionThreads().get(0).getRegionServer().
+        getOnlineRegions()) {
+      HRegionIncommon region = new HRegionIncommon(r);
+      region.flushcache();
+    }
+
+    Path localDir = new Path(getUnitTestdir(getName()), "index_" +
+      Integer.toString(new Random().nextInt()));
+    this.fs.copyToLocalFile(new Path(INDEX_DIR), localDir);
+    FileSystem localfs = FileSystem.getLocal(conf);
+    FileStatus [] indexDirs = localfs.listStatus(localDir);
+    Searcher searcher = null;
+    ResultScanner scanner = null;
+    try {
+      if (indexDirs.length == 1) {
+        searcher = new IndexSearcher((new File(indexDirs[0].getPath().
+          toUri())).getAbsolutePath());
+      } else if (indexDirs.length > 1) {
+        Searchable[] searchers = new Searchable[indexDirs.length];
+        for (int i = 0; i < indexDirs.length; i++) {
+          searchers[i] = new IndexSearcher((new File(indexDirs[i].getPath().
+            toUri()).getAbsolutePath()));
+        }
+        searcher = new MultiSearcher(searchers);
+      } else {
+        throw new IOException("no index directory found");
+      }
+
+      HTable table = new HTable(conf, TABLE_NAME);
+      Scan scan = new Scan();
+      scan.addColumns(columns);
+      scanner = table.getScanner(scan);
+
+      IndexConfiguration indexConf = new IndexConfiguration();
+      String content = conf.get("hbase.index.conf");
+      if (content != null) {
+        indexConf.addFromXML(content);
+      }
+      String rowkeyName = indexConf.getRowkeyName();
+
+      int count = 0;
+      for (Result r : scanner) {
+        String value = Bytes.toString(r.getRow());
+        Term term = new Term(rowkeyName, value);
+        int hitCount = searcher.search(new TermQuery(term)).length();
+        assertEquals("check row " + value, 1, hitCount);
+        count++;
+      }
+      LOG.debug("Searcher.maxDoc: " + searcher.maxDoc());
+      LOG.debug("IndexReader.numDocs: " + ((IndexSearcher)searcher).getIndexReader().numDocs());      
+      int maxDoc = ((IndexSearcher)searcher).getIndexReader().numDocs();
+      assertEquals("check number of rows", maxDoc, count);
+    } finally {
+      if (null != searcher)
+        searcher.close();
+      if (null != scanner)
+        scanner.close();
+    }
+  }
+  /**
+   * @param args unused
+   */
+  public static void main(String[] args) {
+    TestRunner.run(new TestSuite(DisabledTestTableIndex.class));
+  }
+}
\ No newline at end of file
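
Once the job above has written its Lucene index, a single row key can be
checked with the same Lucene 2.x calls the verify() method uses; the index
path, field name and row value below are hypothetical:

    // Illustrative sketch, not part of the commit.
    Searcher searcher = new IndexSearcher("/tmp/testindex/part-00000");
    try {
      Term term = new Term("key", "row1");   // rowkey field name, row key value
      int hits = searcher.search(new TermQuery(term)).length();
      System.out.println("hits for row1: " + hits);   // the test expects exactly 1
    } finally {
      searcher.close();
    }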

Added: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/mapred/DisabledTestTableMapReduce.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/mapred/DisabledTestTableMapReduce.java?rev=782445&view=auto
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/mapred/DisabledTestTableMapReduce.java (added)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/mapred/DisabledTestTableMapReduce.java Sun Jun  7 19:57:37 2009
@@ -0,0 +1,247 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.mapred;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MultiRegionTable;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.mapred.JobClient;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.MapReduceBase;
+import org.apache.hadoop.mapred.MiniMRCluster;
+import org.apache.hadoop.mapred.OutputCollector;
+import org.apache.hadoop.mapred.Reporter;
+
+/**
+ * Test Map/Reduce job over HBase tables. The map/reduce process we're testing
+ * on our tables is simple - take every row in the table, reverse the value of
+ * a particular cell, and write it back to the table.
+ */
+public class DisabledTestTableMapReduce extends MultiRegionTable {
+  private static final Log LOG =
+    LogFactory.getLog(DisabledTestTableMapReduce.class.getName());
+
+  static final String MULTI_REGION_TABLE_NAME = "mrtest";
+  static final String INPUT_COLUMN = "contents:";
+  static final String OUTPUT_COLUMN = "text:";
+  
+  private static final byte [][] columns = new byte [][] {
+    Bytes.toBytes(INPUT_COLUMN),
+    Bytes.toBytes(OUTPUT_COLUMN)
+  };
+
+  /** constructor */
+  public DisabledTestTableMapReduce() {
+    super(INPUT_COLUMN);
+    desc = new HTableDescriptor(MULTI_REGION_TABLE_NAME);
+    desc.addFamily(new HColumnDescriptor(INPUT_COLUMN));
+    desc.addFamily(new HColumnDescriptor(OUTPUT_COLUMN));
+  }
+
+  /**
+   * Pass the given key and processed record to reduce
+   */
+  public static class ProcessContentsMapper
+  extends MapReduceBase
+  implements TableMap<ImmutableBytesWritable, BatchUpdate> {
+    /**
+     * Pass the key, and reversed value to reduce
+     * @param key 
+     * @param value 
+     * @param output 
+     * @param reporter 
+     * @throws IOException 
+     */
+    public void map(ImmutableBytesWritable key, RowResult value,
+      OutputCollector<ImmutableBytesWritable, BatchUpdate> output,
+      Reporter reporter) 
+    throws IOException {
+      if (value.size() != 1) {
+        throw new IOException("There should only be one input column");
+      }
+      byte [][] keys = value.keySet().toArray(new byte [value.size()][]);
+      if(!Bytes.equals(keys[0], Bytes.toBytes(INPUT_COLUMN))) {
+        throw new IOException("Wrong input column. Expected: " + INPUT_COLUMN
+            + " but got: " + keys[0]);
+      }
+
+      // Get the original value and reverse it
+      
+      String originalValue =
+        new String(value.get(keys[0]).getValue(), HConstants.UTF8_ENCODING);
+      StringBuilder newValue = new StringBuilder();
+      for(int i = originalValue.length() - 1; i >= 0; i--) {
+        newValue.append(originalValue.charAt(i));
+      }
+      
+      // Now set the value to be collected
+
+      BatchUpdate outval = new BatchUpdate(key.get());
+      outval.put(OUTPUT_COLUMN, Bytes.toBytes(newValue.toString()));
+      output.collect(key, outval);
+    }
+  }
+  
+  /**
+   * Test a map/reduce against a multi-region table
+   * @throws IOException
+   */
+  public void testMultiRegionTable() throws IOException {
+    runTestOnTable(new HTable(conf, MULTI_REGION_TABLE_NAME));
+  }
+
+  private void runTestOnTable(HTable table) throws IOException {
+    MiniMRCluster mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 1);
+
+    JobConf jobConf = null;
+    try {
+      LOG.info("Before map/reduce startup");
+      jobConf = new JobConf(conf, DisabledTestTableMapReduce.class);
+      jobConf.setJobName("process column contents");
+      jobConf.setNumReduceTasks(1);
+      TableMapReduceUtil.initTableMapJob(Bytes.toString(table.getTableName()),
+        INPUT_COLUMN, ProcessContentsMapper.class,
+        ImmutableBytesWritable.class, BatchUpdate.class, jobConf);
+      TableMapReduceUtil.initTableReduceJob(Bytes.toString(table.getTableName()),
+        IdentityTableReduce.class, jobConf);
+            
+      LOG.info("Started " + Bytes.toString(table.getTableName()));
+      JobClient.runJob(jobConf);
+      LOG.info("After map/reduce completion");
+
+      // verify map-reduce results
+      verify(Bytes.toString(table.getTableName()));
+    } finally {
+      mrCluster.shutdown();
+      if (jobConf != null) {
+        FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir")));
+      }
+    }
+  }
+
+  private void verify(String tableName) throws IOException {
+    HTable table = new HTable(conf, tableName);
+    boolean verified = false;
+    long pause = conf.getLong("hbase.client.pause", 5 * 1000);
+    int numRetries = conf.getInt("hbase.client.retries.number", 5);
+    for (int i = 0; i < numRetries; i++) {
+      try {
+        LOG.info("Verification attempt #" + i);
+        verifyAttempt(table);
+        verified = true;
+        break;
+      } catch (NullPointerException e) {
+        // If here, a cell was empty.  Presume it's because updates came in
+        // after the scanner had been opened.  Wait a while and retry.
+        LOG.debug("Verification attempt failed: " + e.getMessage());
+      }
+      try {
+        Thread.sleep(pause);
+      } catch (InterruptedException e) {
+        // continue
+      }
+    }
+    assertTrue(verified);
+  }
+
+  /**
+   * Looks at every value of the mapreduce output and verifies that indeed
+   * the values have been reversed.
+   * @param table Table to scan.
+   * @throws IOException
+   * @throws NullPointerException if we failed to find a cell value
+   */
+  private void verifyAttempt(final HTable table) throws IOException, NullPointerException {
+    Scan scan = new Scan();
+    scan.addColumns(columns);
+    ResultScanner scanner = table.getScanner(scan);
+    try {
+      for (Result r : scanner) {
+        if (LOG.isDebugEnabled()) {
+          if (r.size() > 2 ) {
+            throw new IOException("Too many results, expected 2 got " +
+              r.size());
+          }
+        }
+        byte[] firstValue = null;
+        byte[] secondValue = null;
+        int count = 0;
+        for(Map.Entry<byte [], Cell> e: r.getRowResult().entrySet()) {
+          if (count == 0) {
+            firstValue = e.getValue().getValue();
+          }
+          if (count == 1) {
+            secondValue = e.getValue().getValue();
+          }
+          count++;
+          if (count == 2) {
+            break;
+          }
+        }
+        
+        String first = "";
+        if (firstValue == null) {
+          throw new NullPointerException(Bytes.toString(r.getRow()) +
+            ": first value is null");
+        }
+        first = new String(firstValue, HConstants.UTF8_ENCODING);
+        
+        String second = "";
+        if (secondValue == null) {
+          throw new NullPointerException(Bytes.toString(r.getRow()) +
+            ": second value is null");
+        }
+        byte[] secondReversed = new byte[secondValue.length];
+        for (int i = 0, j = secondValue.length - 1; j >= 0; j--, i++) {
+          secondReversed[i] = secondValue[j];
+        }
+        second = new String(secondReversed, HConstants.UTF8_ENCODING);
+
+        if (first.compareTo(second) != 0) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("second key is not the reverse of first. row=" +
+                r.getRow() + ", first value=" + first + ", second value=" +
+                second);
+          }
+          fail();
+        }
+      }
+    } finally {
+      scanner.close();
+    }
+  }
+}

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/DisabledTestRegionServerExit.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/DisabledTestRegionServerExit.java?rev=782445&r1=782444&r2=782445&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/DisabledTestRegionServerExit.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/DisabledTestRegionServerExit.java Sun Jun  7 19:57:37 2009
@@ -32,7 +32,10 @@
 import org.apache.hadoop.hbase.LocalHBaseCluster;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Scanner;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -102,15 +105,15 @@
   private byte [] createTableAndAddRow(final String tableName)
   throws IOException {
     HTableDescriptor desc = new HTableDescriptor(tableName);
-    desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY));
+    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
     HBaseAdmin admin = new HBaseAdmin(conf);
     admin.createTable(desc);
     // put some values in the table
     this.table = new HTable(conf, tableName);
     byte [] row = Bytes.toBytes("row1");
-    BatchUpdate b = new BatchUpdate(row);
-    b.put(HConstants.COLUMN_FAMILY, Bytes.toBytes(tableName));
-    table.commit(b);
+    Put put = new Put(row);
+    put.add(HConstants.CATALOG_FAMILY, null, Bytes.toBytes(tableName));
+    table.put(put);
     return row;
   }
 
@@ -166,27 +169,29 @@
           // Now try to open a scanner on the meta table. Should stall until
           // meta server comes back up.
           HTable t = new HTable(conf, HConstants.META_TABLE_NAME);
-          Scanner s =
-            t.getScanner(HConstants.COLUMN_FAMILY_ARRAY,
-              HConstants.EMPTY_START_ROW);
+          Scan scan = new Scan();
+          scan.addFamily(HConstants.CATALOG_FAMILY);
+
+          ResultScanner s = t.getScanner(scan);
           s.close();
           
         } catch (IOException e) {
           LOG.fatal("could not re-open meta table because", e);
           fail();
         }
-        Scanner scanner = null;
+        ResultScanner scanner = null;
         try {
           // Verify that the client can find the data after the region has moved
           // to a different server
-          scanner =
-            table.getScanner(HConstants.COLUMN_FAMILY_ARRAY,
-               HConstants.EMPTY_START_ROW);
+          Scan scan = new Scan();
+          scan.addFamily(HConstants.CATALOG_FAMILY);
+
+          scanner = table.getScanner(scan);
           LOG.info("Obtained scanner " + scanner);
-          for (RowResult r : scanner) {
+          for (Result r : scanner) {
             assertTrue(Bytes.equals(r.getRow(), row));
             assertEquals(1, r.size());
-            byte[] bytes = r.get(HConstants.COLUMN_FAMILY).getValue();
+            byte[] bytes = r.getRowResult().get(HConstants.CATALOG_FAMILY).getValue();
             assertNotNull(bytes);
             assertTrue(tableName.equals(Bytes.toString(bytes)));
           }
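
The hunks above replace the old BatchUpdate/Scanner client calls with the
Put/Scan/ResultScanner API. A condensed sketch of the new pattern, with a
hypothetical table and family name and "conf" assumed to be an
HBaseConfiguration:

    // Illustrative sketch, not part of the commit.
    HTable table = new HTable(conf, "mytable");
    byte [] family = Bytes.toBytes("info");

    Put put = new Put(Bytes.toBytes("row1"));
    put.add(family, null, Bytes.toBytes("value1"));   // family, qualifier, value
    table.put(put);

    Scan scan = new Scan();
    scan.addFamily(family);
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result r : scanner) {
        System.out.println("row: " + Bytes.toString(r.getRow()));
      }
    } finally {
      scanner.close();
    }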

Added: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java?rev=782445&view=auto
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java (added)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java Sun Jun  7 19:57:37 2009
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
+import org.apache.hadoop.hbase.KeyValue;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.Collections;
+
+/**
+ * A fixture that implements and presents a KeyValueScanner.
+ * It takes a list of key/values which is then sorted according
+ * to the provided comparator, and then the whole thing pretends
+ * to be a store file scanner.
+ */
+public class KeyValueScanFixture implements KeyValueScanner {
+  ArrayList<KeyValue> data;
+  Iterator<KeyValue> iter = null;
+  KeyValue current = null;
+  KeyValue.KVComparator comparator;
+
+  public KeyValueScanFixture(KeyValue.KVComparator comparator,
+                             KeyValue... incData) {
+    this.comparator = comparator;
+
+    data = new ArrayList<KeyValue>(incData.length);
+    for( int i = 0; i < incData.length ; ++i) {
+      data.add(incData[i]);
+    }
+    Collections.sort(data, this.comparator);
+  }
+
+  @Override
+  public KeyValue peek() {
+    return this.current;
+  }
+
+  @Override
+  public KeyValue next() {
+    KeyValue res = current;
+
+    if (iter.hasNext())
+      current = iter.next();
+    else
+      current = null;
+    return res;
+  }
+
+  @Override
+  public boolean seek(KeyValue key) {
+    // start at beginning.
+    iter = data.iterator();
+    int cmp;
+    KeyValue kv = null;
+    do {
+      if (!iter.hasNext()) {
+        current = null;
+        return false;
+      }
+      kv = iter.next();
+      cmp = comparator.compare(key, kv);
+    } while (cmp > 0);
+    current = kv;
+    return true;
+  }
+
+  @Override
+  public void close() {
+    // noop.
+  }
+}
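
A short usage sketch for the fixture above, building its input with the
KeyValueTestUtil.create helper seen elsewhere in this commit; row, family and
column names are hypothetical:

    // Illustrative sketch, not part of the commit.
    KeyValueScanFixture fixture = new KeyValueScanFixture(KeyValue.COMPARATOR,
        KeyValueTestUtil.create("row2", "fam", "col1", 1, "v2"),
        KeyValueTestUtil.create("row1", "fam", "col1", 1, "v1"));
    // seek() positions the scanner at the first KeyValue >= the given key
    // (the constructor already sorted the data with the comparator).
    fixture.seek(KeyValueTestUtil.create("row1", "fam", "col1", 1, "v1"));
    for (KeyValue kv = fixture.next(); kv != null; kv = fixture.next()) {
      System.out.println(kv);   // row1 first, then row2
    }
    fixture.close();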

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java?rev=782445&r1=782444&r2=782445&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java Sun Jun  7 19:57:37 2009
@@ -23,7 +23,7 @@
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HServerAddress;
 
@@ -35,7 +35,7 @@
  * <code>${HBASE_HOME}/bin/hbase ./bin/hbase org.apache.hadoop.hbase.OOMERegionServer start</code>.
  */
 public class OOMERegionServer extends HRegionServer {
-  private List<BatchUpdate> retainer = new ArrayList<BatchUpdate>();
+  private List<Put> retainer = new ArrayList<Put>();
 
   public OOMERegionServer(HBaseConfiguration conf) throws IOException {
     super(conf);
@@ -46,12 +46,12 @@
     super(address, conf);
   }
   
-  public void batchUpdate(byte [] regionName, BatchUpdate b)
+  public void put(byte [] regionName, Put put)
   throws IOException {
-    super.batchUpdate(regionName, b, -1L);
+    super.put(regionName, put);
     for (int i = 0; i < 30; i++) {
       // Add the batch update 30 times to bring on the OOME faster.
-      this.retainer.add(b);
+      this.retainer.add(put);
     }
   }
   

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java?rev=782445&r1=782444&r2=782445&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java Sun Jun  7 19:57:37 2009
@@ -23,11 +23,14 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-
 import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -38,7 +41,7 @@
 public class TestCompaction extends HBaseTestCase {
   static final Log LOG = LogFactory.getLog(TestCompaction.class.getName());
   private HRegion r = null;
-  private static final byte [] COLUMN_FAMILY = COLFAMILY_NAME1;
+  private static final byte [] COLUMN_FAMILY = fam1;
   private final byte [] STARTROW = Bytes.toBytes(START_KEY);
   private static final byte [] COLUMN_FAMILY_TEXT = COLUMN_FAMILY;
   private static final int COMPACTION_THRESHOLD = MAXVERSIONS;
@@ -91,11 +94,16 @@
     // Default is that there only 3 (MAXVERSIONS) versions allowed per column.
     // Assert == 3 when we ask for versions.
     addContent(new HRegionIncommon(r), Bytes.toString(COLUMN_FAMILY));
+
+    
     // FIX!!
-    Cell[] cellValues = 
-      Cell.createSingleCellArray(r.get(STARTROW, COLUMN_FAMILY_TEXT, -1, 100 /*Too many*/));
+//    Cell[] cellValues =
+//      Cell.createSingleCellArray(r.get(STARTROW, COLUMN_FAMILY_TEXT, -1, 100 /*Too many*/));
+    Result result = r.get(new Get(STARTROW).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null);
+
     // Assert that I can get 3 versions since it is the max I should get
-    assertEquals(cellValues.length, 3);
+    assertEquals(3, result.size());
+//    assertEquals(cellValues.length, 3);
     r.flushcache();
     r.compactStores();
     // Always 3 versions if that is what max versions is.
@@ -103,32 +111,49 @@
     // Increment the least significant character so we get to next row.
     secondRowBytes[START_KEY_BYTES.length - 1]++;
     // FIX
-    cellValues = Cell.createSingleCellArray(r.get(secondRowBytes, COLUMN_FAMILY_TEXT, -1, 100/*Too many*/));
-    LOG.info("Count of " + Bytes.toString(secondRowBytes) + ": " +
-      cellValues.length);
-    assertTrue(cellValues.length == 3);
+    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null);
+
+    // Assert that I can get 3 versions since it is the max I should get
+    assertEquals(3, result.size());
+//
+//    cellValues = Cell.createSingleCellArray(r.get(secondRowBytes, COLUMN_FAMILY_TEXT, -1, 100/*Too many*/));
+//    LOG.info("Count of " + Bytes.toString(secondRowBytes) + ": " +
+//      cellValues.length);
+//    assertTrue(cellValues.length == 3);
 
     // Now add deletes to memcache and then flush it.  That will put us over
     // the compaction threshold of 3 store files.  Compacting these store files
     // should result in a compacted store file that has no references to the
     // deleted row.
-    r.deleteAll(secondRowBytes, COLUMN_FAMILY_TEXT, System.currentTimeMillis(),
-      null);
+    Delete delete = new Delete(secondRowBytes, System.currentTimeMillis(), null);
+    byte [][] famAndQf = {COLUMN_FAMILY, null};
+    delete.deleteFamily(famAndQf[0]);
+    r.delete(delete, null, true);
+    
     // Assert deleted.
-    assertNull(r.get(secondRowBytes, COLUMN_FAMILY_TEXT, -1, 100 /*Too many*/));
+
+    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null );
+    assertTrue(result.isEmpty());
+
+
     r.flushcache();
-    assertNull(r.get(secondRowBytes, COLUMN_FAMILY_TEXT, -1, 100 /*Too many*/));
+    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null );
+    assertTrue(result.isEmpty());
+
     // Add a bit of data and flush.  Start adding at 'bbb'.
     createSmallerStoreFile(this.r);
     r.flushcache();
     // Assert that the second row is still deleted.
-    cellValues = Cell.createSingleCellArray(r.get(secondRowBytes,
-      COLUMN_FAMILY_TEXT, -1, 100 /*Too many*/));
-    assertNull(r.get(secondRowBytes, COLUMN_FAMILY_TEXT, -1, 100 /*Too many*/));
+    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null );
+    assertTrue(result.isEmpty());
+
     // Force major compaction.
     r.compactStores(true);
     assertEquals(r.getStore(COLUMN_FAMILY_TEXT).getStorefiles().size(), 1);
-    assertNull(r.get(secondRowBytes, COLUMN_FAMILY_TEXT, -1, 100 /*Too many*/));
+
+    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null );
+    assertTrue(result.isEmpty());
+
     // Make sure the store files do have some 'aaa' keys in them -- exactly 3.
     // Also, that compacted store files do not have any secondRowBytes because
     // they were deleted.
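
For readers following the API change in this test, the region-level Get and
Delete calls it now relies on look roughly like this; "region", "row" and
"family" are assumed to exist, and the trailing null/true arguments simply
mirror the lock-id and (presumably) write-to-WAL flags passed in the hunks
above:

    // Illustrative sketch, not part of the commit.
    Result result = region.get(
        new Get(row).addFamily(family).setMaxVersions(100), null);
    boolean gone = result.isEmpty();   // true once the family has been deleted

    Delete delete = new Delete(row, System.currentTimeMillis(), null);
    delete.deleteFamily(family);
    region.delete(delete, null, true);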

Added: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestDeleteCompare.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestDeleteCompare.java?rev=782445&view=auto
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestDeleteCompare.java (added)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestDeleteCompare.java Sun Jun  7 19:57:37 2009
@@ -0,0 +1,191 @@
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueTestUtil;
+import org.apache.hadoop.hbase.regionserver.DeleteCompare.DeleteCode;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import junit.framework.TestCase;
+
+public class TestDeleteCompare extends TestCase {
+
+  //Cases to compare:
+  //1. DeleteFamily and whatever of the same row
+  //2. DeleteColumn and whatever of the same row + qualifier
+  //3. Delete and the matching put
+  //4. Big test that include starting on the wrong row and qualifier
+  public void testDeleteCompare_DeleteFamily() {
+    //Creating memcache
+    Set<KeyValue> memcache = new TreeSet<KeyValue>(KeyValue.COMPARATOR);
+    memcache.add(KeyValueTestUtil.create("row11", "fam", "col1", 3, "d-c"));
+    memcache.add(KeyValueTestUtil.create("row11", "fam", "col1", 2, "d-c"));
+    memcache.add(KeyValueTestUtil.create("row11", "fam", "col1", 1, "d-c"));
+    memcache.add(KeyValueTestUtil.create("row11", "fam", "col2", 1, "d-c"));
+
+    memcache.add(KeyValueTestUtil.create("row11", "fam", "col3", 3, "d-c"));
+    memcache.add(KeyValueTestUtil.create("row11", "fam", "col3", 2, "d-c"));
+    memcache.add(KeyValueTestUtil.create("row11", "fam", "col3", 1, "d-c"));
+
+    memcache.add(KeyValueTestUtil.create("row21", "fam", "col1", 1, "d-c"));
+
+    //Creating expected result
+    List<DeleteCode> expected = new ArrayList<DeleteCode>();
+    expected.add(DeleteCode.SKIP);
+    expected.add(DeleteCode.DELETE);
+    expected.add(DeleteCode.DELETE);
+    expected.add(DeleteCode.DELETE);
+    expected.add(DeleteCode.SKIP);
+    expected.add(DeleteCode.DELETE);
+    expected.add(DeleteCode.DELETE);
+    expected.add(DeleteCode.DONE);
+
+    KeyValue delete = KeyValueTestUtil.create("row11",
+        "fam", "", 2, KeyValue.Type.DeleteFamily, "dont-care");
+    byte [] deleteBuffer = delete.getBuffer();
+    int deleteRowOffset = delete.getRowOffset();
+    short deleteRowLen = delete.getRowLength();
+    int deleteQualifierOffset = delete.getQualifierOffset();
+    int deleteQualifierLen = delete.getQualifierLength();
+    int deleteTimestampOffset = deleteQualifierOffset + deleteQualifierLen;
+    byte deleteType = deleteBuffer[deleteTimestampOffset +Bytes.SIZEOF_LONG];
+    
+    List<DeleteCode> actual = new ArrayList<DeleteCode>();
+    for(KeyValue mem : memcache){
+    actual.add(DeleteCompare.deleteCompare(mem, deleteBuffer, deleteRowOffset,
+        deleteRowLen, deleteQualifierOffset, deleteQualifierLen,
+        deleteTimestampOffset, deleteType, KeyValue.KEY_COMPARATOR));
+      
+    }
+    
+    assertEquals(expected.size(), actual.size());
+    for(int i=0; i<expected.size(); i++){
+      assertEquals(expected.get(i), actual.get(i));
+    }
+  }
+  
+  public void testDeleteCompare_DeleteColumn() {
+    //Creating memcache
+    Set<KeyValue> memcache = new TreeSet<KeyValue>(KeyValue.COMPARATOR);
+    memcache.add(KeyValueTestUtil.create("row11", "fam", "col1", 3, "d-c"));
+    memcache.add(KeyValueTestUtil.create("row11", "fam", "col1", 2, "d-c"));
+    memcache.add(KeyValueTestUtil.create("row11", "fam", "col1", 1, "d-c"));
+    memcache.add(KeyValueTestUtil.create("row21", "fam", "col1", 1, "d-c"));
+
+
+    //Creating expected result
+    List<DeleteCode> expected = new ArrayList<DeleteCode>();
+    expected.add(DeleteCode.SKIP);
+    expected.add(DeleteCode.DELETE);
+    expected.add(DeleteCode.DELETE);
+    expected.add(DeleteCode.DONE);
+    
+    KeyValue delete = KeyValueTestUtil.create("row11", "fam", "col1", 2,
+        KeyValue.Type.DeleteColumn, "dont-care");
+    byte [] deleteBuffer = delete.getBuffer();
+    int deleteRowOffset = delete.getRowOffset();
+    short deleteRowLen = delete.getRowLength();
+    int deleteQualifierOffset = delete.getQualifierOffset();
+    int deleteQualifierLen = delete.getQualifierLength();
+    int deleteTimestampOffset = deleteQualifierOffset + deleteQualifierLen;
+    byte deleteType = deleteBuffer[deleteTimestampOffset +Bytes.SIZEOF_LONG];
+    
+    List<DeleteCode> actual = new ArrayList<DeleteCode>();
+    for(KeyValue mem : memcache){
+    actual.add(DeleteCompare.deleteCompare(mem, deleteBuffer, deleteRowOffset,
+        deleteRowLen, deleteQualifierOffset, deleteQualifierLen,
+        deleteTimestampOffset, deleteType, KeyValue.KEY_COMPARATOR));
+      
+    }
+    
+    assertEquals(expected.size(), actual.size());
+    for(int i=0; i<expected.size(); i++){
+      assertEquals(expected.get(i), actual.get(i));
+    }
+  }
+  
+  
+  public void testDeleteCompare_Delete() {
+    //Creating memcache
+    Set<KeyValue> memcache = new TreeSet<KeyValue>(KeyValue.COMPARATOR);
+    memcache.add(KeyValueTestUtil.create("row11", "fam", "col1", 3, "d-c"));
+    memcache.add(KeyValueTestUtil.create("row11", "fam", "col1", 2, "d-c"));
+    memcache.add(KeyValueTestUtil.create("row11", "fam", "col1", 1, "d-c"));
+
+    //Creating expected result
+    List<DeleteCode> expected = new ArrayList<DeleteCode>();
+    expected.add(DeleteCode.SKIP);
+    expected.add(DeleteCode.DELETE);
+    expected.add(DeleteCode.DONE);
+    
+    KeyValue delete = KeyValueTestUtil.create("row11", "fam", "col1", 2,
+        KeyValue.Type.Delete, "dont-care");
+    byte [] deleteBuffer = delete.getBuffer();
+    int deleteRowOffset = delete.getRowOffset();
+    short deleteRowLen = delete.getRowLength();
+    int deleteQualifierOffset = delete.getQualifierOffset();
+    int deleteQualifierLen = delete.getQualifierLength();
+    int deleteTimestampOffset = deleteQualifierOffset + deleteQualifierLen;
+    byte deleteType = deleteBuffer[deleteTimestampOffset +Bytes.SIZEOF_LONG];
+    
+    List<DeleteCode> actual = new ArrayList<DeleteCode>();
+    for(KeyValue mem : memcache){
+    actual.add(DeleteCompare.deleteCompare(mem, deleteBuffer, deleteRowOffset,
+        deleteRowLen, deleteQualifierOffset, deleteQualifierLen,
+        deleteTimestampOffset, deleteType, KeyValue.KEY_COMPARATOR));
+    }
+    
+    assertEquals(expected.size(), actual.size());
+    for(int i=0; i<expected.size(); i++){
+      assertEquals(expected.get(i), actual.get(i));
+    }
+  }
+  
+  public void testDeleteCompare_Multiple() {
+    //Creating memcache
+    Set<KeyValue> memcache = new TreeSet<KeyValue>(KeyValue.COMPARATOR);
+    memcache.add(KeyValueTestUtil.create("row11", "fam", "col1", 1, "d-c"));
+    memcache.add(KeyValueTestUtil.create("row21", "fam", "col1", 4, "d-c"));
+    memcache.add(KeyValueTestUtil.create("row21", "fam", "col1", 3, "d-c"));
+    memcache.add(KeyValueTestUtil.create("row21", "fam", "col1", 2, "d-c"));
+    memcache.add(KeyValueTestUtil.create("row21", "fam", "col1", 1,
+        KeyValue.Type.Delete, "dont-care"));
+    memcache.add(KeyValueTestUtil.create("row31", "fam", "col1", 1, "dont-care"));
+
+    //Creating expected result
+    List<DeleteCode> expected = new ArrayList<DeleteCode>();
+    expected.add(DeleteCode.SKIP);
+    expected.add(DeleteCode.DELETE);
+    expected.add(DeleteCode.DELETE);
+    expected.add(DeleteCode.DELETE);
+    expected.add(DeleteCode.DELETE);
+    expected.add(DeleteCode.DONE);
+
+    KeyValue delete = KeyValueTestUtil.create("row21", "fam", "col1", 5,
+        KeyValue.Type.DeleteColumn, "dont-care");
+    byte [] deleteBuffer = delete.getBuffer();
+    int deleteRowOffset = delete.getRowOffset();
+    short deleteRowLen = delete.getRowLength();
+    int deleteQualifierOffset = delete.getQualifierOffset();
+    int deleteQualifierLen = delete.getQualifierLength();
+    int deleteTimestampOffset = deleteQualifierOffset + deleteQualifierLen;
+    byte deleteType = deleteBuffer[deleteTimestampOffset +Bytes.SIZEOF_LONG];
+    
+    List<DeleteCode> actual = new ArrayList<DeleteCode>();
+    for(KeyValue mem : memcache){
+    actual.add(DeleteCompare.deleteCompare(mem, deleteBuffer, deleteRowOffset,
+        deleteRowLen, deleteQualifierOffset, deleteQualifierLen,
+        deleteTimestampOffset, deleteType, KeyValue.KEY_COMPARATOR));
+      
+    }
+    
+    assertEquals(expected.size(), actual.size());
+    for(int i=0; i<expected.size(); i++){
+      assertEquals(expected.get(i), actual.get(i));
+    }
+  }
+}

Added: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java?rev=782445&view=auto
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java (added)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java Sun Jun  7 19:57:37 2009
@@ -0,0 +1,144 @@
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.TreeSet;
+
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.regionserver.QueryMatcher.MatchCode;
+import org.apache.hadoop.hbase.util.Bytes;
+
+
+public class TestExplicitColumnTracker extends HBaseTestCase
+implements HConstants {
+  private boolean PRINT = false; 
+  
+  public void testGet_SingleVersion(){
+    if(PRINT){
+      System.out.println("SingleVersion");
+    }
+    byte [] col1 = Bytes.toBytes("col1");
+    byte [] col2 = Bytes.toBytes("col2");
+    byte [] col3 = Bytes.toBytes("col3");
+    byte [] col4 = Bytes.toBytes("col4");
+    byte [] col5 = Bytes.toBytes("col5");
+    
+    //Create tracker
+    TreeSet<byte[]> columns = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
+    //Looking for every other
+    columns.add(col2);
+    columns.add(col4);
+    List<MatchCode> expected = new ArrayList<MatchCode>();
+    expected.add(MatchCode.SKIP);
+    expected.add(MatchCode.INCLUDE);
+    expected.add(MatchCode.SKIP);
+    expected.add(MatchCode.INCLUDE);
+    expected.add(MatchCode.DONE);
+    int maxVersions = 1;
+    
+    ColumnTracker exp = new ExplicitColumnTracker(columns, maxVersions);
+        
+    //Create "Scanner"
+    List<byte[]> scanner = new ArrayList<byte[]>();
+    scanner.add(col1);
+    scanner.add(col2);
+    scanner.add(col3);
+    scanner.add(col4);
+    scanner.add(col5);
+    
+    //Initialize result
+    List<MatchCode> result = new ArrayList<MatchCode>(); 
+    
+    //"Match"
+    for(byte [] col : scanner){
+      result.add(exp.checkColumn(col, 0, col.length));
+    }
+    
+    assertEquals(expected.size(), result.size());
+    for(int i=0; i< expected.size(); i++){
+      assertEquals(expected.get(i), result.get(i));
+      if(PRINT){
+        System.out.println("Expected " +expected.get(i) + ", actual " +
+            result.get(i));
+      }
+    }
+  }
+  
+  public void testGet_MultiVersion(){
+    if(PRINT){
+      System.out.println("\nMultiVersion");
+    }
+    byte [] col1 = Bytes.toBytes("col1");
+    byte [] col2 = Bytes.toBytes("col2");
+    byte [] col3 = Bytes.toBytes("col3");
+    byte [] col4 = Bytes.toBytes("col4");
+    byte [] col5 = Bytes.toBytes("col5");
+    
+    //Create tracker
+    TreeSet<byte[]> columns = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
+    //Looking for every other
+    columns.add(col2);
+    columns.add(col4);
+    
+    List<MatchCode> expected = new ArrayList<MatchCode>();
+    expected.add(MatchCode.SKIP);
+    expected.add(MatchCode.SKIP);
+    expected.add(MatchCode.SKIP);
+
+    expected.add(MatchCode.INCLUDE);
+    expected.add(MatchCode.INCLUDE);
+    expected.add(MatchCode.SKIP);
+
+    expected.add(MatchCode.SKIP);
+    expected.add(MatchCode.SKIP);
+    expected.add(MatchCode.SKIP);
+
+    expected.add(MatchCode.INCLUDE);
+    expected.add(MatchCode.INCLUDE);
+    expected.add(MatchCode.DONE);
+
+    expected.add(MatchCode.DONE);
+    expected.add(MatchCode.DONE);
+    expected.add(MatchCode.DONE);
+    int maxVersions = 2;
+    
+    ColumnTracker exp = new ExplicitColumnTracker(columns, maxVersions);
+        
+    //Create "Scanner"
+    List<byte[]> scanner = new ArrayList<byte[]>();
+    scanner.add(col1);
+    scanner.add(col1);
+    scanner.add(col1);
+    scanner.add(col2);
+    scanner.add(col2);
+    scanner.add(col2);
+    scanner.add(col3);
+    scanner.add(col3);
+    scanner.add(col3);
+    scanner.add(col4);
+    scanner.add(col4);
+    scanner.add(col4);
+    scanner.add(col5);
+    scanner.add(col5);
+    scanner.add(col5);
+    
+    //Initialize result
+    List<MatchCode> result = new ArrayList<MatchCode>(); 
+    
+    //"Match"
+    for(byte [] col : scanner){
+      result.add(exp.checkColumn(col, 0, col.length));
+    }
+    
+    assertEquals(expected.size(), result.size());
+    for(int i=0; i< expected.size(); i++){
+      assertEquals(expected.get(i), result.get(i));
+      if(PRINT){
+        System.out.println("Expected " +expected.get(i) + ", actual " +
+            result.get(i));
+      }
+    }
+  }
+  
+}

Added: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestGetDeleteTracker.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestGetDeleteTracker.java?rev=782445&view=auto
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestGetDeleteTracker.java (added)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestGetDeleteTracker.java Sun Jun  7 19:57:37 2009
@@ -0,0 +1,313 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.regionserver.GetDeleteTracker.Delete;
+import org.apache.hadoop.hbase.util.Bytes;
+
+
+public class TestGetDeleteTracker extends HBaseTestCase implements HConstants {
+  
+  private final boolean PRINT = true;
+  
+  private byte [] col1 = null;
+  private byte [] col2 = null;
+  
+  private int col1Len = 0;
+  private int col2Len = 0;
+
+  private byte [] empty = null;
+  
+  private long ts1 = 0L;
+  private long ts2 = 0L;
+  private long ts3 = 0L;
+  
+  
+  private Delete del10 = null;
+  private Delete del11 = null;
+  private Delete delQf10 = null;
+  private Delete delQf11 = null;
+  private Delete delFam10 = null;
+  
+  private Delete del20 = null;
+  private Delete del21 = null;
+  private Delete delQf20 = null;
+  private Delete delQf21 = null;
+  private Delete delFam20 = null;
+  
+  
+  private Delete del30 = null;
+  
+  GetDeleteTracker dt = null;
+  private byte del = KeyValue.Type.Delete.getCode();
+  private byte delCol = KeyValue.Type.DeleteColumn.getCode();
+  private byte delFam = KeyValue.Type.DeleteFamily.getCode();
+
+  protected void setUp() throws Exception {
+    dt = new GetDeleteTracker(KeyValue.KEY_COMPARATOR);
+    col1 = "col".getBytes();
+    col2 = "col2".getBytes();
+    col1Len = col1.length;
+    col2Len = col2.length;
+    
+    empty = new byte[0];
+
+    //ts1
+    ts1 = System.nanoTime();
+    del10 = dt.new Delete(col1, 0, col1Len, del, ts1);
+    del11 = dt.new Delete(col2, 0, col2Len, del, ts1);
+    delQf10 = dt.new Delete(col1, 0, col1Len, delCol, ts1);
+    delQf11 = dt.new Delete(col2, 0, col2Len, delCol, ts1);
+    delFam10 = dt.new Delete(empty, 0, 0, delFam, ts1);
+    
+    //ts2
+    ts2 = System.nanoTime();
+    del20 = dt.new Delete(col1, 0, col1Len, del, ts2);
+    del21 = dt.new Delete(col2, 0, col2Len, del, ts2);
+    delQf20 = dt.new Delete(col1, 0, col1Len, delCol, ts2);
+    delQf21 = dt.new Delete(col2, 0, col2Len, delCol, ts2);
+    delFam20 = dt.new Delete(empty, 0, 0, delFam, ts1);
+    
+    //ts3
+    ts3 = System.nanoTime();
+    del30 = dt.new Delete(col1, 0, col1Len, del, ts3);
+  }
+  
+  public void testUpdate_CompareDeletes() {
+    GetDeleteTracker.DeleteCompare res = null;
+    
+    
+    //Testing Delete and Delete
+    res = dt.compareDeletes(del10, del10);
+    assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_BOTH, res);
+    
+    //Testing Delete qf1 and Delete qf2 and <==> 
+    res = dt.compareDeletes(del10, del11);
+    assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_OLD, res);
+    res = dt.compareDeletes(del11, del10);
+    assertEquals(DeleteTracker.DeleteCompare.INCLUDE_NEW_NEXT_NEW, res);
+        
+    //Testing Delete ts1 and Delete ts2 and <==> 
+    res = dt.compareDeletes(del10, del20);
+    assertEquals(DeleteTracker.DeleteCompare.INCLUDE_NEW_NEXT_NEW, res);
+    res = dt.compareDeletes(del20, del10);
+    assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_OLD, res);
+    
+    
+    
+    //Testing DeleteColumn and DeleteColumn
+    res = dt.compareDeletes(delQf10, delQf10);
+    assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_BOTH, res);
+    
+    //Testing DeleteColumn qf1 and DeleteColumn qf2 and <==> 
+    res = dt.compareDeletes(delQf10, delQf11);
+    assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_OLD, res);
+    res = dt.compareDeletes(delQf11, delQf10);
+    assertEquals(DeleteTracker.DeleteCompare.INCLUDE_NEW_NEXT_NEW, res);
+    
+    //Testing DeleteColumn ts1 and DeleteColumn ts2 and <==> 
+    res = dt.compareDeletes(delQf10, delQf20);
+    assertEquals(DeleteTracker.DeleteCompare.INCLUDE_NEW_NEXT_BOTH, res);
+    res = dt.compareDeletes(delQf20, delQf10);
+    assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_BOTH, res);
+    
+    
+    
+    //Testing Delete and DeleteColumn and <==> 
+    res = dt.compareDeletes(del10, delQf10);
+    assertEquals(DeleteTracker.DeleteCompare.NEXT_OLD, res);
+    res = dt.compareDeletes(delQf10, del10);
+    assertEquals(DeleteTracker.DeleteCompare.NEXT_NEW, res);
+
+    //Testing Delete qf1 and DeleteColumn qf2 and <==> 
+    res = dt.compareDeletes(del10, delQf11);
+    assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_OLD, res);
+    res = dt.compareDeletes(delQf11, del10);
+    assertEquals(DeleteTracker.DeleteCompare.INCLUDE_NEW_NEXT_NEW, res);
+    
+    //Testing Delete qf2 and DeleteColumn qf1 and <==> 
+    res = dt.compareDeletes(del11, delQf10);
+    assertEquals(DeleteTracker.DeleteCompare.INCLUDE_NEW_NEXT_NEW, res);
+    res = dt.compareDeletes(delQf10, del11);
+    assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_OLD, res);
+    
+    //Testing Delete ts2 and DeleteColumn ts1 and <==> 
+    res = dt.compareDeletes(del20, delQf10);
+    assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_OLD, res);
+    res = dt.compareDeletes(delQf10, del20);
+    assertEquals(DeleteTracker.DeleteCompare.INCLUDE_NEW_NEXT_NEW, res);
+ 
+    //Testing Delete ts1 and DeleteColumn ts2 and <==> 
+    res = dt.compareDeletes(del10, delQf20);
+    assertEquals(DeleteTracker.DeleteCompare.NEXT_OLD, res);
+    res = dt.compareDeletes(delQf20, del10);
+    assertEquals(DeleteTracker.DeleteCompare.NEXT_NEW, res);
+    
+  }
+  
+  public void testUpdate(){
+    //Building lists
+    List<Delete> dels1 = new ArrayList<Delete>();
+    dels1.add(delQf10);
+    dels1.add(del21);
+    
+    List<Delete> dels2 = new ArrayList<Delete>();
+    dels2.add(delFam10);
+    dels2.add(del30);
+    dels2.add(delQf20);
+    
+    List<Delete> res = new ArrayList<Delete>();
+    res.add(del30);
+    res.add(delQf20);
+    res.add(del21);
+    
+    //Adding entries
+    for(Delete del : dels1){
+      dt.add(del.buffer, del.qualifierOffset, del.qualifierLength,
+          del.timestamp, del.type);
+    }
+    
+    //update()
+    dt.update();
+    
+    //Check deleteList
+    List<Delete> delList = dt.deletes;
+    assertEquals(dels1.size(), delList.size());
+    for(int i=0; i<dels1.size(); i++){
+      assertEquals(0, Bytes.compareTo(dels1.get(i).buffer,
+          delList.get(i).buffer));
+      assertEquals(dels1.get(i).qualifierOffset, delList.get(i).qualifierOffset);
+      assertEquals(dels1.get(i).qualifierLength, delList.get(i).qualifierLength);
+      assertEquals(dels1.get(i).timestamp, delList.get(i).timestamp);
+      assertEquals(dels1.get(i).type, delList.get(i).type);
+    }
+    
+    //Add more entries
+    for(Delete del : dels2){
+      dt.add(del.buffer, del.qualifierOffset, del.qualifierLength,
+          del.timestamp, del.type);
+    }
+    //Update()
+    dt.update();
+    
+    //Check deleteList
+    delList = dt.deletes;
+
+    for(int i=0; i<res.size(); i++){
+      assertEquals(0, Bytes.compareTo(res.get(i).buffer,
+          delList.get(i).buffer));
+      assertEquals(res.get(i).qualifierOffset, delList.get(i).qualifierOffset);
+      assertEquals(res.get(i).qualifierLength, delList.get(i).qualifierLength);
+      assertEquals(res.get(i).timestamp, delList.get(i).timestamp);
+      assertEquals(res.get(i).type, delList.get(i).type);
+      if(PRINT){
+        System.out.println("Qf " +new String(delList.get(i).buffer) + 
+            ", timestamp, " +delList.get(i).timestamp+ 
+            ", type " +KeyValue.Type.codeToType(delList.get(i).type));
+      }
+    }
+    
+  }
+  
+  /**
+   * Test if a KeyValue is in the lists of deletes already. Cases that need to
+   * be tested are:
+   * Not deleted
+   * Deleted by a Delete
+   * Deleted by a DeleteColumn
+   * Deleted by a DeleteFamily
+   */
+  public void testIsDeleted_NotDeleted(){
+    //Building lists
+    List<Delete> dels = new ArrayList<Delete>();
+    dels.add(delQf10);
+    dels.add(del21);
+    
+    //Adding entries
+    for(Delete del : dels){
+      dt.add(del.buffer, del.qualifierOffset, del.qualifierLength,
+          del.timestamp, del.type);
+    }
+    
+    //update()
+    dt.update();
+    
+    assertEquals(false, dt.isDeleted(col2, 0, col2Len, ts3));
+    assertEquals(false, dt.isDeleted(col2, 0, col2Len, ts1));
+  }
+  public void testIsDeleted_Delete(){
+    //Building lists
+    List<Delete> dels = new ArrayList<Delete>();
+    dels.add(del21);
+    
+    //Adding entries
+    for(Delete del : dels){
+      dt.add(del.buffer, del.qualifierOffset, del.qualifierLength, 
+          del.timestamp, del.type);
+    }
+    
+    //update()
+    dt.update();
+    
+    assertEquals(true, dt.isDeleted(col2, 0, col2Len, ts2));
+  }
+  
+  public void testIsDeleted_DeleteColumn(){
+    //Building lists
+    List<Delete> dels = new ArrayList<Delete>();
+    dels.add(delQf21);
+    
+    //Adding entries
+    for(Delete del : dels){
+      dt.add(del.buffer, del.qualifierOffset, del.qualifierLength,
+          del.timestamp, del.type);
+    }
+    
+    //update()
+    dt.update();
+    
+    assertEquals(true, dt.isDeleted(col2, 0, col2Len, ts1));
+  }
+  
+  public void testIsDeleted_DeleteFamily(){
+    //Building lists
+    List<Delete> dels = new ArrayList<Delete>();
+    dels.add(delFam20);
+    
+    //Adding entries
+    for(Delete del : dels){
+      dt.add(del.buffer, del.qualifierOffset, del.qualifierLength,
+          del.timestamp, del.type);
+    }
+    
+    //update()
+    dt.update();
+    
+    assertEquals(true, dt.isDeleted(col2, 0, col2Len, ts1));
+  }
+  
+}

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java?rev=782445&r1=782444&r2=782445&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java Sun Jun  7 19:57:37 2009
@@ -32,6 +32,7 @@
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.SequenceFile.Reader;
 
+
 /** JUnit test case for HLog */
 public class TestHLog extends HBaseTestCase implements HConstants {
   private Path dir;
@@ -169,7 +170,7 @@
         assertTrue(Bytes.equals(regionName, key.getRegionName()));
         assertTrue(Bytes.equals(tableName, key.getTablename()));
         assertTrue(Bytes.equals(HLog.METAROW, val.getRow()));
-        assertTrue(Bytes.equals(HLog.METACOLUMN, val.getColumn()));
+        assertTrue(Bytes.equals(HLog.METAFAMILY, val.getFamily()));
         assertEquals(0, Bytes.compareTo(HLog.COMPLETE_CACHE_FLUSH,
           val.getValue()));
         System.out.println(key + " " + val);


