hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From st...@apache.org
Subject svn commit: r896138 [8/9] - in /hadoop/hbase/branches/0.20: ./ src/contrib/ src/contrib/indexed/ src/contrib/indexed/lib/ src/contrib/indexed/lib/fmpp-0.19.14/ src/contrib/indexed/src/ src/contrib/indexed/src/fmpp/ src/contrib/indexed/src/fmpp/src/ src...
Date Tue, 05 Jan 2010 17:26:53 GMT
Added: hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/arrays/TestIntegerArrayList.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/arrays/TestIntegerArrayList.java?rev=896138&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/arrays/TestIntegerArrayList.java (added)
+++ hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/arrays/TestIntegerArrayList.java Tue Jan  5 17:26:49 2010
@@ -0,0 +1,349 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.idx.support.arrays;
+
+import junit.framework.Assert;
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.commons.lang.ArrayUtils;
+
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+
+
+public class TestIntegerArrayList extends HBaseTestCase {
+
+  /** Indexes that are all out of bounds for an empty list. */
+  private static final int[] INVALID_INDEXES = {0, -1, 1};
+
+  /**
+   * Verifies that the initial size constructor initialises as expected.
+   */
+  public void testInitialSizeAndEmpty() {
+    IntegerArrayList test = new IntegerArrayList();
+    checkSizeAndCapacity(test, 0, 1);
+    Assert.assertTrue(test.isEmpty());
+
+    test = new IntegerArrayList(1000);
+    checkSizeAndCapacity(test, 0, 1000);
+    Assert.assertTrue(test.isEmpty());
+
+    test.add(5);
+    Assert.assertFalse(test.isEmpty());
+  }
+
+  /**
+   * Verifies the copy constructor trims the backing array to the source's size.
+   */
+  public void testCopyConstructor() {
+    // Create an original with a capacity of 2, but only one entry
+    IntegerArrayList original = new IntegerArrayList(2);
+    original.add(1);
+    int[] values = (int[]) getField(original, "values");
+    // Expected value first, per the JUnit assertEquals(expected, actual) contract.
+    Assert.assertEquals(2, values.length);
+    Assert.assertEquals(1, original.size());
+
+    // Create a copy of the original and check that its size + capacity are the minimum
+    IntegerArrayList copy = new IntegerArrayList(original);
+    Assert.assertEquals(1, copy.size());
+    Assert.assertEquals(1, copy.get(0));
+    values = (int[]) getField(copy, "values");
+    Assert.assertEquals(1, values.length);
+  }
+
+  /**
+   * Ensures the equals() method behaves as expected.
+   */
+  public void testEquals() {
+    IntegerArrayList test1a = new IntegerArrayList();
+    test1a.add(1);
+    IntegerArrayList test1b = new IntegerArrayList();
+    test1b.add(1);
+    IntegerArrayList test2 = new IntegerArrayList();
+    test2.add(2);
+
+    Assert.assertTrue(test1a.equals(test1b));
+    Assert.assertFalse(test1a.equals(test2));
+  }
+
+
+  /**
+   * Ensures the number of elements in the list and its backing capacity are what we expect.
+   *
+   * @param test     the list to test
+   * @param size     the expected number of elements in the list
+   * @param capacity the expected capacity
+   */
+  private void checkSizeAndCapacity(IntegerArrayList test, int size, int capacity) {
+    Assert.assertEquals(size, test.size());
+
+    // Peek at the private backing array to verify the allocated capacity.
+    int[] values = (int[]) getField(test, "values");
+
+    Assert.assertEquals(capacity, values.length);
+  }
+
+  /**
+   * Tests that adding elements grows the array size and capacity as expected.
+   */
+  public void testAddGetAndGrow() {
+    // Initialise
+    IntegerArrayList test = new IntegerArrayList();
+    checkSizeAndCapacity(test, 0, 1);
+
+    // Add the first element and we expect the capacity to be unchanged since we don't have any spots consumed.
+    test.add(1);
+    Assert.assertEquals(1, test.get(0));
+    checkSizeAndCapacity(test, 1, 1);
+
+    // Add the next element and we expect the capacity to grow by one only
+    test.add(2);
+    Assert.assertEquals(2, test.get(1));
+    checkSizeAndCapacity(test, 2, 2);
+
+    // Add the next element and we expect the capacity to grow by two
+    test.add(3);
+    Assert.assertEquals(3, test.get(2));
+    checkSizeAndCapacity(test, 3, 4);
+
+    // Add the next element and we expect the capacity to be unchanged
+    test.add(4);
+    Assert.assertEquals(4, test.get(3));
+    checkSizeAndCapacity(test, 4, 4);
+
+    // Add the next element and we expect the capacity to be 1.5+1 times larger
+    test.add(5);
+    Assert.assertEquals(5, test.get(4));
+    checkSizeAndCapacity(test, 5, 7);
+  }
+
+  /**
+   * Tests get() with various invalid indexes.
+   */
+  public void testInvalidGet() {
+    for (int index : INVALID_INDEXES) {
+      try {
+        IntegerArrayList test = new IntegerArrayList();
+        test.get(index);
+      } catch (ArrayIndexOutOfBoundsException ignored) {
+        // Expected: every index in INVALID_INDEXES is out of range for an empty list.
+        continue;
+      }
+      Assert.fail("Expected an array index out of bounds exception");
+    }
+  }
+
+
+  /**
+   * Tests the indexOf() and set() methods.
+   */
+  public void testIndexOfAndSet() {
+    IntegerArrayList test = new IntegerArrayList();
+
+    // Test with first value added to list
+    int testValue = 42;
+    Assert.assertEquals(-1, test.indexOf(testValue));
+    test.add(testValue);
+    Assert.assertEquals(0, test.indexOf(testValue));
+
+    // Add a second one
+    testValue = 43;
+    Assert.assertEquals(-1, test.indexOf(testValue));
+    test.add(testValue);
+    Assert.assertEquals(1, test.indexOf(testValue));
+
+    // Change the first to a new value
+    testValue = 41;
+    Assert.assertEquals(-1, test.indexOf(testValue));
+    test.set(0, testValue);
+    Assert.assertEquals(0, test.indexOf(testValue));
+  }
+
+  /**
+   * Tests the Searchable implementation.
+   */
+  public void testSearchable() {
+    IntegerArrayList test = new IntegerArrayList();
+
+    // Test with first value added to list
+    int testValue = 42;
+    Assert.assertEquals(-1, BinarySearch.search(test, test.size(), testValue));
+    test.add(testValue);
+    Assert.assertEquals(0, BinarySearch.search(test, test.size(), testValue));
+
+    // Add a second one
+    testValue = 43;
+    Assert.assertEquals(-2, BinarySearch.search(test, test.size(), testValue));
+    test.add(testValue);
+    Assert.assertEquals(1, BinarySearch.search(test, test.size(), testValue));
+
+    // Search for something off the start
+    testValue = 41;
+    Assert.assertEquals(-1, BinarySearch.search(test, test.size(), testValue));
+  }
+
+  /**
+   * Tests set() with various invalid indexes.
+   */
+  public void testInvalidSet() {
+    for (int index : INVALID_INDEXES) {
+      try {
+        IntegerArrayList test = new IntegerArrayList();
+        test.set(index, 0);
+      } catch (ArrayIndexOutOfBoundsException ignored) {
+        // Expected: set() on an empty list must reject every index.
+        continue;
+      }
+      Assert.fail("Expected an array index out of bounds exception");
+    }
+  }
+
+
+  /**
+   * Tests iteration via the Iterable interface.
+   */
+  public void testIterable() {
+    final java.util.List<Integer> testData = new ArrayList<Integer>();
+
+    // Test with no content first
+    IntegerArrayList test = new IntegerArrayList();
+    testData.clear();
+    for (int item : test) {
+      testData.add(item);
+    }
+    Assert.assertEquals(0, testData.size());
+
+    // Add a value and ensure it is returned
+    test.add(1);
+    testData.clear();
+    for (int item : test) {
+      testData.add(item);
+    }
+    Assert.assertTrue(ArrayUtils.isEquals(testData.toArray(),
+      new Object[]{1}));
+
+    // Add another value and ensure it is returned
+    test.add(1);
+    testData.clear();
+    for (int item : test) {
+      testData.add(item);
+    }
+    Assert.assertTrue(ArrayUtils.isEquals(testData.toArray(),
+      new Object[]{1, 1}));
+  }
+
+  /**
+   * Tests the remove() method.
+   */
+  public void testRemove() {
+    IntegerArrayList test = new IntegerArrayList();
+    test.add(1);
+    Assert.assertEquals(1, test.get(0));
+    test.remove(0);
+    Assert.assertTrue(test.isEmpty());
+
+    // Add some
+    test.add(0);
+    test.add(1);
+    test.add(2);
+
+    // Remove a value from the middle and ensure correct operation
+    Assert.assertEquals(1, test.remove(1));
+    Assert.assertEquals(0, test.get(0));
+    Assert.assertEquals(2, test.get(1));
+  }
+
+  /**
+   * Tests the insert() method.
+   */
+  public void testInsert() {
+    IntegerArrayList test = new IntegerArrayList();
+    test.insert(0, 1);
+    Assert.assertEquals(1, test.get(0));
+    Assert.assertEquals(1, test.size());
+
+    test.insert(0, 0);
+    Assert.assertEquals(0, test.get(0));
+    Assert.assertEquals(1, test.get(1));
+    Assert.assertEquals(2, test.size());
+
+    test.insert(1, 2);
+    Assert.assertEquals(0, test.get(0));
+    Assert.assertEquals(2, test.get(1));
+    Assert.assertEquals(1, test.get(2));
+    Assert.assertEquals(3, test.size());
+
+    test.insert(3, 3);
+    Assert.assertEquals(0, test.get(0));
+    Assert.assertEquals(2, test.get(1));
+    Assert.assertEquals(1, test.get(2));
+    Assert.assertEquals(3, test.get(3));
+    Assert.assertEquals(4, test.size());
+  }
+
+  /**
+   * Verifies the removeLast() method works as expected.
+   */
+  public void testRemoveLast() {
+    IntegerArrayList test = new IntegerArrayList();
+    test.add(1);
+    test.add(2);
+
+    Assert.assertEquals(2, test.removeLast());
+    Assert.assertEquals(1, test.get(0));
+
+    Assert.assertEquals(1, test.removeLast());
+    Assert.assertTrue(test.isEmpty());
+  }
+
+  /**
+   * Tests remove() with various invalid indexes.
+   */
+  public void testInvalidRemove() {
+    for (int index : INVALID_INDEXES) {
+      try {
+        IntegerArrayList test = new IntegerArrayList();
+        test.remove(index);
+      } catch (ArrayIndexOutOfBoundsException ignored) {
+        // Expected: remove() on an empty list must reject every index.
+        continue;
+      }
+      Assert.fail("Expected an array index out of bounds exception");
+    }
+  }
+
+  /**
+   * Extracts the value of a declared field from a given object via reflection.
+   *
+   * @param target the object from which to extract the field
+   * @param name   the name of the field
+   * @return the value of the field, or null if the test already failed
+   */
+  public static Object getField(Object target, String name) {
+    try {
+      Field field = target.getClass().getDeclaredField(name);
+      field.setAccessible(true);
+      return field.get(target);
+    } catch (IllegalAccessException e) {
+      Assert.fail("Exception " + e);
+    } catch (NoSuchFieldException e) {
+      Assert.fail("Exception " + e);
+    }
+    // Unreachable in practice: Assert.fail throws, but the compiler needs a return.
+    return null;
+  }
+
+}

Added: hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/arrays/TestLongArrayList.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/arrays/TestLongArrayList.java?rev=896138&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/arrays/TestLongArrayList.java (added)
+++ hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/arrays/TestLongArrayList.java Tue Jan  5 17:26:49 2010
@@ -0,0 +1,349 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.idx.support.arrays;
+
+import junit.framework.Assert;
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.commons.lang.ArrayUtils;
+
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+
+
+public class TestLongArrayList extends HBaseTestCase {
+
+  /** Indexes that are all out of bounds for an empty list. */
+  private static final int[] INVALID_INDEXES = {0, -1, 1};
+
+  /**
+   * Verifies that the initial size constructor initialises as expected.
+   */
+  public void testInitialSizeAndEmpty() {
+    LongArrayList test = new LongArrayList();
+    checkSizeAndCapacity(test, 0, 1);
+    Assert.assertTrue(test.isEmpty());
+
+    test = new LongArrayList(1000);
+    checkSizeAndCapacity(test, 0, 1000);
+    Assert.assertTrue(test.isEmpty());
+
+    test.add(5L);
+    Assert.assertFalse(test.isEmpty());
+  }
+
+  /**
+   * Verifies the copy constructor trims the backing array to the source's size.
+   */
+  public void testCopyConstructor() {
+    // Create an original with a capacity of 2, but only one entry
+    LongArrayList original = new LongArrayList(2);
+    original.add(1L);
+    long[] values = (long[]) getField(original, "values");
+    // Expected value first, per the JUnit assertEquals(expected, actual) contract.
+    Assert.assertEquals(2, values.length);
+    Assert.assertEquals(1, original.size());
+
+    // Create a copy of the original and check that its size + capacity are the minimum
+    LongArrayList copy = new LongArrayList(original);
+    Assert.assertEquals(1, copy.size());
+    Assert.assertEquals(1L, copy.get(0));
+    values = (long[]) getField(copy, "values");
+    Assert.assertEquals(1, values.length);
+  }
+
+  /**
+   * Ensures the equals() method behaves as expected.
+   */
+  public void testEquals() {
+    LongArrayList test1a = new LongArrayList();
+    test1a.add(1L);
+    LongArrayList test1b = new LongArrayList();
+    test1b.add(1L);
+    LongArrayList test2 = new LongArrayList();
+    test2.add(2L);
+
+    Assert.assertTrue(test1a.equals(test1b));
+    Assert.assertFalse(test1a.equals(test2));
+  }
+
+
+  /**
+   * Ensures the number of elements in the list and its backing capacity are what we expect.
+   *
+   * @param test     the list to test
+   * @param size     the expected number of elements in the list
+   * @param capacity the expected capacity
+   */
+  private void checkSizeAndCapacity(LongArrayList test, int size, int capacity) {
+    Assert.assertEquals(size, test.size());
+
+    // Peek at the private backing array to verify the allocated capacity.
+    long[] values = (long[]) getField(test, "values");
+
+    Assert.assertEquals(capacity, values.length);
+  }
+
+  /**
+   * Tests that adding elements grows the array size and capacity as expected.
+   */
+  public void testAddGetAndGrow() {
+    // Initialise
+    LongArrayList test = new LongArrayList();
+    checkSizeAndCapacity(test, 0, 1);
+
+    // Add the first element and we expect the capacity to be unchanged since we don't have any spots consumed.
+    test.add(1L);
+    Assert.assertEquals(1L, test.get(0));
+    checkSizeAndCapacity(test, 1, 1);
+
+    // Add the next element and we expect the capacity to grow by one only
+    test.add(2L);
+    Assert.assertEquals(2L, test.get(1));
+    checkSizeAndCapacity(test, 2, 2);
+
+    // Add the next element and we expect the capacity to grow by two
+    test.add(3L);
+    Assert.assertEquals(3L, test.get(2));
+    checkSizeAndCapacity(test, 3, 4);
+
+    // Add the next element and we expect the capacity to be unchanged
+    test.add(4L);
+    Assert.assertEquals(4L, test.get(3));
+    checkSizeAndCapacity(test, 4, 4);
+
+    // Add the next element and we expect the capacity to be 1.5+1 times larger
+    test.add(5L);
+    Assert.assertEquals(5L, test.get(4));
+    checkSizeAndCapacity(test, 5, 7);
+  }
+
+  /**
+   * Tests get() with various invalid indexes.
+   */
+  public void testInvalidGet() {
+    for (int index : INVALID_INDEXES) {
+      try {
+        LongArrayList test = new LongArrayList();
+        test.get(index);
+      } catch (ArrayIndexOutOfBoundsException ignored) {
+        // Expected: every index in INVALID_INDEXES is out of range for an empty list.
+        continue;
+      }
+      Assert.fail("Expected an array index out of bounds exception");
+    }
+  }
+
+
+  /**
+   * Tests the indexOf() and set() methods.
+   */
+  public void testIndexOfAndSet() {
+    LongArrayList test = new LongArrayList();
+
+    // Test with first value added to list
+    long testValue = 42L;
+    Assert.assertEquals(-1, test.indexOf(testValue));
+    test.add(testValue);
+    Assert.assertEquals(0, test.indexOf(testValue));
+
+    // Add a second one
+    testValue = 43L;
+    Assert.assertEquals(-1, test.indexOf(testValue));
+    test.add(testValue);
+    Assert.assertEquals(1, test.indexOf(testValue));
+
+    // Change the first to a new value
+    testValue = 41L;
+    Assert.assertEquals(-1, test.indexOf(testValue));
+    test.set(0, testValue);
+    Assert.assertEquals(0, test.indexOf(testValue));
+  }
+
+  /**
+   * Tests the Searchable implementation.
+   */
+  public void testSearchable() {
+    LongArrayList test = new LongArrayList();
+
+    // Test with first value added to list
+    long testValue = 42L;
+    Assert.assertEquals(-1, BinarySearch.search(test, test.size(), testValue));
+    test.add(testValue);
+    Assert.assertEquals(0, BinarySearch.search(test, test.size(), testValue));
+
+    // Add a second one
+    testValue = 43L;
+    Assert.assertEquals(-2, BinarySearch.search(test, test.size(), testValue));
+    test.add(testValue);
+    Assert.assertEquals(1, BinarySearch.search(test, test.size(), testValue));
+
+    // Search for something off the start
+    testValue = 41L;
+    Assert.assertEquals(-1, BinarySearch.search(test, test.size(), testValue));
+  }
+
+  /**
+   * Tests set() with various invalid indexes.
+   */
+  public void testInvalidSet() {
+    for (int index : INVALID_INDEXES) {
+      try {
+        LongArrayList test = new LongArrayList();
+        test.set(index, 0L);
+      } catch (ArrayIndexOutOfBoundsException ignored) {
+        // Expected: set() on an empty list must reject every index.
+        continue;
+      }
+      Assert.fail("Expected an array index out of bounds exception");
+    }
+  }
+
+
+  /**
+   * Tests iteration via the Iterable interface.
+   */
+  public void testIterable() {
+    final java.util.List<Long> testData = new ArrayList<Long>();
+
+    // Test with no content first
+    LongArrayList test = new LongArrayList();
+    testData.clear();
+    for (long item : test) {
+      testData.add(item);
+    }
+    Assert.assertEquals(0, testData.size());
+
+    // Add a value and ensure it is returned
+    test.add(1L);
+    testData.clear();
+    for (long item : test) {
+      testData.add(item);
+    }
+    Assert.assertTrue(ArrayUtils.isEquals(testData.toArray(),
+      new Object[]{1L}));
+
+    // Add another value and ensure it is returned
+    test.add(1L);
+    testData.clear();
+    for (long item : test) {
+      testData.add(item);
+    }
+    Assert.assertTrue(ArrayUtils.isEquals(testData.toArray(),
+      new Object[]{1L, 1L}));
+  }
+
+  /**
+   * Tests the remove() method.
+   */
+  public void testRemove() {
+    LongArrayList test = new LongArrayList();
+    test.add(1L);
+    Assert.assertEquals(1L, test.get(0));
+    test.remove(0);
+    Assert.assertTrue(test.isEmpty());
+
+    // Add some
+    test.add(0L);
+    test.add(1L);
+    test.add(2L);
+
+    // Remove a value from the middle and ensure correct operation
+    Assert.assertEquals(1L, test.remove(1));
+    Assert.assertEquals(0L, test.get(0));
+    Assert.assertEquals(2L, test.get(1));
+  }
+
+  /**
+   * Tests the insert() method.
+   */
+  public void testInsert() {
+    LongArrayList test = new LongArrayList();
+    test.insert(0, 1L);
+    Assert.assertEquals(1L, test.get(0));
+    Assert.assertEquals(1, test.size());
+
+    test.insert(0, 0L);
+    Assert.assertEquals(0L, test.get(0));
+    Assert.assertEquals(1L, test.get(1));
+    Assert.assertEquals(2, test.size());
+
+    test.insert(1, 2L);
+    Assert.assertEquals(0L, test.get(0));
+    Assert.assertEquals(2L, test.get(1));
+    Assert.assertEquals(1L, test.get(2));
+    Assert.assertEquals(3, test.size());
+
+    test.insert(3, 3L);
+    Assert.assertEquals(0L, test.get(0));
+    Assert.assertEquals(2L, test.get(1));
+    Assert.assertEquals(1L, test.get(2));
+    Assert.assertEquals(3L, test.get(3));
+    Assert.assertEquals(4, test.size());
+  }
+
+  /**
+   * Verifies the removeLast() method works as expected.
+   */
+  public void testRemoveLast() {
+    LongArrayList test = new LongArrayList();
+    test.add(1L);
+    test.add(2L);
+
+    Assert.assertEquals(2L, test.removeLast());
+    Assert.assertEquals(1L, test.get(0));
+
+    Assert.assertEquals(1L, test.removeLast());
+    Assert.assertTrue(test.isEmpty());
+  }
+
+  /**
+   * Tests remove() with various invalid indexes.
+   */
+  public void testInvalidRemove() {
+    for (int index : INVALID_INDEXES) {
+      try {
+        LongArrayList test = new LongArrayList();
+        test.remove(index);
+      } catch (ArrayIndexOutOfBoundsException ignored) {
+        // Expected: remove() on an empty list must reject every index.
+        continue;
+      }
+      Assert.fail("Expected an array index out of bounds exception");
+    }
+  }
+
+  /**
+   * Extracts the value of a declared field from a given object via reflection.
+   *
+   * @param target the object from which to extract the field
+   * @param name   the name of the field
+   * @return the value of the field, or null if the test already failed
+   */
+  public static Object getField(Object target, String name) {
+    try {
+      Field field = target.getClass().getDeclaredField(name);
+      field.setAccessible(true);
+      return field.get(target);
+    } catch (IllegalAccessException e) {
+      Assert.fail("Exception " + e);
+    } catch (NoSuchFieldException e) {
+      Assert.fail("Exception " + e);
+    }
+    // Unreachable in practice: Assert.fail throws, but the compiler needs a return.
+    return null;
+  }
+
+}

Added: hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/arrays/TestShortArrayList.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/arrays/TestShortArrayList.java?rev=896138&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/arrays/TestShortArrayList.java (added)
+++ hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/arrays/TestShortArrayList.java Tue Jan  5 17:26:49 2010
@@ -0,0 +1,349 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.idx.support.arrays;
+
+import junit.framework.Assert;
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.commons.lang.ArrayUtils;
+
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+
+
+public class TestShortArrayList extends HBaseTestCase {
+
+
+
+  private static final int[] INVALID_INDEXES = {0, -1, 1};
+
+  /**
+   * Checks that both the default and the sized constructor produce an empty list
+   * with the expected backing capacity, and that adding an element clears isEmpty().
+   */
+  public void testInitialSizeAndEmpty() {
+    // Default constructor: empty, with a minimal capacity of one.
+    ShortArrayList list = new ShortArrayList();
+    checkSizeAndCapacity(list, 0, 1);
+    Assert.assertTrue(list.isEmpty());
+
+    // Explicitly sized constructor: still empty, but pre-allocated.
+    list = new ShortArrayList(1000);
+    checkSizeAndCapacity(list, 0, 1000);
+    Assert.assertTrue(list.isEmpty());
+
+    // A single add must flip the empty flag.
+    list.add((short) 5);
+    Assert.assertFalse(list.isEmpty());
+  }
+
+  /**
+   * Verifies the copy constructor trims the backing array to the source's size.
+   */
+  public void testCopyConstructor() {
+    // Create an original with a capacity of 2, but only one entry
+    ShortArrayList original = new ShortArrayList(2);
+    original.add((short) 1);
+    short[] values = (short[]) getField(original, "values");
+    // Expected value first, per the JUnit assertEquals(expected, actual) contract.
+    Assert.assertEquals(2, values.length);
+    Assert.assertEquals(1, original.size());
+
+    // Create a copy of the original and check that its size + capacity are the minimum
+    ShortArrayList copy = new ShortArrayList(original);
+    Assert.assertEquals(1, copy.size());
+    Assert.assertEquals((short) 1, copy.get(0));
+    values = (short[]) getField(copy, "values");
+    Assert.assertEquals(1, values.length);
+  }
+
+  /**
+   * Ensures equals() reports equality for lists with identical content and
+   * inequality for lists with different content.
+   */
+  public void testEquals() {
+    ShortArrayList first = new ShortArrayList();
+    first.add((short) 1);
+    ShortArrayList sameContent = new ShortArrayList();
+    sameContent.add((short) 1);
+    ShortArrayList differentContent = new ShortArrayList();
+    differentContent.add((short) 2);
+
+    Assert.assertTrue(first.equals(sameContent));
+    Assert.assertFalse(first.equals(differentContent));
+  }
+
+
+  /**
+   * Ensures the number of elements in the list and its backing capacity are what we expect.
+   *
+   * @param test     the list to test
+   * @param size     the expected number of elements in the list
+   * @param capacity the expected capacity
+   */
+  private void checkSizeAndCapacity(ShortArrayList test, int size, int capacity) {
+    // Expected value first, per the JUnit assertEquals(expected, actual) contract.
+    Assert.assertEquals(size, test.size());
+
+    // Peek at the private backing array to verify the allocated capacity.
+    short[] values = (short[]) getField(test, "values");
+
+    Assert.assertEquals(capacity, values.length);
+  }
+
+  /**
+   * Tests that adding elements grows the array size and capacity as expected.
+   */
+  public void testAddGetAndGrow() {
+    // Initialise
+    ShortArrayList test = new ShortArrayList();
+    checkSizeAndCapacity(test, 0, 1);
+
+    // Add the first element and we expect the capacity to be unchanged since we don't have any spots consumed.
+    test.add((short) 1);
+    Assert.assertEquals((short) 1, test.get(0));
+    checkSizeAndCapacity(test, 1, 1);
+
+    // Add the next element and we expect the capacity to grow by one only
+    test.add((short) 2);
+    Assert.assertEquals((short) 2, test.get(1));
+    checkSizeAndCapacity(test, 2, 2);
+
+    // Add the next element and we expect the capacity to grow by two
+    test.add((short) 3);
+    Assert.assertEquals((short) 3, test.get(2));
+    checkSizeAndCapacity(test, 3, 4);
+
+    // Add the next element and we expect the capacity to be unchanged
+    test.add((short) 4);
+    Assert.assertEquals((short) 4, test.get(3));
+    checkSizeAndCapacity(test, 4, 4);
+
+    // Add the next element and we expect the capacity to be 1.5+1 times larger
+    test.add((short) 5);
+    Assert.assertEquals((short) 5, test.get(4));
+    checkSizeAndCapacity(test, 5, 7);
+  }
+
+  /**
+   * Verifies that get() throws for every out-of-bounds index on an empty list.
+   */
+  public void testInvalidGet() {
+    for (int badIndex : INVALID_INDEXES) {
+      ShortArrayList empty = new ShortArrayList();
+      try {
+        empty.get(badIndex);
+        // Reaching this line means no exception was thrown.
+        Assert.fail("Expected an array index out of bounds exception");
+      } catch (ArrayIndexOutOfBoundsException ignored) {
+        // Expected: every index is invalid for an empty list.
+      }
+    }
+  }
+
+
+  /**
+   * Tests the indexOf() and set() methods.
+   */
+  public void testIndexOfAndSet() {
+    ShortArrayList test = new ShortArrayList();
+
+    // Test with first value added to list
+    short testValue = (short) 42;
+    // Expected value first, per the JUnit assertEquals(expected, actual) contract.
+    Assert.assertEquals(-1, test.indexOf(testValue));
+    test.add(testValue);
+    Assert.assertEquals(0, test.indexOf(testValue));
+
+    // Add a second one
+    testValue = (short) 43;
+    Assert.assertEquals(-1, test.indexOf(testValue));
+    test.add(testValue);
+    Assert.assertEquals(1, test.indexOf(testValue));
+
+    // Change the first to a new value
+    testValue = (short) 41;
+    Assert.assertEquals(-1, test.indexOf(testValue));
+    test.set(0, testValue);
+    Assert.assertEquals(0, test.indexOf(testValue));
+  }
+
+  /**
+   * Tests the Searchable implementation.
+   */
+  public void testSearchable() {
+    ShortArrayList test = new ShortArrayList();
+
+    // Test with first value added to list
+    short testValue = (short) 42;
+    // Expected value first, per the JUnit assertEquals(expected, actual) contract.
+    Assert.assertEquals(-1, BinarySearch.search(test, test.size(), testValue));
+    test.add(testValue);
+    Assert.assertEquals(0, BinarySearch.search(test, test.size(), testValue));
+
+    // Add a second one
+    testValue = (short) 43;
+    Assert.assertEquals(-2, BinarySearch.search(test, test.size(), testValue));
+    test.add(testValue);
+    Assert.assertEquals(1, BinarySearch.search(test, test.size(), testValue));
+
+    // Search for something off the start
+    testValue = (short) 41;
+    Assert.assertEquals(-1, BinarySearch.search(test, test.size(), testValue));
+  }
+
+  /**
+   * Verifies that set() throws for every out-of-bounds index on an empty list.
+   */
+  public void testInvalidSet() {
+    for (int badIndex : INVALID_INDEXES) {
+      ShortArrayList empty = new ShortArrayList();
+      try {
+        empty.set(badIndex, (short) 0);
+        // Reaching this line means no exception was thrown.
+        Assert.fail("Expected an array index out of bounds exception");
+      } catch (ArrayIndexOutOfBoundsException ignored) {
+        // Expected: every index is invalid for an empty list.
+      }
+    }
+  }
+
+
+  /**
+   * Tests iteration via the Iterable interface.
+   */
+  public void testIterable() {
+    final java.util.List<Short> testData = new ArrayList<Short>();
+
+    // Test with no content first
+    ShortArrayList test = new ShortArrayList();
+    testData.clear();
+    for (short item : test) {
+      testData.add(item);
+    }
+    // Expected value first, per the JUnit assertEquals(expected, actual) contract.
+    Assert.assertEquals(0, testData.size());
+
+    // Add a value and ensure it is returned
+    test.add((short) 1);
+    testData.clear();
+    for (short item : test) {
+      testData.add(item);
+    }
+    Assert.assertTrue(ArrayUtils.isEquals(testData.toArray(),
+      new Object[]{(short) 1}));
+
+    // Add another value and ensure it is returned
+    test.add((short) 1);
+    testData.clear();
+    for (short item : test) {
+      testData.add(item);
+    }
+    Assert.assertTrue(ArrayUtils.isEquals(testData.toArray(),
+      new Object[]{(short) 1, (short) 1}));
+  }
+
+  /**
+   * Tests the remove() method.
+   */
+  public void testRemove() {
+    ShortArrayList test = new ShortArrayList();
+    test.add((short) 1);
+    Assert.assertEquals(test.get(0), (short) 1);
+    //Assert.assertEquals(test.get(0), (short) 1);
+    test.remove(0);
+    Assert.assertTrue(test.isEmpty());
+
+    // Add some
+    test.add((short) 0);
+    test.add((short) 1);
+    test.add((short) 2);
+
+    // Remove a value from the middle and ensure correct operation
+    Assert.assertEquals(test.remove(1), (short) 1);
+    Assert.assertEquals(test.get(0), (short) 0);
+    Assert.assertEquals(test.get(1), (short) 2);
+  }
+
+  /**
+   * Tests the remove() method.
+   */
+  public void testInsert() {
+    ShortArrayList test = new ShortArrayList();
+    test.insert(0, (short) 1);
+    Assert.assertEquals(test.get(0), (short) 1);
+    Assert.assertEquals(test.size(), 1);
+
+    test.insert(0, (short) 0);
+    Assert.assertEquals(test.get(0), (short) 0);
+    Assert.assertEquals(test.get(1), (short) 1);
+    Assert.assertEquals(test.size(), 2);
+
+    test.insert(1, (short) 2);
+    Assert.assertEquals(test.get(0), (short) 0);
+    Assert.assertEquals(test.get(1), (short) 2);
+    Assert.assertEquals(test.get(2), (short) 1);
+    Assert.assertEquals(test.size(), 3);
+
+    test.insert(3, (short) 3);
+    Assert.assertEquals(test.get(0), (short) 0);
+    Assert.assertEquals(test.get(1), (short) 2);
+    Assert.assertEquals(test.get(2), (short) 1);
+    Assert.assertEquals(test.get(3), (short) 3);
+    Assert.assertEquals(test.size(), 4);
+  }
+
+  /**
+   * Verifies the removeLast() method works as expected.
+   */
+  public void testRemoveLast() {
+    ShortArrayList test = new ShortArrayList();
+    test.add((short) 1);
+    test.add((short) 2);
+
+    Assert.assertEquals(test.removeLast(), (short) 2);
+    Assert.assertEquals(test.get(0), (short) 1);
+
+    Assert.assertEquals(test.removeLast(), (short) 1);
+    Assert.assertTrue(test.isEmpty());
+  }
+
+  /**
+   * Tests remove() with various invalid ranges.
+   */
+  public void testInvalidRemove() {
+    for (int index : INVALID_INDEXES) {
+      try {
+        ShortArrayList test = new ShortArrayList();
+        test.remove(index);
+      } catch (ArrayIndexOutOfBoundsException ignored) {
+        continue;
+      }
+      Assert.fail("Expected an array index out of bounds exception");
+    }
+  }
+
+  /**
+   * Extracts a declared field from a given object.
+   *
+   * @param target the object from which to extract the field
+   * @param name   the name of the field
+   * @return the declared field
+   */
+  public static Object getField(Object target, String name) {
+    try {
+      Field field = target.getClass().getDeclaredField(name);
+      field.setAccessible(true);
+      return field.get(target);
+    } catch (IllegalAccessException e) {
+      Assert.fail("Exception " + e);
+    } catch (NoSuchFieldException e) {
+      Assert.fail("Exception " + e);
+    }
+    return null;
+  }
+
+}

Added: hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/sets/IntSetBaseTestCase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/sets/IntSetBaseTestCase.java?rev=896138&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/sets/IntSetBaseTestCase.java (added)
+++ hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/sets/IntSetBaseTestCase.java Tue Jan  5 17:26:49 2010
@@ -0,0 +1,292 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.idx.support.sets;
+
+import junit.framework.Assert;
+import junit.framework.AssertionFailedError;
+import org.apache.hadoop.hbase.HBaseTestCase;
+
+/**
+ * A base test-case of {@link IntSets}.
+ */
+public abstract class IntSetBaseTestCase extends HBaseTestCase {
+
+  private static final int[] SOME = new int[]{0, 10, 63, 64, 99, 103, 104, 200,
+    800, 805};
+
+  /**
+   * Creates a sparse bitset with the given elements and maximum.
+   *
+   * @param capacity       the maximum
+   * @param sortedElements the elements assumed to be sorted
+   * @return the new sparse bitset.
+   */
+  public static IntSetBase createSparseBitSet(int capacity,
+    int... sortedElements) {
+    SparseBitSet bitSet = new SparseBitSet();
+    for (int element : sortedElements) {
+      bitSet.addNext(element);
+    }
+    bitSet.setCapacity(capacity);
+    return bitSet;
+  }
+
+  /**
+   * Creates a bitset with the given elements and maximum.
+   *
+   * @param capacity       the maximum
+   * @param sortedElements the elements assumed to be sorted
+   * @return the new bitset.
+   */
+  public static IntSetBase createBitSet(int capacity, int... sortedElements) {
+    BitSet bitSet = new BitSet(capacity);
+    for (int element : sortedElements) {
+      bitSet.addNext(element);
+    }
+    return bitSet;
+  }
+
+  protected abstract IntSetBase newSet(int capacity, int... sortedElements);
+
+  protected void addSome(IntSetBase bitSet) {
+    for (int next : SOME) {
+      bitSet.addNext(next);
+    }
+  }
+
+  protected static void fill(IntSetBase bitSet) {
+    for (int i = 0; i < bitSet.capacity(); i++) {
+      bitSet.addNext(i);
+    }
+  }
+
+
+  protected void assertSetsEqual(IntSet set1, IntSet set2) {
+    Assert.assertEquals(set1.capacity(), set2.capacity());
+    Assert.assertEquals(set1.size(), set2.size());
+    IntSet.IntSetIterator iter1 = set1.iterator(), iter2 = set2.iterator();
+    while (iter1.hasNext() || iter2.hasNext()) {
+      Assert.assertEquals(iter1.next(), iter2.next());
+    }
+  }
+
+  protected void assertSetsNotEqual(IntSetBase set1, IntSetBase set2) {
+    try {
+      assertSetsEqual(set1, set2);
+      fail("Sets are equal");
+    } catch (AssertionFailedError ignored) {
+    }
+  }
+
+  public void testAdd() {
+    IntSetBase bitSet = newSet(1000);
+
+    try {
+      bitSet.addNext(-1);
+      Assert.fail("expected an error");
+    } catch (AssertionError ignored) {
+    }
+
+    addSome(bitSet);
+    Assert.assertEquals(bitSet.size(), SOME.length);
+
+    try {
+      bitSet.addNext(805);
+      Assert.fail("expected an error");
+    } catch (AssertionError ignored) {
+    }
+
+    try {
+      bitSet.addNext(1000);
+      Assert.fail("expected an error");
+    } catch (AssertionError ignored) {
+    }
+  }
+
+  public void testContains() {
+    IntSetBase intSet = newSet(1000);
+    addSome(intSet);
+    for (int next : SOME) {
+      Assert.assertTrue(intSet.contains(next));
+    }
+
+    int sum = 0;
+    for (int i = 0; i < intSet.capacity(); i++) {
+      sum += intSet.contains(i) ? 1 : 0;
+    }
+    Assert.assertEquals(sum, SOME.length);
+  }
+
+  public void testClear() {
+    IntSetBase intSet = newSet(1000);
+    Assert.assertEquals(intSet.size(), 0);
+    Assert.assertTrue(intSet.isEmpty());
+    intSet.clear();
+
+    addClearAndCheck(intSet);
+    addClearAndCheck(intSet);
+  }
+
+
+  private void addClearAndCheck(IntSetBase intSetBase) {
+    addSome(intSetBase);
+    Assert.assertEquals(intSetBase.size(), SOME.length);
+    Assert.assertFalse(intSetBase.isEmpty());
+    intSetBase.clear();
+    Assert.assertEquals(intSetBase.size(), 0);
+    Assert.assertTrue(intSetBase.isEmpty());
+  }
+
+  public void testClone() {
+    IntSetBase intSet = newSet(10000);
+    IntSetBase otherIntSet = (IntSetBase) intSet.clone();
+    assertSetsEqual(intSet, otherIntSet);
+
+    addSome(intSet);
+    assertSetsNotEqual(intSet, otherIntSet);
+
+    otherIntSet = (IntSetBase) intSet.clone();
+    assertSetsEqual(intSet, otherIntSet);
+
+    intSet.addNext(1001);
+    Assert.assertEquals(intSet.size(), otherIntSet.size() + 1);
+    Assert.assertFalse(otherIntSet.contains(1001));
+  }
+
+  public void testIterator() {
+    IntSetBase intSet = newSet(1000);
+    IntSet.IntSetIterator iter = intSet.iterator();
+    Assert.assertFalse(iter.hasNext());
+
+    addSome(intSet);
+    iter = intSet.iterator();
+    for (int num : SOME) {
+      Assert.assertTrue(iter.hasNext());
+      Assert.assertEquals(num, iter.next());
+    }
+    Assert.assertFalse(iter.hasNext());
+
+    intSet = new BitSet(1000);
+    fill(intSet);
+    iter = intSet.iterator();
+    for (int num = 0; num < 1000; num++) {
+      Assert.assertTrue(iter.hasNext());
+      Assert.assertEquals(num, iter.next());
+    }
+    Assert.assertFalse(iter.hasNext());
+  }
+
+
+  public void testComplement() {
+    for (int capacity = 950; capacity < 1050; capacity++) {
+      IntSetBase intSet = newSet(capacity);
+      Assert.assertEquals(intSet.size(), 0);
+      Assert.assertEquals(intSet.complement().size(), capacity);
+    }
+
+    IntSetBase intSet = newSet(1001);
+    addSome(intSet);
+    BitSet cBitSet = (BitSet) intSet.clone().complement();
+    Assert.assertEquals(cBitSet.size() + intSet.size(), 1001);
+    for (int i = 0; i < 1001; i++) {
+      Assert.assertTrue(intSet.contains(i) != cBitSet.contains(i));
+    }
+  }
+
+  public void testIntersect() {
+    IntSetBase intset1 = newSet(1013, 3, 7, 34, 87, 178, 244, 507, 643, 765,
+      999);
+    IntSetBase intset2 = newSet(1013);
+
+    Assert.assertTrue(intset1.clone().intersect(intset2).isEmpty());
+    Assert.assertTrue(intset2.clone().intersect(intset1).isEmpty());
+
+    assertSetsEqual(intset1.clone().intersect(intset1.clone()), intset1);
+    intset2 = newSet(1013);
+    fill(intset2);
+    assertSetsEqual(intset1.clone().intersect(intset2), intset1);
+
+    assertSetsEqual(intset1.clone().intersect(newSet(1013, 34, 63, 64, 65, 107,
+      244, 340, 765, 894, 1012)),
+      newSet(1013, 34, 244, 765));
+  }
+
+
+  public void testUnite() {
+    IntSetBase intset1 = newSet(1013, 3, 7, 34, 87, 178, 244, 507, 643, 765,
+      999);
+    IntSetBase intset2 = newSet(1013);
+
+    assertSetsEqual(intset1.clone().unite(intset2), intset1);
+    assertSetsEqual(intset2.clone().unite(intset1), intset1);
+
+    assertSetsEqual(intset1.clone().unite(intset1.clone()), intset1);
+    intset2 = newSet(1013);
+    fill(intset2);
+    assertSetsEqual(intset1.clone().unite(intset2), intset2);
+
+    assertSetsEqual(intset1.clone().unite(newSet(1013, 34, 63, 64, 65, 107, 244,
+      340, 765, 894, 1012)),
+      newSet(1013, 3, 7, 34, 63, 64, 65, 87, 107, 178, 244, 340, 507, 643, 765
+        , 894, 999, 1012));
+  }
+
+  public void testSubtract() {
+    IntSetBase intset1 = newSet(1013, 3, 7, 34, 87, 178, 244, 507, 643,
+      765, 999);
+    IntSetBase intset2 = newSet(1013);
+
+    assertSetsEqual(intset1.clone().subtract(intset2), intset1);
+    assertSetsEqual(intset2.clone().subtract(intset1), intset2);
+
+    assertSetsEqual(intset1.clone().subtract(intset1.clone()), intset2);
+    intset2 = newSet(1013);
+    fill(intset2);
+    assertSetsEqual(intset1.clone().subtract(intset2), newSet(1013));
+    assertSetsEqual(intset2.clone().subtract(intset1),
+      intset1.clone().complement());
+
+    assertSetsEqual(intset1.clone().subtract(newSet(1013, 34, 63, 64, 65,
+      107, 244, 340, 765, 894, 1012)),
+      newSet(1013, 3, 7, 87, 178, 507, 643, 999));
+  }
+
+  public void testDifference() {
+    IntSetBase intset1 = newSet(1013, 3, 7, 34, 87, 178, 244, 507, 643,
+      765, 999);
+    IntSetBase intset2 = newSet(1013);
+
+    assertSetsEqual(intset1.clone().difference(intset2), intset1);
+    assertSetsEqual(intset2.clone().difference(intset1), intset1);
+
+    assertSetsEqual(intset1.clone().difference(intset1.clone()), intset2);
+    intset2 = newSet(1013);
+    fill(intset2);
+    assertSetsEqual(intset1.clone().difference(intset2),
+      intset1.clone().complement());
+    assertSetsEqual(intset2.clone().difference(intset1),
+      intset1.clone().complement());
+
+    assertSetsEqual(intset1.clone().difference(newSet(1013, 34, 63, 64, 65,
+      107, 244, 340, 765, 894, 1012)),
+      newSet(1013, 3, 7, 63, 64, 65, 87, 107, 178, 340, 507, 643, 894,
+        999, 1012));
+  }
+}

Added: hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/sets/TestBitSet.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/sets/TestBitSet.java?rev=896138&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/sets/TestBitSet.java (added)
+++ hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/sets/TestBitSet.java Tue Jan  5 17:26:49 2010
@@ -0,0 +1,44 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.idx.support.sets;
+
+import org.apache.hadoop.hbase.util.ClassSize;
+
+/**
+ * Tests the {@link BitSet}.
+ */
+public class TestBitSet extends IntSetBaseTestCase {
+
+
+  @Override
+  protected IntSetBase newSet(int capacity, int... sortedElements) {
+    return createBitSet(capacity, sortedElements);
+  }
+
+  /**
+   * Tests that the heap size estimate of the fixed parts matches the
+   * FIXED SIZE constant.
+   */
+  public void testHeapSize() {
+    assertEquals(ClassSize.estimateBase(BitSet.class, false),
+      BitSet.FIXED_SIZE);
+  }
+
+}

Added: hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/sets/TestSparseBitSet.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/sets/TestSparseBitSet.java?rev=896138&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/sets/TestSparseBitSet.java (added)
+++ hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/sets/TestSparseBitSet.java Tue Jan  5 17:26:49 2010
@@ -0,0 +1,43 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.idx.support.sets;
+
+import org.apache.hadoop.hbase.util.ClassSize;
+
+/**
+ * Tests the {@link SparseBitSet} implementation.
+ */
+public class TestSparseBitSet extends IntSetBaseTestCase {
+
+  @Override
+  protected IntSetBase newSet(int max, int... sortedElements) {
+    return createSparseBitSet(max, sortedElements);
+  }
+
+  /**
+   * Tests that the heap size estimate of the fixed parts matches the
+   * FIXED SIZE constant.
+   */
+  public void testHeapSize() {
+    assertEquals(ClassSize.estimateBase(SparseBitSet.class, false),
+      SparseBitSet.FIXED_SIZE);
+  }
+
+}
\ No newline at end of file

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HConstants.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HConstants.java?rev=896138&r1=896137&r2=896138&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HConstants.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HConstants.java Tue Jan  5 17:26:49 2010
@@ -111,6 +111,9 @@
   /** Default region server interface class name. */
   static final String DEFAULT_REGION_SERVER_CLASS = HRegionInterface.class.getName();
 
+  /** Parameter name for what region implementation to use. */
+  static final String REGION_IMPL= "hbase.hregion.impl";
+
   /** Parameter name for how often threads should wake up */
   static final String THREAD_WAKE_FREQUENCY = "hbase.server.thread.wakefrequency";
   

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HMerge.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HMerge.java?rev=896138&r1=896137&r2=896138&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HMerge.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HMerge.java Tue Jan  5 17:26:49 2010
@@ -150,12 +150,12 @@
       for (int i = 0; i < info.length - 1; i++) {
         if (currentRegion == null) {
           currentRegion =
-            new HRegion(tabledir, hlog, fs, conf, info[i], null);
+            HRegion.newHRegion(tabledir, hlog, fs, conf, info[i], null);
           currentRegion.initialize(null, null);
           currentSize = currentRegion.getLargestHStoreSize();
         }
         nextRegion =
-          new HRegion(tabledir, hlog, fs, conf, info[i + 1], null);
+          HRegion.newHRegion(tabledir, hlog, fs, conf, info[i + 1], null);
         nextRegion.initialize(null, null);
         nextSize = nextRegion.getLargestHStoreSize();
 
@@ -324,7 +324,7 @@
 
       // Scan root region to find all the meta regions
       
-      root = new HRegion(rootTableDir, hlog, fs, conf,
+      root = HRegion.newHRegion(rootTableDir, hlog, fs, conf,
           HRegionInfo.ROOT_REGIONINFO, null);
       root.initialize(null, null);
 

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/Scan.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/Scan.java?rev=896138&r1=896137&r2=896138&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/Scan.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/Scan.java Tue Jan  5 17:26:49 2010
@@ -27,6 +27,9 @@
 import java.util.NavigableSet;
 import java.util.TreeMap;
 import java.util.TreeSet;
+import java.util.HashMap;
+import java.util.Collections;
+import java.util.SortedSet;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
@@ -34,7 +37,9 @@
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.RowFilterInterface;
 import org.apache.hadoop.hbase.io.TimeRange;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 
@@ -74,6 +79,15 @@
  * execute {@link #setCacheBlocks(boolean)}.
  */
 public class Scan implements Writable {
+  // An empty navigable set to be used when adding a whole family
+  private static final NavigableSet<byte[]> EMPTY_NAVIGABLE_SET
+    = new TreeSet<byte[]>();
+  // Version -1 is the first scan version. A negative number is used to
+  // distinguish a versioned scan from the previous (unversioned) format,
+  // which used this leading byte to encode the start-key length.
+  private static final byte SCAN_VERSION = (byte) -1;
+
+
   private byte [] startRow = HConstants.EMPTY_START_ROW;
   private byte [] stopRow  = HConstants.EMPTY_END_ROW;
   private int maxVersions = 1;
@@ -84,6 +98,9 @@
   private TimeRange tr = new TimeRange();
   private Map<byte [], NavigableSet<byte []>> familyMap =
     new TreeMap<byte [], NavigableSet<byte []>>(Bytes.BYTES_COMPARATOR);
+  // additional data for the scan
+  protected Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
+    new HashMap<ImmutableBytesWritable, ImmutableBytesWritable>();
   
   /**
    * Create a Scan operation across all rows.
@@ -153,7 +170,7 @@
    */
   public Scan addFamily(byte [] family) {
     familyMap.remove(family);
-    familyMap.put(family, null);
+    familyMap.put(family, EMPTY_NAVIGABLE_SET);
     return this;
   }
   
@@ -489,6 +506,80 @@
   public boolean getCacheBlocks() {
     return cacheBlocks;
   }
+
+  /**
+   * @param key The key.
+   * @return The value.
+   */
+  public byte[] getValue(byte[] key) {
+    return getValue(new ImmutableBytesWritable(key));
+  }
+
+  private byte[] getValue(final ImmutableBytesWritable key) {
+    ImmutableBytesWritable ibw = values.get(key);
+    if (ibw == null)
+      return null;
+    return ibw.get();
+  }
+
+  /**
+   * @param key The key.
+   * @return The value as a string.
+   */
+  public String getValue(String key) {
+    byte[] value = getValue(Bytes.toBytes(key));
+    if (value == null)
+      return null;
+    return Bytes.toString(value);
+  }
+
+  /**
+   * @return All values.
+   */
+  public Map<ImmutableBytesWritable,ImmutableBytesWritable> getValues() {
+     return Collections.unmodifiableMap(values);
+  }
+
+  /**
+   * @param key The key.
+   * @param value The value.
+   */
+  public void setValue(byte[] key, byte[] value) {
+    setValue(new ImmutableBytesWritable(key), value);
+  }
+
+  /*
+   * @param key The key.
+   * @param value The value.
+   */
+  private void setValue(final ImmutableBytesWritable key,
+      final byte[] value) {
+    values.put(key, new ImmutableBytesWritable(value));
+  }
+
+  /*
+   * @param key The key.
+   * @param value The value.
+   */
+  private void setValue(final ImmutableBytesWritable key,
+      final ImmutableBytesWritable value) {
+    values.put(key, value);
+  }
+
+  /**
+   * @param key The key.
+   * @param value The value.
+   */
+  public void setValue(String key, String value) {
+    setValue(Bytes.toBytes(key), Bytes.toBytes(value));
+  }
+
+  /**
+   * @param key Key whose key and value we're to remove from HTD parameters.
+   */
+  public void remove(final byte [] key) {
+    values.remove(new ImmutableBytesWritable(key));
+  }
   
   /**
    * @return String
@@ -541,6 +632,21 @@
       }
     }
     sb.append("}");
+    
+    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
+        values.entrySet()) {
+      String key = Bytes.toString(e.getKey().get());
+      String value = Bytes.toString(e.getValue().get());
+      if (key == null) {
+        continue;
+      }
+      sb.append(", ");
+      sb.append(key);
+      sb.append(" => '");
+      sb.append(value);
+      sb.append("'");
+    }
+
     return sb.toString();
   }
   
@@ -558,7 +664,13 @@
   //Writable
   public void readFields(final DataInput in)
   throws IOException {
-    this.startRow = Bytes.readByteArray(in);
+    byte versionOrLength = in.readByte();
+    if (versionOrLength == SCAN_VERSION) {
+      this.startRow = Bytes.readByteArray(in);
+    } else {
+      int length = (int) Writables.readVLong(in, versionOrLength);
+      this.startRow = Bytes.readByteArray(in, length);
+    }
     this.stopRow = Bytes.readByteArray(in);
     this.maxVersions = in.readInt();
     this.caching = in.readInt();
@@ -587,10 +699,22 @@
       }
       this.familyMap.put(family, set);
     }
-  }  
+    this.values.clear();
+    if (versionOrLength == SCAN_VERSION) {
+      int numValues = in.readInt();
+      for (int i = 0; i < numValues; i++) {
+        ImmutableBytesWritable key = new ImmutableBytesWritable();
+        ImmutableBytesWritable value = new ImmutableBytesWritable();
+        key.readFields(in);
+        value.readFields(in);
+        values.put(key, value);
+      }
+    }
+  }
 
   public void write(final DataOutput out)
   throws IOException {
+    out.writeByte(SCAN_VERSION);
     Bytes.writeByteArray(out, this.startRow);
     Bytes.writeByteArray(out, this.stopRow);
     out.writeInt(this.maxVersions);
@@ -624,5 +748,11 @@
         out.writeInt(0);
       }
     }
+    out.writeInt(values.size());
+    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
+        values.entrySet()) {
+      e.getKey().write(out);
+      e.getValue().write(out);
+    }
   }
 }

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/FilterList.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/FilterList.java?rev=896138&r1=896137&r2=896138&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/FilterList.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/FilterList.java Tue Jan  5 17:26:49 2010
@@ -24,6 +24,7 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Iterator;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -156,17 +157,19 @@
   }
 
   public ReturnCode filterKeyValue(KeyValue v) {
-    for (Filter filter : filters) {
+    Iterator<Filter> iter = filters.iterator();
+    while (iter.hasNext()) {
+      Filter filter = iter.next();
       if (operator == Operator.MUST_PASS_ALL) {
         if (filter.filterAllRemaining()) {
           return ReturnCode.NEXT_ROW;
         }
         switch (filter.filterKeyValue(v)) {
-        case INCLUDE:
-          continue;
-        case NEXT_ROW:
-        case SKIP:
-          return ReturnCode.SKIP;
+          case INCLUDE:
+            continue;
+          case NEXT_ROW:
+          case SKIP:
+            return ReturnCode.SKIP;
         }
       } else if (operator == Operator.MUST_PASS_ONE) {
         if (filter.filterAllRemaining()) {
@@ -174,11 +177,16 @@
         }
 
         switch (filter.filterKeyValue(v)) {
-        case INCLUDE:
-          return ReturnCode.INCLUDE;
-        case NEXT_ROW:
-        case SKIP:
-          continue;
+          case INCLUDE:
+            // let all the other filters look at this KeyValue since their correct operation may depend on it
+            while (iter.hasNext()){
+              Filter nextFilter = iter.next();
+              nextFilter.filterKeyValue(v);
+            }
+            return ReturnCode.INCLUDE;
+          case NEXT_ROW:
+          case SKIP:
+            continue;
         }
       }
     }

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=896138&r1=896137&r2=896138&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java Tue Jan  5 17:26:49 2010
@@ -19,53 +19,54 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.DroppedSnapshotException;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.NotServingRegionException;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.RowFilterInterface;
-import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.io.Reference.Range;
-import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.apache.hadoop.hbase.ipc.HRegionInterface;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.ClassSize;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.util.StringUtils;
+ import org.apache.commons.logging.Log;
+ import org.apache.commons.logging.LogFactory;
+ import org.apache.hadoop.fs.FSDataOutputStream;
+ import org.apache.hadoop.fs.FileStatus;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.hbase.DroppedSnapshotException;
+ import org.apache.hadoop.hbase.HBaseConfiguration;
+ import org.apache.hadoop.hbase.HColumnDescriptor;
+ import org.apache.hadoop.hbase.HConstants;
+ import org.apache.hadoop.hbase.HRegionInfo;
+ import org.apache.hadoop.hbase.HTableDescriptor;
+ import org.apache.hadoop.hbase.KeyValue;
+ import org.apache.hadoop.hbase.NotServingRegionException;
+ import org.apache.hadoop.hbase.client.Delete;
+ import org.apache.hadoop.hbase.client.Get;
+ import org.apache.hadoop.hbase.client.Put;
+ import org.apache.hadoop.hbase.client.Result;
+ import org.apache.hadoop.hbase.client.Scan;
+ import org.apache.hadoop.hbase.filter.Filter;
+ import org.apache.hadoop.hbase.filter.RowFilterInterface;
+ import org.apache.hadoop.hbase.io.HeapSize;
+ import org.apache.hadoop.hbase.io.Reference.Range;
+ import org.apache.hadoop.hbase.io.hfile.BlockCache;
+ import org.apache.hadoop.hbase.ipc.HRegionInterface;
+ import org.apache.hadoop.hbase.util.Bytes;
+ import org.apache.hadoop.hbase.util.ClassSize;
+ import org.apache.hadoop.hbase.util.FSUtils;
+ import org.apache.hadoop.hbase.util.Writables;
+ import org.apache.hadoop.util.Progressable;
+ import org.apache.hadoop.util.StringUtils;
+
+ import java.io.IOException;
+ import java.io.UnsupportedEncodingException;
+ import java.util.ArrayList;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.NavigableSet;
+ import java.util.TreeMap;
+ import java.util.TreeSet;
+ import java.util.concurrent.ConcurrentHashMap;
+ import java.util.concurrent.ConcurrentSkipListMap;
+ import java.util.concurrent.atomic.AtomicBoolean;
+ import java.util.concurrent.atomic.AtomicLong;
+ import java.util.concurrent.locks.ReentrantReadWriteLock;
+ import java.lang.reflect.Constructor;
 
-/**
+ /**
  * HRegion stores data for a certain region of a table.  It stores all columns
  * for each row. A given table consists of one or more HRegions.
  *
@@ -228,7 +229,9 @@
   }
   
   /**
-   * HRegion constructor.
+   * HRegion constructor.  This constructor should only be used for testing and
+   * extensions.  Instances of HRegion should be instantiated with the
+   * {@link org.apache.hadoop.hbase.regionserver.HRegion#newHRegion(org.apache.hadoop.fs.Path, HLog, org.apache.hadoop.fs.FileSystem, org.apache.hadoop.hbase.HBaseConfiguration, org.apache.hadoop.hbase.HRegionInfo, FlushRequester)} method.
    *
    * @param basedir qualified path of directory where region should be located,
    * usually the table directory.
@@ -246,8 +249,10 @@
    * @param flushListener an object that implements CacheFlushListener or null
    * making progress to master -- otherwise master might think region deploy
    * failed.  Can be null.
+   *
+   * @see org.apache.hadoop.hbase.regionserver.HRegion#newHRegion(org.apache.hadoop.fs.Path, HLog, org.apache.hadoop.fs.FileSystem, org.apache.hadoop.hbase.HBaseConfiguration, org.apache.hadoop.hbase.HRegionInfo, FlushRequester)
    */
-  public HRegion(Path basedir, HLog log, FileSystem fs, HBaseConfiguration conf, 
+  public HRegion(Path basedir, HLog log, FileSystem fs, HBaseConfiguration conf,
       HRegionInfo regionInfo, FlushRequester flushListener) {
     this.basedir = basedir;
     this.comparator = regionInfo.getComparator();
@@ -655,10 +660,10 @@
       // Done!
       // Opening the region copies the splits files from the splits directory
       // under each region.
-      HRegion regionA = new HRegion(basedir, log, fs, conf, regionAInfo, null);
+      HRegion regionA = HRegion.newHRegion(basedir, log, fs, conf, regionAInfo, null);
       regionA.initialize(dirA, null);
       regionA.close();
-      HRegion regionB = new HRegion(basedir, log, fs, conf, regionBInfo, null);
+      HRegion regionB = HRegion.newHRegion(basedir, log, fs, conf, regionBInfo, null);
       regionB.initialize(dirB, null);
       regionB.close();
 
@@ -881,7 +886,7 @@
    * @throws DroppedSnapshotException Thrown when replay of hlog is required
    * because a Snapshot was not properly persisted.
    */
-  private boolean internalFlushcache() throws IOException {
+  protected boolean internalFlushcache() throws IOException {
     final long startTime = System.currentTimeMillis();
     // Clear flush flag.
     // Record latest flush time
@@ -908,12 +913,19 @@
     this.updatesLock.writeLock().lock();
     // Get current size of memstores.
     final long currentMemStoreSize = this.memstoreSize.get();
+    List<StoreFlusher> storeFlushers = new ArrayList<StoreFlusher>();
     try {
-      for (Store s: stores.values()) {
-        s.snapshot();
-      }
       sequenceId = log.startCacheFlush();
       completeSequenceId = this.getCompleteCacheFlushSequenceId(sequenceId);
+      // create the store flushers
+      for (Store s : stores.values()) {
+        storeFlushers.add(s.getStoreFlusher(completeSequenceId));
+      }
+
+      // prepare flush (take a snapshot)
+      for (StoreFlusher flusher: storeFlushers) {
+         flusher.prepare();
+      }
     } finally {
       this.updatesLock.writeLock().unlock();
     }
@@ -927,12 +939,29 @@
       // A.  Flush memstore to all the HStores.
       // Keep running vector of all store files that includes both old and the
       // just-made new flush store file.
-      for (Store hstore: stores.values()) {
-        boolean needsCompaction = hstore.flushCache(completeSequenceId);
-        if (needsCompaction) {
-          compactionRequested = true;
+      for (StoreFlusher flusher : storeFlushers) {
+        flusher.flushCache();
+      }
+
+      internalPreFlashcacheCommit();
+
+      /**
+       * Switch between memstore and the new store file
+       */
+      this.newScannerLock.writeLock().lock();
+      try {
+        for (StoreFlusher flusher : storeFlushers) {
+          boolean needsCompaction = flusher.commit();
+          if (needsCompaction) {
+            compactionRequested = true;
+          }
         }
+      } finally {
+        this.newScannerLock.writeLock().unlock();
       }
+
+      // clear the storeFlushers list
+      storeFlushers.clear();
       // Set down the memstore size by amount of flush.
       this.memstoreSize.addAndGet(-currentMemStoreSize);
     } catch (Throwable t) {
@@ -974,8 +1003,18 @@
     }
     return compactionRequested;
   }
-  
-  /**
+
+
+   /**
+    * A hook for subclasses wishing to perform operations prior to the cache
+    * flush commit stage.
+    *
+    * @throws java.io.IOException allow children to throw exception
+    */
+   protected void internalPreFlashcacheCommit() throws IOException {
+   }
+
+   /**
    * Get the sequence number to be associated with this cache flush. Used by
    * TransactionalRegion to not complete pending transactions.
    * 
@@ -1070,13 +1109,17 @@
           scan.addFamily(family);
         }
       }
-      return new RegionScanner(scan, additionalScanners);
-      
+      return instantiateInternalScanner(scan, additionalScanners);
+
     } finally {
       newScannerLock.readLock().unlock();
     }
   }
 
+  protected InternalScanner instantiateInternalScanner(Scan scan, List<KeyValueScanner> additionalScanners) throws IOException {
+    return new RegionScanner(scan, additionalScanners);
+  }
+
   //////////////////////////////////////////////////////////////////////////////
   // set() methods for client use.
   //////////////////////////////////////////////////////////////////////////////
@@ -1091,6 +1134,7 @@
     checkReadOnly();
     checkResources();
     splitsAndClosesLock.readLock().lock();
+    newScannerLock.writeLock().lock();
     Integer lid = null;
     try {
       byte [] row = delete.getRow();
@@ -1118,6 +1162,7 @@
       }
     } finally {
       if(lockid == null) releaseRowLock(lid);
+      newScannerLock.writeLock().unlock();
       splitsAndClosesLock.readLock().unlock();
     }
   }
@@ -1224,6 +1269,7 @@
     // will be extremely rare; we'll deal with it when it happens.
     checkResources();
     splitsAndClosesLock.readLock().lock();
+    newScannerLock.writeLock().lock();
     try {
       // We obtain a per-row lock, so other clients will block while one client
       // performs an update. The read lock is released by the client calling
@@ -1248,6 +1294,7 @@
         if(lockid == null) releaseRowLock(lid);
       }
     } finally {
+      newScannerLock.writeLock().unlock();
       splitsAndClosesLock.readLock().unlock();
     }
   }
@@ -1822,6 +1869,45 @@
   }
   
   // Utility methods
+  /**
+   * A utility method to create new instances of HRegion based on the
+   * {@link org.apache.hadoop.hbase.HConstants#REGION_IMPL} configuration
+   * property. 
+   * @param basedir qualified path of directory where region should be located,
+   * usually the table directory.
+   * @param log The HLog is the outbound log for any updates to the HRegion
+   * (There's a single HLog for all the HRegions on a single HRegionServer.)
+   * The log file is a logfile from the previous execution that's
+   * custom-computed for this HRegion. The HRegionServer computes and sorts the
+   * appropriate log info for this HRegion. If there is a previous log file
+   * (implying that the HRegion has been written-to before), then read it from
+   * the supplied path.
+   * @param fs is the filesystem.
+   * @param conf is global configuration settings.
+   * @param regionInfo HRegionInfo that describes the region to be
+   * instantiated.
+   * @param flushListener an object that implements CacheFlushListener or null
+   * making progress to master -- otherwise master might think region deploy
+   * failed.  Can be null.
+   * @return the new instance
+   */
+   public static HRegion newHRegion(Path basedir, HLog log, FileSystem fs, HBaseConfiguration conf,
+                                    HRegionInfo regionInfo, FlushRequester flushListener) {
+     try {
+       @SuppressWarnings("unchecked")
+       Class<? extends HRegion> regionClass =
+           (Class<? extends HRegion>) conf.getClass(HConstants.REGION_IMPL, HRegion.class);
+
+       Constructor<? extends HRegion> c =
+           regionClass.getConstructor(Path.class, HLog.class, FileSystem.class,
+               HBaseConfiguration.class, HRegionInfo.class, FlushRequester.class);
+
+       return c.newInstance(basedir, log, fs, conf, regionInfo, flushListener);
+     } catch (Throwable e) {
+       // TODO: what should I throw here?
+       throw new IllegalStateException("Could not instantiate a region instance.", e);
+     }
+   }
 
   /**
    * Convenience method creating new HRegions. Used by createTable and by the
@@ -1844,7 +1930,7 @@
     Path regionDir = HRegion.getRegionDir(tableDir, info.getEncodedName());
     FileSystem fs = FileSystem.get(conf);
     fs.mkdirs(regionDir);
-    HRegion region = new HRegion(tableDir,
+    HRegion region = HRegion.newHRegion(tableDir,
       new HLog(fs, new Path(regionDir, HREGION_LOGDIR_NAME), conf, null),
       fs, conf, info, null);
     region.initialize(null, null);
@@ -1873,7 +1959,7 @@
     if (info == null) {
       throw new NullPointerException("Passed region info is null");
     }
-    HRegion r = new HRegion(
+    HRegion r = HRegion.newHRegion(
         HTableDescriptor.getTableDir(rootDir, info.getTableDesc().getName()),
         log, FileSystem.get(conf), conf, info, null);
     r.initialize(null, null);
@@ -2183,7 +2269,7 @@
       LOG.debug("Files for new region");
       listPaths(fs, newRegionDir);
     }
-    HRegion dstRegion = new HRegion(basedir, log, fs, conf, newRegionInfo, null);
+    HRegion dstRegion = HRegion.newHRegion(basedir, log, fs, conf, newRegionInfo, null);
     dstRegion.initialize(null, null);
     dstRegion.compactStores();
     if (LOG.isDebugEnabled()) {
@@ -2437,9 +2523,9 @@
     String metaStr = Bytes.toString(HConstants.META_TABLE_NAME);
     // Currently expects tables have one region only.
     if (p.getName().startsWith(rootStr)) {
-      region = new HRegion(p, log, fs, c, HRegionInfo.ROOT_REGIONINFO, null);
+      region = HRegion.newHRegion(p, log, fs, c, HRegionInfo.ROOT_REGIONINFO, null);
     } else if (p.getName().startsWith(metaStr)) {
-      region = new HRegion(p, log, fs, c, HRegionInfo.FIRST_META_REGIONINFO,
+      region = HRegion.newHRegion(p, log, fs, c, HRegionInfo.FIRST_META_REGIONINFO,
           null);
     } else {
       throw new IOException("Not a known catalog table: " + p.toString());

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=896138&r1=896137&r2=896138&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Tue Jan  5 17:26:49 2010
@@ -1558,7 +1558,7 @@
   
   protected HRegion instantiateRegion(final HRegionInfo regionInfo)
       throws IOException {
-    HRegion r = new HRegion(HTableDescriptor.getTableDir(rootDir, regionInfo
+    HRegion r = HRegion.newHRegion(HTableDescriptor.getTableDir(rootDir, regionInfo
         .getTableDesc().getName()), this.hlog, this.fs, conf, regionInfo,
         this.cacheFlusher);
     r.initialize(null,  new Progressable() {

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java?rev=896138&r1=896137&r2=896138&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java Tue Jan  5 17:26:49 2010
@@ -44,8 +44,8 @@
  * has same attributes as ConcurrentSkipListSet: e.g. tolerant of concurrent
  * get and set and won't throw ConcurrentModificationException when iterating.
  */
-class KeyValueSkipListSet implements NavigableSet<KeyValue> {
-  private final ConcurrentNavigableMap<KeyValue, KeyValue> delegatee;
+class KeyValueSkipListSet implements NavigableSet<KeyValue>, Cloneable {
+  private ConcurrentNavigableMap<KeyValue, KeyValue> delegatee;
 
   KeyValueSkipListSet(final KeyValue.KVComparator c) {
     this.delegatee = new ConcurrentSkipListMap<KeyValue, KeyValue>(c);
@@ -201,4 +201,17 @@
   public <T> T[] toArray(T[] a) {
     throw new UnsupportedOperationException("Not implemented");
   }
+
+  @Override
+  public KeyValueSkipListSet clone()  {
+    assert this.delegatee.getClass() == ConcurrentSkipListMap.class;
+    KeyValueSkipListSet clonedSet = null;
+    try {
+      clonedSet = (KeyValueSkipListSet) super.clone();
+    } catch (CloneNotSupportedException e) {
+      throw new InternalError(e.getMessage());
+    }
+    clonedSet.delegatee = ((ConcurrentSkipListMap) this.delegatee).clone();
+    return clonedSet;
+  }
 }
\ No newline at end of file



Mime
View raw message