hbase-commits mailing list archives

From st...@apache.org
Subject svn commit: r896138 [5/9] - in /hadoop/hbase/branches/0.20: ./ src/contrib/ src/contrib/indexed/ src/contrib/indexed/lib/ src/contrib/indexed/lib/fmpp-0.19.14/ src/contrib/indexed/src/ src/contrib/indexed/src/fmpp/ src/contrib/indexed/src/fmpp/src/ src...
Date Tue, 05 Jan 2010 17:26:53 GMT
Added: hadoop/hbase/branches/0.20/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/idx/support/arrays/ShortArrayList.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/idx/support/arrays/ShortArrayList.java?rev=896138&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/idx/support/arrays/ShortArrayList.java (added)
+++ hadoop/hbase/branches/0.20/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/idx/support/arrays/ShortArrayList.java Tue Jan  5 17:26:49 2010
@@ -0,0 +1,371 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.idx.support.arrays;
+
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+import org.apache.commons.lang.ArrayUtils;
+
+/**
+ * A list designed to be used as the key store for indexed HBase.  
+ * <p/>
+ * NOTE: This class is completely unsynchronised.
+ */
+public class ShortArrayList implements List<Short> {
+
+
+  //DO NOT EDIT THIS FILE, EDIT THE FMPP TEMPLATE INSTEAD.
+  //To generate source execute
+  // **/src/contrib/indexed# ant -f build-fmpp.xml -lib lib/fmpp-0.19.14
+
+  /**
+   * Default initial size of the array backing this list.
+   */
+  private static final int DEFAULT_SIZE = 1;
+
+  /**
+   * The scaling factor we use to resize the backing buffer when the list needs to grow.
+   */
+  private static final float SCALE_FACTOR = 1.5f;
+
+  /**
+   * The array backing this list.
+   */
+  private short[] values;
+
+  /**
+   * The number of values present in the list.
+   */
+  private int size;
+
+
+  /**
+   * Constructor that initialises with the default size.
+   */
+  public ShortArrayList() {
+    this(DEFAULT_SIZE);
+  }
+
+  /**
+   * Constructor which initialises with the specified initial capacity.
+   *
+   * @param initialCapacity the initial capacity of the backing array
+   */
+  public ShortArrayList(int initialCapacity) {
+    values = new short[initialCapacity];
+  }
+
+  /**
+   * Constructor which initialises the content from the supplied array list.
+   *
+   * @param initial the initial contents
+   */
+  public ShortArrayList(ShortArrayList initial) {
+    // Initialise the internal storage to the appropriate size
+    this(initial.size);
+
+    // Copy over the references/values
+    System.arraycopy(initial.values, 0, this.values, 0, initial.size);
+    this.size = initial.size;
+  }
+
+  /**
+   * Adds the element to the end of the list.
+   *
+   * @param element the new element
+   */
+  public void add(short element) {
+    ensureCapacity(size + 1);
+    values[size] = element;
+    size++;
+  }
+
+  @Override
+  public void add(byte[] bytes) {
+    add(fromBytes(bytes));
+  }
+
+  @Override
+  public int compare(Short needle, int compareToIndex) {
+    short compareTo = values[compareToIndex];
+    if (needle > compareTo) {
+      return 1;
+    } else if (needle < compareTo) {
+      return -1;
+    } else {
+      return 0;
+    }
+  }
+
+  /**
+   * Grows the backing array to the requested size.
+   *
+   * @param requested the new capacity.
+   */
+  private void ensureCapacity(int requested) {
+    // If we need to resize
+    if (requested > values.length) {
+      // Calculate the new size, growing slowly at the start to avoid over-allocating too early.
+      int newSize = Math.max(requested, (int) (values.length * SCALE_FACTOR + 1));
+
+      // Create the new array
+      short[] newValues = new short[newSize];
+
+      // Populate the new backing array
+      System.arraycopy(values, 0, newValues, 0, size);
+      values = newValues;
+    }
+  }
+
+  /**
+   * Retrieves the element at the requested index.
+   *
+   * @param index the element index you wish to retrieve
+   * @return the value at that index
+   */
+  public short get(int index) {
+    if (index >= size) {
+      throw new ArrayIndexOutOfBoundsException("Attempted to access index " + index + " but array is " + size + " elements");
+    }
+
+    return values[index];
+  }
+
+  /**
+   * Searches the list for the nominated value.
+   *
+   * @param searchFor the value you are looking for
+   * @return the first index the value was found at or -1 if not found
+   */
+  public int indexOf(short searchFor) {
+    // Check each of the values. Don't bother with get() since we don't need its protection.
+    for (int i = 0; i < size; i++) {
+      if (values[i] == searchFor) {
+        return i;
+      }
+    }
+
+    // Didn't find it.
+    return -1;
+  }
+
+  /**
+   * Simple iterator that runs over the values in the list.
+   */
+  private static final class InternalIterator
+    implements Iterator<Short> {
+
+    private short[] values;
+    private int size;
+    private int current = 0;
+
+    private InternalIterator(short[] values, int size) {
+      this.values = values;
+      this.size = size;
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public boolean hasNext() {
+      return current < size;
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public Short next() {
+      if (!hasNext()) {
+        throw new NoSuchElementException();
+      }
+      return values[current++];
+    }
+
+    /**
+     * Not supported.
+     */
+    @Override
+    public void remove() {
+      throw new UnsupportedOperationException("remove() is not supported");
+    }
+  }
+
+  /**
+   * Returns an iterator over the underlying content. Note that this is completely unsynchronised; the contents may change underneath you during iteration.
+   */
+  @Override
+  public Iterator<Short> iterator() {
+    return new InternalIterator(values, size);
+  }
+
+  /**
+   * Checks if the list is empty.
+   *
+   * @return true if the list is empty
+   */
+  @Override
+  public boolean isEmpty() {
+    return size == 0;
+  }
+
+  /**
+   * Sets the specified index to the nominated value.
+   *
+   * @param index    the list index
+   * @param newValue the value
+   */
+  public void set(int index, short newValue) {
+    if (index >= size) {
+      throw new ArrayIndexOutOfBoundsException("Attempted to access index " + index + " but array is " + size + " elements");
+    }
+
+    values[index] = newValue;
+  }
+
+  @Override
+  public void set(int index, byte[] newValue) {
+    set(index, fromBytes(newValue));
+  }
+
+  /**
+   * Removes the specified index from the list.
+   *
+   * @param index the index to remove
+   * @return the original value
+   */
+  public short remove(int index) {
+    if (index >= size) {
+      throw new ArrayIndexOutOfBoundsException("Attempted to access index " + index + " but array is " + size + " elements");
+    }
+
+    short original = values[index];
+    System.arraycopy(values, index + 1, values, index, size - index - 1);
+    size--;
+    return original;
+  }
+
+
+  /**
+   * Inserts at the specified index to the list.
+   *
+   * @param index    the index to insert
+   * @param newValue the value to insert
+   */
+  public void insert(int index, short newValue) {
+    if (index > size) {
+      throw new ArrayIndexOutOfBoundsException("Attempted to access index " + index + " but array is " + size + " elements");
+    }
+
+    ensureCapacity(size + 1);
+    if (index != size) {
+      System.arraycopy(values, index, values, index + 1, size - index);
+    }
+    values[index] = newValue;
+    size++;
+  }
+
+  @Override
+  public void insert(int index, byte[] newValue) {
+    insert(index, fromBytes(newValue));
+  }
+
+  /**
+   * Removes the last item in the list.
+   *
+   * @return the original value
+   */
+  public short removeLast() {
+    if (size < 1) {
+      throw new ArrayIndexOutOfBoundsException("Attempted to remove last element from array with size 0");
+    }
+
+    short result = values[size - 1];
+    size--;
+    return result;
+  }
+
+  /**
+   * Returns the current number of elements in this list.
+   *
+   * @return the number of elements.
+   */
+  public int size() {
+    return size;
+  }
+
+  @Override
+  public Short fromBytes(byte[] bytes) {
+    return Bytes.toShort(bytes);
+  }
+
+
+  @Override
+  public long heapSize() {
+    return FIXED_OVERHEAD + Bytes.SIZEOF_SHORT * values.length;
+  }
+
+
+  /**
+   * Return a nice view of the list.
+   * {@inheritDoc}
+   */
+  @Override
+  public String toString() {
+    return Arrays.toString(Arrays.copyOf(values, size));
+  }
+
+  /**
+   * Checks the contents of the collection for equality.
+   * <p/>
+   * {@inheritDoc}
+   */
+  @Override
+  public boolean equals(Object compareTo) {
+    if (this == compareTo) {
+      return true;
+    }
+    if (!(compareTo instanceof ShortArrayList)) {
+      return false;
+    }
+
+    ShortArrayList that = (ShortArrayList) compareTo;
+
+    return this.size == that.size &&
+      ArrayUtils.isEquals(this.values, that.values);
+  }
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public int hashCode() {
+    return 31 * Arrays.hashCode(Arrays.copyOf(values, size)) + size;
+  }
+}
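
For context, a minimal usage sketch of the list above. This is not part of
the commit; the class implements the package-local List interface (not
java.util.List), so the snippet assumes code living in the same package:

    ShortArrayList list = new ShortArrayList();
    list.add((short) 3);
    list.add((short) 7);
    list.insert(1, (short) 5);            // list is now [3, 5, 7]
    short head = list.remove(0);          // removes 3, leaving [5, 7]
    java.util.Iterator<Short> it = list.iterator();
    while (it.hasNext()) {
      System.out.println(it.next());      // prints 5, then 7
    }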

Added: hadoop/hbase/branches/0.20/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/idx/support/sets/BitSet.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/idx/support/sets/BitSet.java?rev=896138&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/idx/support/sets/BitSet.java (added)
+++ hadoop/hbase/branches/0.20/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/idx/support/sets/BitSet.java Tue Jan  5 17:26:49 2010
@@ -0,0 +1,440 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.idx.support.sets;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ClassSize;
+
+import java.util.Arrays;
+
+/**
+ * A bitset implementation of the {@link IntSet} interface.
+ * This class is not thread safe.
+ */
+class BitSet extends IntSetBase {
+
+  /**
+   * Number of bits in a word.
+   */
+  private static final int WORD_BIT_COUNT = 64;
+  /**
+   * The power of two to reach the word size. This is the shift needed to
+   * get the index from a word.
+   */
+  private static final int INDEX_SHIFT = 6;
+  /**
+   * The fixed part in the heap size calculation.
+   */
+  static final int FIXED_SIZE = ClassSize.align(ClassSize.OBJECT +
+    Bytes.SIZEOF_INT * 4 + ClassSize.ARRAY +
+    ClassSize.REFERENCE + Bytes.SIZEOF_LONG);
+
+  /**
+   * The capacity (exclusive upper bound on elements) of this set.
+   */
+  private int capacity;
+  /**
+   * Members deliberately package-protected to allow direct interaction.
+   */
+  long[] words;
+  /**
+   * The number of bits which are not zero.
+   */
+  int size;
+  /**
+   * The min bit which is not zero.
+   */
+  int minElement;
+  /**
+   * The max bit which is not zero.
+   */
+  int maxElement;
+  /**
+   * The heap size.
+   */
+  private long heapSize;
+
+  /**
+   * Construct a new bitset with a given maximum element.
+   *
+   * @param capacity the max element of this set (exclusive).
+   */
+  public BitSet(int capacity) {
+    assert capacity >= SMALLEST && capacity <= LARGEST;
+    this.capacity = capacity;
+    clear();
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public int size() {
+    calcSizeMinMax();
+    return size;
+  }
+
+  /**
+   * Calculates the size/min/max fields. Used to lazily calculate these
+   * as they are computationally expensive.
+   */
+  private void calcSizeMinMax() {
+    if (size < 0) {
+      size = 0;
+      minElement = -1;
+      maxElement = -1;
+      int base = 0;
+      for (int i = 0; i < words.length; i++) {
+        if (words[i] != 0) {
+          size += Long.bitCount(words[i]);
+          minElement = minElement >= 0 ? minElement :
+            base + Long.numberOfTrailingZeros(words[i]);
+          maxElement = base + WORD_BIT_COUNT
+            - 1 - Long.numberOfLeadingZeros(words[i]);
+        }
+        base += WORD_BIT_COUNT;
+      }
+    }
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public void clear() {
+    size = 0;
+    minElement = -1;
+    maxElement = -1;
+    if (words == null) {
+      words = new long[capacity / WORD_BIT_COUNT + (capacity % WORD_BIT_COUNT > 0 ? 1 : 0)];
+      heapSize = ClassSize.align(FIXED_SIZE + words.length * Bytes.SIZEOF_LONG);
+    } else {
+      for (int i = 0; i < words.length; i++) {
+        words[i] = 0L;
+      }
+    }
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public boolean isEmpty() {
+    return size() == 0;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public int capacity() {
+    return capacity;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  void addNext(int element) {
+    assert element > maxElement && element < capacity;
+    assert size >= 0;
+    minElement = minElement == -1 ? element : minElement;
+    maxElement = element;
+    int word = element >> INDEX_SHIFT;
+    words[word] |= (1L << element);
+    size++;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public boolean contains(int element) {
+    assert element >= 0 && element <= capacity;
+    int word = element >> INDEX_SHIFT;
+    return (words[word] & (1L << element)) != 0L;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public IntSet complement() {
+    size = -1;
+    for (int i = 0; i < words.length; i++) {
+      words[i] = ~words[i];
+    }
+    words[words.length - 1] ^= capacity % WORD_BIT_COUNT == 0 ?
+      0 : -1L << capacity;  // clear the bits at positions >= capacity in the last word
+    return this;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public IntSet intersect(IntSet other) {
+    assert this.capacity == other.capacity();
+    return other.getClass() == BitSet.class ? intersect((BitSet) other) :
+      intersect((SparseBitSet) other);
+  }
+
+  /**
+   * Intersect with a BitSet.
+   *
+   * @param other the other bitset.
+   * @return the intersection result (the modified this)
+   */
+  private IntSet intersect(BitSet other) {
+    size = -1;
+    for (int i = 0; i < words.length; i++) {
+      words[i] &= other.words[i];
+    }
+    return this;
+  }
+
+  /**
+   * Intersect with a SparseBitSet
+   *
+   * @param other the sparse bit set
+   * @return the intersection result (the modified this)
+   */
+  private IntSet intersect(SparseBitSet other) {
+    size = -1;
+    int index;
+    int prevIndex = -1;
+    for (int i = 0; i < other.length; i++) {
+      index = other.indices[i];
+      for (int j = prevIndex + 1; j < index; j++) {
+        words[j] = 0;
+      }
+      words[index] &= other.words[i];
+      prevIndex = index;
+    }
+    for (int j = prevIndex + 1; j < words.length; j++) {
+      words[j] = 0;
+    }
+
+    return this;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public IntSet unite(IntSet other) {
+    assert this.capacity == other.capacity();
+    return other.getClass() == BitSet.class ? unite((BitSet) other) :
+      unite((SparseBitSet) other);
+  }
+
+  private IntSet unite(BitSet other) {
+    size = -1;
+    for (int i = 0; i < words.length; i++) {
+      words[i] |= other.words[i];
+    }
+    return this;
+  }
+
+  private IntSet unite(SparseBitSet other) {
+    size = -1;
+    for (int i = 0; i < other.length; i++) {
+      words[other.indices[i]] |= other.words[i];
+    }
+    return this;
+  }
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public IntSet subtract(IntSet other) {
+    assert this.capacity == other.capacity();
+    return other.getClass() == BitSet.class ? subtract((BitSet) other) :
+      subtract((SparseBitSet) other);
+  }
+
+  private IntSet subtract(BitSet other) {
+    size = -1;
+    for (int i = 0; i < words.length; i++) {
+      words[i] &= ~other.words[i];
+    }
+    return this;
+  }
+
+  private IntSet subtract(SparseBitSet other) {
+    size = -1;
+    for (int i = 0; i < other.length; i++) {
+      words[other.indices[i]] &= ~other.words[i];
+    }
+    return this;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public IntSet difference(IntSet other) {
+    assert this.capacity == other.capacity();
+    return other.getClass() == BitSet.class ? difference((BitSet) other) :
+      difference((SparseBitSet) other);
+  }
+
+  private IntSet difference(BitSet other) {
+    size = -1;
+    for (int i = 0; i < words.length; i++) {
+      words[i] ^= other.words[i];
+    }
+    return this;
+  }
+
+  private IntSet difference(SparseBitSet other) {
+    size = -1;
+    for (int i = 0; i < other.length; i++) {
+      words[other.indices[i]] ^= other.words[i];
+    }
+    return this;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public BitSet clone() {
+    BitSet result = (BitSet) super.clone();
+    result.words = this.words.clone();
+    return result;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public IntSetIterator iterator() {
+    calcSizeMinMax();
+    return new IntSetIteratorImpl();
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public long heapSize() {
+    return heapSize;
+  }
+
+  private class IntSetIteratorImpl implements IntSetIterator {
+    private static final int PREFETCH_SIZE = 11;
+
+    private int[] nextValues;
+    private int nextValueIndex;
+    private int numValues;
+    private int nextWordIndex;
+    private int largestNextValue;
+    private int limit;
+
+    private IntSetIteratorImpl() {
+      nextValues = new int[PREFETCH_SIZE];
+      nextWordIndex = minElement >> INDEX_SHIFT;
+      limit = maxElement;
+      largestNextValue = -1;
+      fill();
+    }
+
+    /**
+     * Fills up the array of set bits.
+     */
+    private void fill() {
+      numValues = 0;
+      nextValueIndex = 0;
+      while (largestNextValue < limit && nextWordIndex < words.length) {
+        if (words[nextWordIndex] != 0) {
+          long word = words[nextWordIndex];
+          int base = nextWordIndex * WORD_BIT_COUNT;
+          int shiftAmount = largestNextValue - base + 1;
+          if (shiftAmount > 0) {
+            word = ((word >>> shiftAmount) << shiftAmount);
+          }
+          while (word != 0 && numValues < nextValues.length) {
+            int setBitIndex = Long.numberOfTrailingZeros(word);
+            largestNextValue = nextValues[numValues++] = setBitIndex + base;
+            word ^= 1L << setBitIndex;
+          }
+          if (word == 0) {
+            nextWordIndex++;
+          }
+          if (numValues == nextValues.length || largestNextValue >= limit ||
+            nextWordIndex >= words.length) {
+            break;
+          }
+        } else {
+          nextWordIndex++;
+        }
+      }
+    }
+
+    @Override
+    public boolean hasNext() {
+      return nextValueIndex < numValues || largestNextValue < maxElement;
+    }
+
+    @Override
+    public int next() {
+      if (nextValueIndex < numValues) {
+        return nextValues[nextValueIndex++];
+      } else {
+        fill();
+        if (nextValueIndex < numValues) {
+          return nextValues[nextValueIndex++];
+        }
+      }
+      throw new IndexOutOfBoundsException();
+    }
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public boolean equals(Object other) {
+    if (this == other) return true;
+    if (other == null || getClass() != other.getClass()) return false;
+
+    BitSet otherBitSet = (BitSet) other;
+    this.calcSizeMinMax();
+    otherBitSet.calcSizeMinMax();
+
+    return heapSize == otherBitSet.heapSize &&
+      capacity == otherBitSet.capacity &&
+      maxElement == otherBitSet.maxElement &&
+      minElement == otherBitSet.minElement &&
+      size == otherBitSet.size &&
+      Arrays.equals(words, otherBitSet.words);
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public int hashCode() {
+    return Arrays.hashCode(words);
+  }
+}
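
The bit addressing above leans on two Java facts: element >> INDEX_SHIFT is
element / 64, and shifting a long shifts by the amount mod 64, so 1L << element
directly selects the element's bit within its word. A standalone sketch of the
same mapping (plain Java, independent of this commit):

    int element = 130;
    int word = element >> 6;              // 130 / 64 == word index 2
    long mask = 1L << element;            // shift is mod 64, i.e. 1L << 2
    System.out.println(word);                              // 2
    System.out.println(Long.numberOfTrailingZeros(mask));  // 2, the bit in word 2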

Added: hadoop/hbase/branches/0.20/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/idx/support/sets/IntSet.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/idx/support/sets/IntSet.java?rev=896138&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/idx/support/sets/IntSet.java (added)
+++ hadoop/hbase/branches/0.20/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/idx/support/sets/IntSet.java Tue Jan  5 17:26:49 2010
@@ -0,0 +1,140 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.idx.support.sets;
+
+import org.apache.hadoop.hbase.io.HeapSize;
+
+/**
+ * A set of integers in the range between 0 and MAX. Elements have to be added
+ * to this set in ascending order.
+ */
+public interface IntSet extends HeapSize, Cloneable {
+
+  /**
+   * The value of the smallest element which may be added to this set.
+   */
+  int SMALLEST = 0;
+  /**
+   * The value of the maximal element which may be added to an IntSet.
+   */
+  int LARGEST = Integer.MAX_VALUE;
+
+  /**
+   * Counts the elements in the set.
+   * @return number of elements in the set
+   */
+  int size();
+
+  /**
+   * Empty check.
+   * @return true if the set is empty.
+   */
+  boolean isEmpty();
+
+  /**
+   * The number of elements which this set may contain.
+   * The elements can be any integers in the range [0, capacity()-1].
+   * @return the capacity of this set.
+   */
+  int capacity();
+
+  /**
+   * Checks whether an element is contained in the set.
+   * @param element an integer in the range [0, {@link #capacity()})
+   * @return true if the set contains this element.
+   */
+  boolean contains(int element);
+
+  /**
+   * Clear the set.
+   */
+  void clear();
+
+  /**
+   * Inverts this set to its complement, i.e. a set which contains exactly
+   * those elements not contained in this set. This operation is unsafe, it
+   * may change this set.
+   * @return the complementary set.
+   */
+  IntSet complement();
+
+  /**
+   * Intersect this int set with another int set. This operation is unsafe, it
+   * may change this set.
+   * @param other the set to intersect with (not affected by this operation).
+   * @return the intersection (may be a reference to this set).
+   */
+  IntSet intersect(IntSet other);
+
+  /**
+   * Unite this int set with another int set. This operation is unsafe, it may
+   * change this set.
+   * @param other the set to unite with.
+   * @return the united set, may be a reference to this set.
+   */
+  IntSet unite(IntSet other);
+
+  /**
+   * Subtract all the elements of another set from this one leaving only
+   * elements which do not exist in the other set. This operation is unsafe, it
+   * may change this set.
+   * @param other the set to subtract from this one
+   * @return the subtracted set, may be a reference to this one
+   */
+  IntSet subtract(IntSet other);
+
+  /**
+   * The difference between the two sets, all the elements which are set in
+   * either but not in both. This operation is unsafe, it may change this set.
+   * @param other the other set
+   * @return the difference set, may be a reference to this one
+   */
+  IntSet difference(IntSet other);
+
+  /**
+   * Clone this set. Implementing classes must be able to clone themselves.
+   * @return the cloned set.
+   */
+  IntSet clone();
+
+  /**
+   * Returns an iterator over the int set.
+   * @return an iterator
+   */
+  IntSetIterator iterator();
+
+  /**
+   * An iterator over an {@link IntSet} that avoids auto-boxing.
+   */
+  public interface IntSetIterator {
+    /**
+     * Returns true if the iteration has more elements.
+     * @return true if the iterator has more elements, otherwise false
+     */
+    boolean hasNext();
+
+    /**
+     * Returns the next element in the iteration.
+     * @return the next element in the iteration
+     * @throws IndexOutOfBoundsException iteration has no more elements
+     */
+    int next();
+  }
+}
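
For illustration, a sketch of walking an IntSet with the primitive iterator.
It obtains the set via the IntSetBuilder added later in this commit, and
assumes same-package access since the concrete set types are package-private:

    IntSet set = new IntSetBuilder().start().addAll(1, 5, 12).finish(64);
    IntSet.IntSetIterator it = set.iterator();
    while (it.hasNext()) {
      int element = it.next();        // primitive int, no autoboxing
      System.out.println(element);    // prints 1, 5 and 12
    }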

Added: hadoop/hbase/branches/0.20/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/idx/support/sets/IntSetBase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/idx/support/sets/IntSetBase.java?rev=896138&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/idx/support/sets/IntSetBase.java (added)
+++ hadoop/hbase/branches/0.20/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/idx/support/sets/IntSetBase.java Tue Jan  5 17:26:49 2010
@@ -0,0 +1,50 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.idx.support.sets;
+
+/**
+ * A package-protected base class for modifiable {@link IntSet}s.
+ */
+abstract class IntSetBase implements IntSet {
+
+  /**
+   * Adds the next element to the set. This element must be larger than all
+   * the other elements in this set.
+   *
+   * @param element the element to add
+   */
+  abstract void addNext(int element);
+
+  /**
+   * {@inheritDoc}.
+   * <p/>
+   * This is a convenience method to avoid wrapping the
+   * {@link CloneNotSupportedException} in the subclasses or handling it
+   * in classes using this set.
+   */
+  @Override
+  public IntSet clone() {
+    try {
+      return (IntSetBase) super.clone();
+    } catch (CloneNotSupportedException e) {
+      throw new IllegalStateException("Super clone should be supported.");
+    }
+  }
+}

Added: hadoop/hbase/branches/0.20/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/idx/support/sets/IntSetBuilder.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/idx/support/sets/IntSetBuilder.java?rev=896138&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/idx/support/sets/IntSetBuilder.java (added)
+++ hadoop/hbase/branches/0.20/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/idx/support/sets/IntSetBuilder.java Tue Jan  5 17:26:49 2010
@@ -0,0 +1,109 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.idx.support.sets;
+
+/**
+ * Encapsulates the {@link IntSet} building strategy.
+ * May switch set implementations 'behind the scenes'.
+ */
+public class IntSetBuilder {
+
+  private SparseBitSet sparseBitSet;
+
+  /**
+   * Create a new empty int set.
+   *
+   * @param capacity the capacity of the set.
+   * @return the new set
+   */
+  public static IntSet newEmptyIntSet(int capacity) {
+    SparseBitSet intSet = new SparseBitSet();
+    intSet.setCapacity(capacity);
+    return intSet;
+  }
+
+  /**
+   * Calculates the total heap size of the elements of an IntSet array.
+   *
+   * @param intSets the IntSet array to measure
+   * @return the total heap size
+   */
+  public static long calcHeapSize(IntSet[] intSets) {
+    long size = 0;
+    for (IntSet set : intSets) {
+      size += set.heapSize();
+    }
+    return size;
+  }
+
+  /**
+   * Start building the intset.
+   *
+   * @return this
+   */
+  public IntSetBuilder start() {
+    sparseBitSet = new SparseBitSet();
+    return this;
+  }
+
+  /**
+   * Adds the next item to this set. Items must be added in order.
+   *
+   * @param element the item to add
+   * @return this
+   */
+  public IntSetBuilder addNext(int element) {
+    sparseBitSet.addNext(element);
+    return this;
+  }
+
+  /**
+   * Convenience method that adds one or more elements.
+   *
+   * @param element  a mandatory element
+   * @param elements an array of optional elements
+   * @return this
+   * @see #addNext(int)
+   */
+  public IntSetBuilder addAll(int element, int... elements) {
+    addNext(element);
+    if (elements != null) {
+      for (int i : elements) {
+        addNext(i);
+      }
+    }
+    return this;
+  }
+
+
+  /**
+   * Finalize the bitset.
+   *
+   * @param numKeys the number of keys in the final bitset
+   * @return the underlying bitset.
+   */
+  public IntSet finish(int numKeys) {
+    sparseBitSet.setCapacity(numKeys);
+    return sparseBitSet;
+  }
+}
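
A usage sketch for the builder: elements must be added in ascending order, and
since the set operations are unsafe (they may mutate the receiver), a caller
that still needs the original set should clone it first. Hypothetical values,
same-package access assumed:

    IntSet evens = new IntSetBuilder().start().addAll(0, 2, 4, 6).finish(8);
    IntSet ones = new IntSetBuilder().start().addAll(1, 2, 3).finish(8);
    // unite() may modify the receiver in place, so clone to keep 'evens' intact
    IntSet union = evens.clone().unite(ones);
    assert union.size() == 6;             // {0, 1, 2, 3, 4, 6}
    assert evens.size() == 4;             // preserved by the clone()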

Added: hadoop/hbase/branches/0.20/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/idx/support/sets/SparseBitSet.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/idx/support/sets/SparseBitSet.java?rev=896138&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/idx/support/sets/SparseBitSet.java (added)
+++ hadoop/hbase/branches/0.20/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/idx/support/sets/SparseBitSet.java Tue Jan  5 17:26:49 2010
@@ -0,0 +1,430 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.idx.support.sets;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ClassSize;
+
+import java.util.Arrays;
+
+/**
+ * A very simple sparse bitset which assumes most bits are not set.
+ * It is composed of an array of indices (int) and an array of longs for the
+ * words.
+ * <p/>
+ * This class is designed so that set operations with the {@link BitSet}
+ * types can be implemented efficiently.
+ * <p/>
+ * This class is not thread safe.
+ */
+class SparseBitSet extends IntSetBase {
+  /**
+   * The fixed part in the heap size calculation.
+   */
+  static final int FIXED_SIZE = ClassSize.align(ClassSize.OBJECT +
+    Bytes.SIZEOF_INT * 3 + (ClassSize.ARRAY + ClassSize.REFERENCE) * 2 +
+    Bytes.SIZEOF_LONG);
+
+
+  /**
+   * Default initial size of the array backing this list.
+   */
+  private static final int DEFAULT_SIZE = 1;
+
+  /**
+   * The scaling factor we use to resize the backing buffer when the list
+   * needs to grow.
+   */
+  private static final float SCALE_FACTOR = 1.5f;
+
+  /**
+   * Number of bits in a word.
+   */
+  private static final int WORD_BIT_COUNT = 64;
+  /**
+   * The power of two to reach the word size. This is the shift needed to get
+   * the index from a word.
+   */
+  private static final int INDEX_SHIFT = 6;
+
+  /**
+   * If the length is greater than this threshold then binary search will be
+   * used instead of scanning.
+   */
+  private static final int BINARY_SEARCH_THRESHOLD = 16;
+
+  /**
+   * The array of indices. Length must match the length of the array of words.
+   * Each index denotes the position of the matching word within the words
+   * array of the equivalent inflated {@link BitSet}.
+   * <p/>
+   * Package protected to allow direct access by friends.
+   */
+  int[] indices;
+  /**
+   * An array of non-zero words. The indexes of these words are kept in the
+   * indices array.
+   * <p/>
+   * Package protected to allow direct access by friends.
+   */
+  long[] words;
+
+  /**
+   * Number of used words.
+   * <p/>
+   * Package protected to allow direct access by friends.
+   */
+  int length;
+  /**
+   * Number of non-zero bits.
+   */
+  private int size;
+  /**
+   * The maximum integer that can be stored in this set.
+   * <p/>
+   * Useful when converting this class to a {@link BitSet}.
+   */
+  private int capacity;
+  /**
+   * The heap size.
+   */
+  private long heapSize;
+
+  SparseBitSet() {
+    capacity = LARGEST;
+    clear();
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public int size() {
+    return size;
+  }
+
+  /**
+   * Converts this sparse bit set to a full (inflated) {@link BitSet}.
+   *
+   * @return the inflated bit set
+   */
+  public BitSet toBitSet() {
+    assert capacity <= LARGEST;
+    BitSet bitSet = new BitSet(capacity);
+    if (size > 0) {
+      for (int i = 0; i < length; i++) {
+        bitSet.words[indices[i]] = words[i];
+      }
+      bitSet.size = this.size;
+      bitSet.minElement = indices[0] * WORD_BIT_COUNT +
+        Long.numberOfTrailingZeros(words[0]);
+      bitSet.maxElement = indices[length - 1] * WORD_BIT_COUNT + WORD_BIT_COUNT
+        - 1 - Long.numberOfLeadingZeros(words[length - 1]);
+    }
+    return bitSet;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public void clear() {
+    indices = null;
+    words = null;
+    size = 0;
+    length = 0;
+    heapSize = FIXED_SIZE;
+  }
+
+  @Override
+  public boolean isEmpty() {
+    return size() == 0;
+  }
+
+  @Override
+  public int capacity() {
+    return capacity;
+  }
+
+  /**
+   * Adjusts the capacity of this set. Can only increase the capacity.
+   *
+   * @param capacity the capacity of this set.
+   */
+  public void setCapacity(int capacity) {
+    assert capacity >= SMALLEST && capacity <= LARGEST;
+    assert length == 0 || capacity >= indices[length - 1] << INDEX_SHIFT;
+    this.capacity = capacity;
+  }
+
+
+  /**
+   * Grows the backing array to the requested size.
+   *
+   * @param element the element for which the capacity needs to be ensured
+   * @return the word index for this element
+   */
+  private int ensureArrayCapacity(int element) {
+    assert element >= SMALLEST && element < capacity;
+    int word = element >> INDEX_SHIFT;
+    assert length < 0 || word >= indices[length - 1];
+    // If we need to resize
+    if (length == indices.length && word > indices[length - 1]) {
+      // Calculate the new size, growing slowly at the start to avoid
+      // over-allocating too early.
+      int newArrayLength = (int) (indices.length * SCALE_FACTOR + 1);
+
+      // Create the new array
+      int[] newIndices = new int[newArrayLength];
+      long[] newWords = new long[newArrayLength];
+
+      // Populate the new backing array
+      System.arraycopy(indices, 0, newIndices, 0, length);
+      System.arraycopy(words, 0, newWords, 0, length);
+      indices = newIndices;
+      words = newWords;
+
+      heapSize = FIXED_SIZE +
+        (Bytes.SIZEOF_LONG + Bytes.SIZEOF_INT) * newArrayLength;
+    }
+    return word;
+  }
+
+  /**
+   * Add the next element in sorted order to this bitset.
+   *
+   * @param element the element to add
+   */
+  void addNext(int element) {
+    if (length == 0) {
+      addFirstElement(element);
+    } else {
+      addNextElement(element);
+    }
+  }
+
+  /**
+   * Adds the next element. Used to add all the elements except the first.
+   *
+   * @param element the element to add
+   */
+  private void addNextElement(int element) {
+    int word = ensureArrayCapacity(element);
+    if (word != indices[length - 1]) {
+      indices[length++] = word;
+    }
+    assert Long.highestOneBit(words[length - 1]) < (1L << element) ||
+      (words[length - 1] >= 0 && (1L << element) < 0)
+      : "element=" + element;
+    words[length - 1] |= (1L << element);
+    size++;
+  }
+
+  /**
+   * Adds the first element. This method allocates memory.
+   *
+   * @param firstElement the first element of this set
+   */
+  private void addFirstElement(int firstElement) {
+    assert firstElement >= SMALLEST && firstElement < capacity;
+    indices = new int[DEFAULT_SIZE];
+    words = new long[DEFAULT_SIZE];
+    length = 1;
+    size = 1;
+    // put the first element
+    indices[0] = firstElement >> INDEX_SHIFT;
+    words[0] |= (1L << firstElement);
+    heapSize = FIXED_SIZE +
+      (Bytes.SIZEOF_LONG + Bytes.SIZEOF_INT) * DEFAULT_SIZE;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public boolean contains(int element) {
+    int word = element >> INDEX_SHIFT;
+    if (length < BINARY_SEARCH_THRESHOLD) {
+      for (int i = 0; i < indices.length; i++) {
+        if (indices[i] == word) {
+          return (words[i] & (1L << element)) != 0L;
+        }
+      }
+    } else {
+      int index = Arrays.binarySearch(indices, 0, length, word);
+      if (index >= 0) {
+        return (words[index] & (1L << element)) != 0L;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public IntSet complement() {
+    return toBitSet().complement();
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public IntSet intersect(IntSet other) {
+    return toBitSet().intersect(other);
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public IntSet unite(IntSet other) {
+    return toBitSet().unite(other);
+  }
+
+  @Override
+  public IntSet subtract(IntSet other) {
+    return toBitSet().subtract(other);
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public IntSet difference(IntSet other) {
+    return toBitSet().difference(other);
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public IntSetIterator iterator() {
+    return new IntSetIteratorImpl();
+  }
+
+  /**
+   * {@inheritDoc}
+   * <p/>
+   * Clones this set, deep-copying the backing arrays when present.
+   */
+  @Override
+  public IntSet clone() {
+    SparseBitSet result = (SparseBitSet) super.clone();
+    if (this.indices != null) {
+      result.indices = this.indices.clone();
+      result.words = this.words.clone();
+    }
+    return result;
+  }
+
+  @Override
+  public long heapSize() {
+    return heapSize;
+  }
+
+  private class IntSetIteratorImpl implements IntSetIterator {
+    private static final int PREFETCH_SIZE = 11;
+
+    private int[] nextValues;
+    private int nextValueIndex;
+    private int numValues;
+    private int nextWordIndex;
+    private int largestNextValue;
+
+    private IntSetIteratorImpl() {
+      nextValues = new int[PREFETCH_SIZE];
+      nextWordIndex = 0;
+      largestNextValue = -1;
+      fill();
+    }
+
+    /**
+     * Fills the cache with the next set of bits.
+     */
+    private void fill() {
+      numValues = 0;
+      nextValueIndex = 0;
+      while (nextWordIndex < length && numValues < nextValues.length) {
+        long word = words[nextWordIndex];
+        int base = indices[nextWordIndex] * WORD_BIT_COUNT;
+        int shiftAmount = largestNextValue - base + 1;
+        if (shiftAmount > 0) {
+          word = ((word >>> shiftAmount) << shiftAmount);
+        }
+        while (word != 0 && numValues < nextValues.length) {
+          int setBitIndex = Long.numberOfTrailingZeros(word);
+          largestNextValue = nextValues[numValues++] = setBitIndex + base;
+          word ^= 1L << setBitIndex;
+        }
+        if (word == 0) {
+          nextWordIndex++;
+        }
+      }
+    }
+
+    @Override
+    public boolean hasNext() {
+      return nextWordIndex < length || nextValueIndex < numValues;
+    }
+
+    @Override
+    public int next() {
+      if (nextValueIndex < numValues) {
+        return nextValues[nextValueIndex++];
+      } else {
+        fill();
+        if (nextValueIndex < numValues) {
+          return nextValues[nextValueIndex++];
+        }
+      }
+      throw new IndexOutOfBoundsException();
+    }
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public boolean equals(Object other) {
+    if (this == other) return true;
+    if (other == null || getClass() != other.getClass()) return false;
+
+    SparseBitSet otherBitSet = (SparseBitSet) other;
+
+    return heapSize == otherBitSet.heapSize &&
+      capacity == otherBitSet.capacity &&
+      length == otherBitSet.length &&
+      Arrays.equals(indices, otherBitSet.indices) &&
+      Arrays.equals(words, otherBitSet.words);
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public int hashCode() {
+    return Arrays.hashCode(words) ^ Arrays.hashCode(indices);
+  }
+
+
+}
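
To make the sparse layout concrete: a set holding only elements 3 and 200
stores two (index, word) pairs, where a full BitSet of the same capacity would
hold four dense words. A sketch assuming same-package access (the types are
package-private); not part of the commit:

    SparseBitSet sparse = new SparseBitSet();
    sparse.addNext(3);                  // word index 0 (3 >> 6), bit 3
    sparse.addNext(200);                // word index 3 (200 >> 6), bit 8 (200 % 64)
    sparse.setCapacity(256);
    // internal state: indices == {0, 3}, words == {1L << 3, 1L << 8}
    assert sparse.contains(200) && !sparse.contains(4);
    BitSet full = sparse.toBitSet();    // inflate to a dense 4-word set
    assert full.contains(3) && full.size() == 2;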

Added: hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/TestIdxHBaseCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/TestIdxHBaseCluster.java?rev=896138&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/TestIdxHBaseCluster.java (added)
+++ hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/TestIdxHBaseCluster.java Tue Jan  5 17:26:49 2010
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import junit.framework.Assert;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.idx.IdxColumnDescriptor;
+import org.apache.hadoop.hbase.client.idx.IdxIndexDescriptor;
+import org.apache.hadoop.hbase.client.idx.IdxQualifierType;
+import org.apache.hadoop.hbase.client.idx.IdxScan;
+import org.apache.hadoop.hbase.client.idx.exp.Comparison;
+import org.apache.hadoop.hbase.client.idx.exp.Expression;
+import org.apache.hadoop.hbase.filter.CompareFilter;
+import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
+import org.apache.hadoop.hbase.regionserver.IdxRegion;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Random;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * Tests indexed HBase cluster operations, including concurrent reads and writes.
+ */
+public class TestIdxHBaseCluster extends TestHBaseCluster {
+  private static final String FIXED_PART = "Some string with a rolling value ";
+
+  /**
+   * constructor
+   */
+  public TestIdxHBaseCluster() {
+    super();
+    conf.setClass(HConstants.REGION_IMPL, IdxRegion.class, IdxRegion.class);
+    // Force flushes and compactions
+    conf.set("hbase.hregion.memstore.flush.size", "262144");
+  }
+
+  /**
+   * Tests reading and writing concurrently to a table.
+   *
+   * @throws IOException exception
+   */
+  @SuppressWarnings({"unchecked"})
+  public void testConcurrentReadWrite() throws IOException {
+    int maxRows = 20000;
+    Random random = new Random(4111994L);
+    byte[][] rows = new byte[maxRows][];
+    for (int i = 0; i < rows.length; i++) {
+      rows[i] = Bytes.toBytes(random.nextInt());
+    }
+    final AtomicInteger sequence = new AtomicInteger(0);
+
+    HTableDescriptor desc = new HTableDescriptor("testConcurrentReadWrite");
+    byte[] family = Bytes.toBytes("concurrentRW");
+    byte[] qualifier = Bytes.toBytes("strings");
+    IdxColumnDescriptor descriptor = new IdxColumnDescriptor(family);
+    descriptor.addIndexDescriptor(new IdxIndexDescriptor(qualifier,
+      IdxQualifierType.CHAR_ARRAY));
+    desc.addFamily(descriptor);
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    admin.createTable(desc);
+    HTable table = new HTable(conf, desc.getName());
+
+    ExecutorService service = Executors.newCachedThreadPool();
+    for (int i = 0; i < 5; i++) {
+      service.submit(new Writer(table, family, qualifier, sequence, rows));
+    }
+
+    byte[] value = Bytes.toBytes((FIXED_PART + 0).toCharArray());
+    IdxScan idxScan = new IdxScan();
+    idxScan.setExpression(Expression.comparison(family, qualifier,
+      Comparison.Operator.EQ, value));
+    idxScan.setFilter(new SingleColumnValueFilter(family, qualifier,
+      CompareFilter.CompareOp.EQUAL, value));
+    idxScan.setCaching(1000);
+
+    int count = 0;
+    int finalCount = maxRows / 10;
+    int printCount = 0;
+    while (count < finalCount) {
+      ResultScanner scanner = table.getScanner(idxScan);
+      int nextCount = 0;
+      for (Result res : scanner) {
+        nextCount++;
+        Assert.assertTrue(Arrays.equals(res.getValue(family, qualifier),
+          value));
+      }
+      if (nextCount > printCount + 1000) {
+        System.out.printf("++ found %d matching rows\n", nextCount);
+        printCount = nextCount;
+      }
+      String infoString = "nextCount=" + nextCount + ", count=" + count +
+        ", finalCount=" + finalCount;
+      boolean condition = nextCount >= count && nextCount <= finalCount;
+      if (!condition) {
+        System.out.println("-------- " + infoString);
+      }
+      Assert.assertTrue(infoString, condition);
+      count = nextCount;
+    }
+    service.shutdown();
+  }
+
+
+  private static class Writer implements Callable {
+
+    private HTable table;
+    private byte[] family;
+    private byte[] qualifier;
+    private AtomicInteger sequence;
+    private byte[][] rows;
+
+    private Writer(HTable table, byte[] family, byte[] qualifier,
+      AtomicInteger sequence, byte[][] rows) {
+      this.table = table;
+      this.family = family;
+      this.qualifier = qualifier;
+      this.sequence = sequence;
+      this.rows = rows;
+    }
+
+    @Override
+    public Object call() throws Exception {
+      while (true) {
+        int num = sequence.getAndIncrement();
+        if (num % 10 == 0) {
+          System.out.printf("-- writing row %d\n", num);
+        }
+        if (num <= rows.length) {
+          Put put = new Put(rows[num]);
+          char[] chars = (FIXED_PART + num % 10).toCharArray();
+          put.add(family, qualifier, Bytes.toBytes(chars));
+          table.put(put);
+        } else {
+          return null;
+        }
+        //Thread.sleep(0L, 100000); // sleep .1 millis
+      }
+    }
+  }
+}
\ No newline at end of file

Added: hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/TestIdxMasterAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/TestIdxMasterAdmin.java?rev=896138&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/TestIdxMasterAdmin.java (added)
+++ hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/TestIdxMasterAdmin.java Tue Jan  5 17:26:49 2010
@@ -0,0 +1,31 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import org.apache.hadoop.hbase.regionserver.IdxRegion;
+
+/** tests administrative functions */
+public class TestIdxMasterAdmin extends TestMasterAdmin {
+  /** constructor */
+  public TestIdxMasterAdmin() {
+    super();
+    conf.setClass(HConstants.REGION_IMPL, IdxRegion.class, IdxRegion.class);
+  }
+}

Added: hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/TestWritableHelper.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/TestWritableHelper.java?rev=896138&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/TestWritableHelper.java (added)
+++ hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/TestWritableHelper.java Tue Jan  5 17:26:49 2010
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import junit.framework.Assert;
+import junit.framework.TestCase;
+import org.apache.hadoop.hbase.client.idx.exp.Comparison;
+import org.apache.hadoop.hbase.client.idx.exp.Expression;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+
+import java.io.IOException;
+
+/**
+ * Tests the {@link org.apache.hadoop.hbase.WritableHelper}.
+ */
+public class TestWritableHelper extends TestCase {
+  /**
+   * Tests that {@link org.apache.hadoop.hbase.WritableHelper#instanceForName(String,
+   * Class)} works as expected.
+   */
+  public void testInstanceForName() {
+    Expression expression = WritableHelper.instanceForName(Comparison.class.getName(), Expression.class);
+
+    Assert.assertNotNull("Instance should not be null", expression);
+    Assert.assertEquals("Wrong class returned", Comparison.class, expression.getClass());
+  }
+
+  /**
+   * Tests that {@link org.apache.hadoop.hbase.WritableHelper#instanceForName(String,
+   * Class)} works as expected when an invalid class name is provided.
+   */
+  public void testInstanceForNameInvalidClassName() {
+    try {
+      WritableHelper.instanceForName(Comparison.class.getName() + "1", Expression.class);
+      Assert.fail("An exception should have been thrown");
+    } catch (Exception e) {
+      Assert.assertEquals("Wrong exception was thrown", IllegalArgumentException.class, e.getClass());
+    }
+  }
+
+  /**
+   * Tests that {@link org.apache.hadoop.hbase.WritableHelper#writeInstance(java.io.DataOutput,
+   * org.apache.hadoop.io.Writable)} fails as expected when null is provided.
+   * @throws IOException if an error occurs
+   */
+  public void testWriteInstanceFailsWithNull() throws IOException {
+    DataOutputBuffer dataOutputBuffer = new DataOutputBuffer();
+    try {
+      WritableHelper.writeInstance(dataOutputBuffer, null);
+      Assert.fail("Expected an exception");
+    } catch (Exception e) {
+      Assert.assertEquals("Wrong exception thrown when null was provided", IllegalArgumentException.class, e.getClass());
+    }
+  }
+
+  /**
+   * Tests that {@link org.apache.hadoop.hbase.WritableHelper#writeInstance(java.io.DataOutput,
+   * org.apache.hadoop.io.Writable)} and {@link org.apache.hadoop.hbase.WritableHelper#readInstance(java.io.DataInput,
+   * Class)} work as expected.
+   * @throws IOException if an error occurs
+   */
+  public void testWriteReadInstance() throws IOException {
+    Expression expression = Expression.comparison("columnName1", "qualifier1", Comparison.Operator.EQ, Bytes.toBytes("value"));
+
+    DataOutputBuffer dataOutputBuffer = new DataOutputBuffer();
+    WritableHelper.writeInstance(dataOutputBuffer, expression);
+
+    DataInputBuffer dataInputBuffer = new DataInputBuffer();
+    dataInputBuffer.reset(dataOutputBuffer.getData(), dataOutputBuffer.getLength());
+
+    Expression clonedExpression = WritableHelper.readInstance(dataInputBuffer, Expression.class);
+
+    Assert.assertEquals("The expression was not the same after being written and read", expression, clonedExpression);
+  }
+
+  /**
+   * Tests that {@link org.apache.hadoop.hbase.WritableHelper#writeInstanceNullable(java.io.DataOutput,
+   * org.apache.hadoop.io.Writable)} and {@link org.apache.hadoop.hbase.WritableHelper#readInstanceNullable(java.io.DataInput,
+   * Class)} work as expected.
+   * @throws IOException if an error occurs
+   */
+  public void testWriteReadInstanceNullable() throws IOException {
+    Expression expression = Expression.comparison("columnName1", "qualifier1", Comparison.Operator.EQ, Bytes.toBytes("value"));
+
+    DataOutputBuffer dataOutputBuffer = new DataOutputBuffer();
+    WritableHelper.writeInstanceNullable(dataOutputBuffer, expression);
+
+    DataInputBuffer dataInputBuffer = new DataInputBuffer();
+    dataInputBuffer.reset(dataOutputBuffer.getData(), dataOutputBuffer.getLength());
+
+    Expression clonedExpression = WritableHelper.readInstanceNullable(dataInputBuffer, Expression.class);
+
+    Assert.assertEquals("The expression was not the same after being written and read", expression, clonedExpression);
+  }
+
+  /**
+   * Tests that {@link org.apache.hadoop.hbase.WritableHelper#writeInstanceNullable(java.io.DataOutput,
+   * org.apache.hadoop.io.Writable)} and {@link org.apache.hadoop.hbase.WritableHelper#readInstanceNullable(java.io.DataInput,
+   * Class)} work as expected when null is provided.
+   * @throws IOException if an error occurs
+   */
+  public void testWriteReadInstanceNullableWithNull() throws IOException {
+    DataOutputBuffer dataOutputBuffer = new DataOutputBuffer();
+    WritableHelper.writeInstanceNullable(dataOutputBuffer, null);
+
+    DataInputBuffer dataInputBuffer = new DataInputBuffer();
+    dataInputBuffer.reset(dataOutputBuffer.getData(), dataOutputBuffer.getLength());
+
+    Expression clonedExpression = WritableHelper.readInstanceNullable(dataInputBuffer, Expression.class);
+
+    Assert.assertNull("A null value was expected", clonedExpression);
+  }
+}
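
The write/reset/read sequence repeated throughout these tests can be factored
into a small helper; a sketch using the same Hadoop io classes (the helper
name roundTrip is illustrative, not part of this commit):

  import java.io.IOException;
  import org.apache.hadoop.io.DataInputBuffer;
  import org.apache.hadoop.io.DataOutputBuffer;
  import org.apache.hadoop.io.Writable;

  // Serialise a Writable and read it back into a fresh instance, mirroring
  // the buffer handling in the tests above.
  public static <T extends Writable> T roundTrip(Writable source, T target)
      throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    source.write(out);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    target.readFields(in);
    return target;
  }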

Added: hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/client/idx/TestIdxColumnDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/client/idx/TestIdxColumnDescriptor.java?rev=896138&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/client/idx/TestIdxColumnDescriptor.java (added)
+++ hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/client/idx/TestIdxColumnDescriptor.java Tue Jan  5 17:26:49 2010
@@ -0,0 +1,207 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client.idx;
+
+import junit.framework.Assert;
+import junit.framework.TestCase;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+
+import java.io.IOException;
+import java.util.Set;
+
+/**
+ * Tests the {@link org.apache.hadoop.hbase.client.idx.IdxColumnDescriptor}.
+ */
+public class TestIdxColumnDescriptor extends TestCase {
+  /**
+   * Tests the {@link org.apache.hadoop.hbase.client.idx.IdxColumnDescriptor#addIndexDescriptor(IdxIndexDescriptor)}
+   * method.
+   */
+  public void testAddIndexDescriptor() throws IOException {
+    IdxColumnDescriptor descriptor = new IdxColumnDescriptor("familyName");
+    IdxIndexDescriptor indexDescriptor = new IdxIndexDescriptor(Bytes.toBytes("qualifier"), IdxQualifierType.INT);
+
+    Assert.assertEquals("The column descriptor should not contain any index descriptors",
+        0, descriptor.getIndexDescriptors().size());
+    descriptor.addIndexDescriptor(indexDescriptor);
+    Assert.assertEquals("The column descriptor should contain an index descriptor",
+        1, descriptor.getIndexDescriptors().size());
+  }
+
+  /**
+   * Tests the {@link org.apache.hadoop.hbase.client.idx.IdxColumnDescriptor#addIndexDescriptor(IdxIndexDescriptor)}
+   * method when an index descriptor already exists for the qualifier.
+   */
+  public void testAddIndexDescriptorWithExisting() throws IOException {
+    IdxColumnDescriptor descriptor = new IdxColumnDescriptor("familyName");
+    IdxIndexDescriptor indexDescriptor = new IdxIndexDescriptor(Bytes.toBytes("qualifier"), IdxQualifierType.INT);
+    IdxIndexDescriptor indexDescriptor2 = new IdxIndexDescriptor(Bytes.toBytes("qualifier"), IdxQualifierType.LONG);
+
+    descriptor.addIndexDescriptor(indexDescriptor);
+    try {
+      descriptor.addIndexDescriptor(indexDescriptor2);
+      Assert.fail("An exception should have been thrown");
+    } catch (Exception e) {
+      Assert.assertEquals("Wrong exception thrown", IllegalArgumentException.class, e.getClass());
+    }
+  }
+
+  /**
+   * Tests the {@link org.apache.hadoop.hbase.client.idx.IdxColumnDescriptor#removeIndexDescriptor(byte[])}
+   * method.
+   */
+  public void testRemoveIndexDescriptor() throws IOException {
+    IdxColumnDescriptor descriptor = new IdxColumnDescriptor("familyName");
+    IdxIndexDescriptor indexDescriptor = new IdxIndexDescriptor(Bytes.toBytes("qualifier"), IdxQualifierType.INT);
+
+    Assert.assertEquals("The column descriptor should not contain any index descriptors",
+        0, descriptor.getIndexDescriptors().size());
+    descriptor.addIndexDescriptor(indexDescriptor);
+    Assert.assertEquals("The column descriptor should contain an index descriptor",
+        1, descriptor.getIndexDescriptors().size());
+    Assert.assertTrue("The remove method should have returned true",
+        descriptor.removeIndexDescriptor(Bytes.toBytes("qualifier")));
+    Assert.assertEquals("The column descriptor should not contain any index descriptors",
+        0, descriptor.getIndexDescriptors().size());
+  }
+
+  /**
+   * Tests the {@link IdxColumnDescriptor#getIndexedQualifiers()} method when
+   * multiple index descriptors have been added.
+   */
+  public void testGetIndexedQualifiers() throws IOException {
+    IdxColumnDescriptor descriptor = new IdxColumnDescriptor("familyName");
+    byte[] qualifierName1 = Bytes.toBytes("qualifier1");
+    IdxIndexDescriptor indexDescriptor1
+        = new IdxIndexDescriptor(qualifierName1, IdxQualifierType.INT);
+    byte[] qualifierName2 = Bytes.toBytes("qualifier2");
+    IdxIndexDescriptor indexDescriptor2
+        = new IdxIndexDescriptor(qualifierName2, IdxQualifierType.LONG);
+
+    descriptor.addIndexDescriptor(indexDescriptor1);
+    descriptor.addIndexDescriptor(indexDescriptor2);
+
+    Set<ImmutableBytesWritable> indexedQualifiers = descriptor.getIndexedQualifiers();
+    Assert.assertNotNull("The set of indexed qualifiers should not be null",
+        indexedQualifiers);
+    Assert.assertEquals("The column desciptor should contain index qualifiers",
+        2, indexedQualifiers.size());
+
+    Assert.assertTrue("The set of indexed qualifiers should contain the key",
+        indexedQualifiers.contains(new ImmutableBytesWritable(qualifierName1)));
+    Assert.assertTrue("The set of indexed qualifiers should contain the key",
+        indexedQualifiers.contains(new ImmutableBytesWritable(qualifierName2)));
+  }
+
+  /**
+   * Tests the {@link IdxColumnDescriptor#write(java.io.DataOutput)} and {@link
+   * org.apache.hadoop.hbase.client.idx.IdxColumnDescriptor#readFields(java.io.DataInput)}
+   * methods.
+   * @throws java.io.IOException if an error occurs
+   */
+  public void testWritable() throws IOException {
+    IdxColumnDescriptor descriptor = new IdxColumnDescriptor("familyName");
+    byte[] qualifierName1 = Bytes.toBytes("qualifier1");
+    IdxIndexDescriptor indexDescriptor1
+        = new IdxIndexDescriptor(qualifierName1, IdxQualifierType.INT);
+    byte[] qualifierName2 = Bytes.toBytes("qualifier2");
+    IdxIndexDescriptor indexDescriptor2
+        = new IdxIndexDescriptor(qualifierName2, IdxQualifierType.LONG);
+
+    descriptor.addIndexDescriptor(indexDescriptor1);
+    descriptor.addIndexDescriptor(indexDescriptor2);
+
+    DataOutputBuffer dataOutputBuffer = new DataOutputBuffer();
+    descriptor.write(dataOutputBuffer);
+
+    DataInputBuffer dataInputBuffer = new DataInputBuffer();
+    dataInputBuffer.reset(dataOutputBuffer.getData(), dataOutputBuffer.getLength());
+
+    IdxColumnDescriptor clonedDescriptor = new IdxColumnDescriptor();
+    clonedDescriptor.readFields(dataInputBuffer);
+
+    Assert.assertEquals("The expression was not the same after being written and read", descriptor, clonedDescriptor);
+  }
+
+  /**
+   * Tests the {@link org.apache.hadoop.hbase.client.idx.IdxColumnDescriptor#compareTo(org.apache.hadoop.hbase.HColumnDescriptor)}
+   * method when the two instances are the same.
+   */
+  public void testCompareToWhenSame() throws IOException {
+    IdxColumnDescriptor descriptor1 = new IdxColumnDescriptor("familyName1");
+    byte[] qualifierName1 = Bytes.toBytes("qualifier1");
+    IdxIndexDescriptor indexDescriptor1
+        = new IdxIndexDescriptor(qualifierName1, IdxQualifierType.INT);
+    descriptor1.addIndexDescriptor(indexDescriptor1);
+
+    IdxColumnDescriptor descriptor2 = new IdxColumnDescriptor("familyName1");
+    byte[] qualifierName2 = Bytes.toBytes("qualifier1");
+    IdxIndexDescriptor indexDescriptor2
+        = new IdxIndexDescriptor(qualifierName2, IdxQualifierType.INT);
+    descriptor2.addIndexDescriptor(indexDescriptor2);
+
+    Assert.assertTrue("The compare to should have returned 0", descriptor1.compareTo(descriptor2) == 0);
+  }
+
+  /**
+   * Tests the {@link org.apache.hadoop.hbase.client.idx.IdxColumnDescriptor#compareTo(org.apache.hadoop.hbase.HColumnDescriptor)}
+   * method when the two instances are different.
+   */
+  public void testCompareToWhenDifferent() throws IOException {
+    IdxColumnDescriptor descriptor1 = new IdxColumnDescriptor("familyName1");
+    byte[] qualifierName1 = Bytes.toBytes("qualifier1");
+    IdxIndexDescriptor indexDescriptor1
+        = new IdxIndexDescriptor(qualifierName1, IdxQualifierType.INT);
+    descriptor1.addIndexDescriptor(indexDescriptor1);
+
+    IdxColumnDescriptor descriptor2 = new IdxColumnDescriptor("familyName2");
+    byte[] qualifierName2 = Bytes.toBytes("qualifier2");
+    IdxIndexDescriptor indexDescriptor2
+        = new IdxIndexDescriptor(qualifierName2, IdxQualifierType.INT);
+    descriptor2.addIndexDescriptor(indexDescriptor2);
+
+    Assert.assertTrue("The compare to should not have returned 0", descriptor1.compareTo(descriptor2) != 0);
+  }
+
+  /**
+   * Tests that a decorated column descriptor compares as different from a plain column descriptor when the Idx decorator isn't being used.
+   */
+  public void testCompareToWithoutDecorator() throws IOException {
+    IdxColumnDescriptor descriptor1 = new IdxColumnDescriptor("familyName1");
+    byte[] qualifierName1 = Bytes.toBytes("qualifier1");
+    IdxIndexDescriptor indexDescriptor1
+        = new IdxIndexDescriptor(qualifierName1, IdxQualifierType.INT);
+    descriptor1.addIndexDescriptor(indexDescriptor1);
+
+    IdxColumnDescriptor descriptor2 = new IdxColumnDescriptor("familyName2");
+    byte[] qualifierName2 = Bytes.toBytes("qualifier2");
+    IdxIndexDescriptor indexDescriptor2
+        = new IdxIndexDescriptor(qualifierName2, IdxQualifierType.INT);
+    descriptor2.addIndexDescriptor(indexDescriptor2);
+
+    HColumnDescriptor descriptor2WithoutDec = new HColumnDescriptor(descriptor2);
+
+    Assert.assertTrue("The compare to should not have returned 0", descriptor1.compareTo(descriptor2WithoutDec) != 0);
+  }
+}
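
Taken together, the descriptor APIs exercised above suggest the client-side
setup for an indexed family. A sketch, assuming the 0.20 admin API (table,
family and qualifier names are illustrative; exception handling omitted):

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.HTableDescriptor;
  import org.apache.hadoop.hbase.client.HBaseAdmin;
  import org.apache.hadoop.hbase.client.idx.IdxColumnDescriptor;
  import org.apache.hadoop.hbase.client.idx.IdxIndexDescriptor;
  import org.apache.hadoop.hbase.client.idx.IdxQualifierType;
  import org.apache.hadoop.hbase.util.Bytes;

  // Declare an INT-typed index on family:qualifier using the same calls
  // the tests above verify, then create the table.
  HTableDescriptor table = new HTableDescriptor("myTable");
  IdxColumnDescriptor family = new IdxColumnDescriptor("family");
  family.addIndexDescriptor(
      new IdxIndexDescriptor(Bytes.toBytes("qualifier"), IdxQualifierType.INT));
  table.addFamily(family);
  new HBaseAdmin(new HBaseConfiguration()).createTable(table);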

Added: hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/client/idx/TestIdxIndexDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/client/idx/TestIdxIndexDescriptor.java?rev=896138&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/client/idx/TestIdxIndexDescriptor.java (added)
+++ hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/client/idx/TestIdxIndexDescriptor.java Tue Jan  5 17:26:49 2010
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client.idx;
+
+import junit.framework.Assert;
+import junit.framework.TestCase;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+
+import java.io.IOException;
+
+/**
+ * Tests the {@link IdxIndexDescriptor} class.
+ */
+public class TestIdxIndexDescriptor extends TestCase {
+  /**
+   * Tests the {@link IdxIndexDescriptor#write(java.io.DataOutput)} and {@link
+   * org.apache.hadoop.hbase.client.idx.IdxIndexDescriptor#readFields(java.io.DataInput)}
+   * methods.
+   * @throws java.io.IOException if an error occurs
+   */
+  public void testWritable() throws IOException {
+    byte[] qualifierName1 = Bytes.toBytes("qualifier1");
+    IdxIndexDescriptor descriptor
+        = new IdxIndexDescriptor(qualifierName1, IdxQualifierType.INT);
+
+    DataOutputBuffer dataOutputBuffer = new DataOutputBuffer();
+    descriptor.write(dataOutputBuffer);
+
+    DataInputBuffer dataInputBuffer = new DataInputBuffer();
+    dataInputBuffer.reset(dataOutputBuffer.getData(), dataOutputBuffer.getLength());
+
+    IdxIndexDescriptor clonedDescriptor = new IdxIndexDescriptor();
+    clonedDescriptor.readFields(dataInputBuffer);
+
+    Assert.assertEquals("The descriptor was not the same after being written and read", descriptor, clonedDescriptor);
+  }
+}

Added: hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/client/idx/TestIdxScan.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/client/idx/TestIdxScan.java?rev=896138&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/client/idx/TestIdxScan.java (added)
+++ hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/client/idx/TestIdxScan.java Tue Jan  5 17:26:49 2010
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client.idx;
+
+import junit.framework.Assert;
+import junit.framework.TestCase;
+import org.apache.hadoop.hbase.client.idx.exp.Comparison;
+import org.apache.hadoop.hbase.client.idx.exp.Expression;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+
+import java.io.IOException;
+
+/**
+ * Tests the {@link IdxScan} class.
+ */
+public class TestIdxScan extends TestCase {
+
+  /**
+   * Tests that the writable and readFields methods work as expected.
+   *
+   * @throws java.io.IOException if an IO error occurs
+   */
+  public void testWritable() throws IOException {
+    Expression expression = Expression.comparison("columnName", "qualifier", Comparison.Operator.EQ, Bytes.toBytes("value"));
+
+    IdxScan idxScan = new IdxScan(expression);
+    DataOutputBuffer dataOutputBuffer = new DataOutputBuffer();
+    idxScan.write(dataOutputBuffer);
+
+    DataInputBuffer dataInputBuffer = new DataInputBuffer();
+    dataInputBuffer.reset(dataOutputBuffer.getData(), dataOutputBuffer.getLength());
+
+    IdxScan clonedScan = new IdxScan();
+    clonedScan.readFields(dataInputBuffer);
+
+    Assert.assertEquals("The expression was not the same after being written and read", idxScan.getExpression(), clonedScan.getExpression());
+  }
+
+  /**
+   * Tests that the writable and readFields methods work as expected when no expression is set.
+   *
+   * @throws java.io.IOException if an IO error occurs
+   */
+  public void testWritableNullExpression() throws IOException {
+    IdxScan idxScan = new IdxScan();
+    DataOutputBuffer dataOutputBuffer = new DataOutputBuffer();
+    idxScan.write(dataOutputBuffer);
+
+    DataInputBuffer dataInputBuffer = new DataInputBuffer();
+    dataInputBuffer.reset(dataOutputBuffer.getData(), dataOutputBuffer.getLength());
+
+    IdxScan clonedScan = new IdxScan();
+    clonedScan.readFields(dataInputBuffer);
+
+    Assert.assertEquals("The expression was not the same after being written and read", idxScan.getExpression(), clonedScan.getExpression());
+  }
+}
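
For reference, the expression-bearing scan serialised here would typically be
issued against a table. A sketch, assuming an IdxScan can be handed to
HTable#getScanner like any other Scan (table name and predicate are
illustrative):

  import org.apache.hadoop.hbase.client.HTable;
  import org.apache.hadoop.hbase.client.ResultScanner;
  import org.apache.hadoop.hbase.client.idx.IdxScan;
  import org.apache.hadoop.hbase.client.idx.exp.Comparison;
  import org.apache.hadoop.hbase.client.idx.exp.Expression;
  import org.apache.hadoop.hbase.util.Bytes;

  // The expression travels with the scan, exactly as the round-trip test
  // above demonstrates.
  IdxScan scan = new IdxScan(
      Expression.comparison("family", "qualifier",
          Comparison.Operator.EQ, Bytes.toBytes("value")));
  HTable table = new HTable("myTable");
  ResultScanner scanner = table.getScanner(scan);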

Added: hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/client/idx/exp/TestComparison.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/client/idx/exp/TestComparison.java?rev=896138&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/client/idx/exp/TestComparison.java (added)
+++ hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/client/idx/exp/TestComparison.java Tue Jan  5 17:26:49 2010
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client.idx.exp;
+
+import junit.framework.Assert;
+import junit.framework.TestCase;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.client.idx.exp.Comparison;
+import org.apache.hadoop.hbase.client.idx.exp.Expression;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+
+import java.io.IOException;
+
+/**
+ * Tests the {@link Comparison} class.
+ */
+public class TestComparison extends TestCase {
+  /**
+   * Tests the constructor.
+   */
+  public void testConstructor() {
+    byte[] columnName1 = Bytes.toBytes("columnName1");
+    byte[] qualifier1 = Bytes.toBytes("qualifier1");
+    byte[] value1 = Bytes.toBytes("value1");
+    Comparison.Operator operator1 = Comparison.Operator.EQ;
+
+    Comparison comparison = new Comparison(columnName1, qualifier1, operator1, value1);
+
+    Assert.assertEquals("columnName was incorrect", columnName1, comparison.getColumnName());
+    Assert.assertEquals("qualifier was incorrect", qualifer1, comparison.getQualifier());
+    Assert.assertEquals("value was incorrect", value1, comparison.getValue());
+    Assert.assertEquals("operator was incorrect", operator1, comparison.getOperator());
+  }
+
+  /**
+   * Tests that the equals method works.
+   */
+  public void testEquals() {
+    Expression expression1 = Expression.comparison("columnName", "qualifier", Comparison.Operator.EQ, Bytes.toBytes("value"));
+    Expression expression2 = Expression.comparison("columnName", "qualifier", Comparison.Operator.EQ, Bytes.toBytes("value"));
+
+    Assert.assertTrue("equals didn't work as expected", expression1.equals(expression2));
+  }
+
+  /**
+   * Tests that the equals method detects differing values.
+   */
+  public void testEqualsFalse() {
+    Expression expression1 = Expression.comparison("columnName", "qualifier", Comparison.Operator.EQ, Bytes.toBytes("value"));
+    Expression expression2 = Expression.comparison("columnName", "qualifier", Comparison.Operator.EQ, Bytes.toBytes("othervalue"));
+
+    Assert.assertFalse("equals didn't work as expected", expression1.equals(expression2));
+  }
+
+  /**
+   * Tests that a comparison can be written and read and still be equal.
+   *
+   * @throws java.io.IOException if an io error occurs
+   */
+  public void testWritable() throws IOException {
+    Expression expression = Expression.comparison("columnName1", "qualifier1", Comparison.Operator.EQ, Bytes.toBytes("value"));
+
+    DataOutputBuffer dataOutputBuffer = new DataOutputBuffer();
+    expression.write(dataOutputBuffer);
+
+    DataInputBuffer dataInputBuffer = new DataInputBuffer();
+    dataInputBuffer.reset(dataOutputBuffer.getData(), dataOutputBuffer.getLength());
+
+    Expression clonedExpression = new Comparison();
+    clonedExpression.readFields(dataInputBuffer);
+
+    Assert.assertEquals("The expression was not the same after being written and read", expression, clonedExpression);
+  }
+}
\ No newline at end of file

Added: hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/client/idx/exp/TestExpression.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/client/idx/exp/TestExpression.java?rev=896138&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/client/idx/exp/TestExpression.java (added)
+++ hadoop/hbase/branches/0.20/src/contrib/indexed/src/test/org/apache/hadoop/hbase/client/idx/exp/TestExpression.java Tue Jan  5 17:26:49 2010
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client.idx.exp;
+
+import junit.framework.Assert;
+import junit.framework.TestCase;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.client.idx.exp.And;
+import org.apache.hadoop.hbase.client.idx.exp.Comparison;
+import org.apache.hadoop.hbase.client.idx.exp.Expression;
+import org.apache.hadoop.hbase.client.idx.exp.Or;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+
+import java.io.IOException;
+
+/**
+ * Tests the expression class.
+ */
+public class TestExpression extends TestCase {
+  /**
+   * Tests that the methods to build an expression all result in equal instances
+   * when provided the same input.
+   */
+  public void testExpressionBuilder() {
+    String columnName1 = "columnName1";
+    String qualifier1 = "qualifier1";
+    byte[] value1 = Bytes.toBytes("value1");
+    Comparison.Operator operator1 = Comparison.Operator.EQ;
+
+    String columnName2 = "columnName2";
+    String qualifier2 = "qualifier2";
+    byte[] value2 = Bytes.toBytes("value2");
+    Comparison.Operator operator2 = Comparison.Operator.GT;
+
+    String columnName3 = "columnName3";
+    String qualifier3 = "qualifier3";
+    byte[] value3 = Bytes.toBytes("value3");
+    Comparison.Operator operator3 = Comparison.Operator.LT;
+
+    Expression expression1 = new Or(
+        new Comparison(columnName1, qualifier1, operator1, value1),
+        new And(
+            new Comparison(columnName2, qualifier2, operator2, value2),
+            new Comparison(columnName3, qualifier3, operator3, value3)
+        )
+    );
+
+    Expression expression2 = Expression
+        .or(
+            Expression.comparison(columnName1, qualifier1, operator1, value1)
+        )
+        .or(
+            Expression.and()
+                .and(Expression.comparison(columnName2, qualifier2, operator2, value2))
+                .and(Expression.comparison(columnName3, qualifier3, operator3, value3))
+        );
+
+    Expression expression3 = Expression.or(
+        Expression.comparison(columnName1, qualifier1, operator1, value1),
+        Expression.and(
+            Expression.comparison(columnName2, qualifier2, operator2, value2),
+            Expression.comparison(columnName3, qualifier3, operator3, value3)
+        )
+    );
+
+    Assert.assertTrue("The expressions didn't match", expression1.equals(expression2) && expression1.equals(expression3));
+  }
+
+  /**
+   * Tests that an expression tree can be written and read and still be equal.
+   *
+   * @throws java.io.IOException if an io error occurs
+   */
+  public void testWritable() throws IOException {
+    Expression expression = Expression.or(
+        Expression.comparison("columnName1", "qualifier1", Comparison.Operator.EQ, Bytes.toBytes("value")),
+        Expression.and(
+            Expression.comparison("columnName2", "qualifier2", Comparison.Operator.GT, Bytes.toBytes("value2")),
+            Expression.comparison("columnName3", "qualifier3", Comparison.Operator.LT, Bytes.toBytes("value3"))
+        )
+    );
+
+    DataOutputBuffer dataOutputBuffer = new DataOutputBuffer();
+    expression.write(dataOutputBuffer);
+
+    DataInputBuffer dataInputBuffer = new DataInputBuffer();
+    dataInputBuffer.reset(dataOutputBuffer.getData(), dataOutputBuffer.getLength());
+
+    Expression clonedExpression = new Or();
+    clonedExpression.readFields(dataInputBuffer);
+
+    Assert.assertEquals("The expression was not the same after being written and read", expression, clonedExpression);
+  }
+}
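
As the equivalence test shows, the static factory style is usually the most
compact of the three. An illustrative compound predicate built with only the
operators this commit defines (family, qualifier and value names are
placeholders):

  import org.apache.hadoop.hbase.client.idx.exp.Comparison;
  import org.apache.hadoop.hbase.client.idx.exp.Expression;
  import org.apache.hadoop.hbase.util.Bytes;

  // (age > 21 AND age < 65) OR status == "retired", composed with the
  // factory methods verified above.
  Expression expr = Expression.or(
      Expression.and(
          Expression.comparison("info", "age", Comparison.Operator.GT, Bytes.toBytes(21)),
          Expression.comparison("info", "age", Comparison.Operator.LT, Bytes.toBytes(65))),
      Expression.comparison("info", "status", Comparison.Operator.EQ, Bytes.toBytes("retired")));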


