incubator-blur-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From amccu...@apache.org
Subject [29/92] [abbrv] [partial] Fixed BLUR-126.
Date Tue, 11 Jun 2013 02:41:15 GMT
http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/b0e26648/blur-query/src/main/java/org/apache/blur/analysis/BlurAnalyzer.java
----------------------------------------------------------------------
diff --git a/blur-query/src/main/java/org/apache/blur/analysis/BlurAnalyzer.java b/blur-query/src/main/java/org/apache/blur/analysis/BlurAnalyzer.java
new file mode 100644
index 0000000..df3c825
--- /dev/null
+++ b/blur-query/src/main/java/org/apache/blur/analysis/BlurAnalyzer.java
@@ -0,0 +1,514 @@
+package org.apache.blur.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import static org.apache.blur.lucene.LuceneVersionConstant.LUCENE_VERSION;
+import static org.apache.blur.util.BlurConstants.PRIME_DOC;
+import static org.apache.blur.util.BlurConstants.RECORD_ID;
+import static org.apache.blur.util.BlurConstants.ROW_ID;
+import static org.apache.blur.util.BlurConstants.SUPER;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.Reader;
+import java.lang.reflect.Constructor;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.blur.thirdparty.thrift_0_9_0.TException;
+import org.apache.blur.thirdparty.thrift_0_9_0.protocol.TJSONProtocol;
+import org.apache.blur.thirdparty.thrift_0_9_0.transport.TMemoryBuffer;
+import org.apache.blur.thirdparty.thrift_0_9_0.transport.TMemoryInputTransport;
+import org.apache.blur.thrift.generated.AlternateColumnDefinition;
+import org.apache.blur.thrift.generated.AnalyzerDefinition;
+import org.apache.blur.thrift.generated.ColumnDefinition;
+import org.apache.blur.thrift.generated.ColumnFamilyDefinition;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.AnalyzerWrapper;
+import org.apache.lucene.analysis.core.KeywordAnalyzer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.DoubleField;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.FloatField;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.LongField;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.Version;
+
+public final class BlurAnalyzer extends AnalyzerWrapper {
+
+  public enum TYPE {
+    LONG, DOUBLE, FLOAT, INTEGER, TEXT
+  }
+
+  @SuppressWarnings("serial")
+  private static Set<String> typeNameCache = new HashSet<String>() {
+    {
+      TYPE[] values = TYPE.values();
+      for (TYPE t : values) {
+        add(t.name());
+      }
+    }
+  };
+
+  private static final String STANDARD = "org.apache.lucene.analysis.standard.StandardAnalyzer";
+  public static final BlurAnalyzer BLANK_ANALYZER = new BlurAnalyzer(new KeywordAnalyzer());
+
+  private static final Analyzer ERROR_ANALYZER = new Analyzer() {
+    @Override
+    protected TokenStreamComponents createComponents(String field, Reader reader) {
+      throw new RuntimeException("This analyzer should never be used.");
+    }
+  };
+  private static Map<String, Class<? extends Analyzer>> aliases = new HashMap<String, Class<? extends Analyzer>>();
+
+  private Set<String> _subIndexNames = new HashSet<String>();
+  private Map<String, Set<String>> _subIndexNameLookups = new HashMap<String, Set<String>>();
+  private Map<String, Boolean> _fullTextFields = new HashMap<String, Boolean>();
+  private Map<String, Boolean> _fullTextColumnFamilies = new HashMap<String, Boolean>();
+  private AnalyzerDefinition _analyzerDefinition;
+  private Analyzer _defaultAnalyzer;
+  private Map<String, Analyzer> _analyzers = new HashMap<String, Analyzer>();
+  private Map<String, TYPE> _typeLookup = new HashMap<String, BlurAnalyzer.TYPE>();
+  private Map<String, FieldType> _fieldTypes = new HashMap<String, FieldType>();
+
+  public Set<String> getSubIndexNames(String indexName) {
+    return _subIndexNameLookups.get(indexName);
+  }
+
+  public BlurAnalyzer(Analyzer analyzer) {
+    _analyzerDefinition = new AnalyzerDefinition();
+    _defaultAnalyzer = analyzer;
+  }
+
+  public BlurAnalyzer(AnalyzerDefinition analyzerDefinition) {
+    _analyzerDefinition = analyzerDefinition;
+    ColumnDefinition defaultDefinition = analyzerDefinition.getDefaultDefinition();
+    if (defaultDefinition == null) {
+      defaultDefinition = new ColumnDefinition(STANDARD, true, null);
+      analyzerDefinition.setDefaultDefinition(defaultDefinition);
+    }
+    _defaultAnalyzer = getAnalyzerByClassName(defaultDefinition.getAnalyzerClassName(), aliases, null, null,
+        _fieldTypes);
+    _analyzers = new HashMap<String, Analyzer>();
+    _analyzers.put(ROW_ID, ERROR_ANALYZER);
+    _analyzers.put(RECORD_ID, ERROR_ANALYZER);
+    _analyzers.put(PRIME_DOC, ERROR_ANALYZER);
+    _analyzers.put(PRIME_DOC, ERROR_ANALYZER);
+    _analyzers.put(SUPER, ERROR_ANALYZER);
+    load(_analyzers, _analyzerDefinition.columnFamilyDefinitions, _fullTextFields, _subIndexNameLookups,
+        _subIndexNames, _fullTextColumnFamilies, _typeLookup, _fieldTypes);
+  }
+
+  public BlurAnalyzer() {
+    this(new StandardAnalyzer(LUCENE_VERSION));
+  }
+
+  private Analyzer getAnalyzer(String name) {
+    Analyzer analyzer = _analyzers.get(name);
+    return analyzer;
+  }
+
+  public TYPE getTypeLookup(String field) {
+    TYPE type = _typeLookup.get(field);
+    if (type == null) {
+      return TYPE.TEXT;
+    }
+    return type;
+  }
+
+  public Query getNewRangeQuery(String field, String part1, String part2, boolean startInclusive, boolean endInclusive) {
+    TYPE type = _typeLookup.get(field);
+    if (type == null) {
+      return null;
+    }
+    FieldType fieldType = _fieldTypes.get(field);
+    switch (type) {
+    case INTEGER:
+      int integerPrecisionStep = fieldType.numericPrecisionStep();
+      int integerMin = Integer.parseInt(part1);
+      int integerMax = Integer.parseInt(part2);
+      return NumericRangeQuery.newIntRange(field, integerPrecisionStep, integerMin, integerMax, startInclusive,
+          endInclusive);
+
+    case DOUBLE:
+      int doublePrecisionStep = fieldType.numericPrecisionStep();
+      double doubleMin = Double.parseDouble(part1);
+      double doubleMax = Double.parseDouble(part2);
+      return NumericRangeQuery.newDoubleRange(field, doublePrecisionStep, doubleMin, doubleMax, startInclusive,
+          endInclusive);
+
+    case FLOAT:
+      int floatPrecisionStep = fieldType.numericPrecisionStep();
+      float floatMin = Float.parseFloat(part1);
+      float floatMax = Float.parseFloat(part2);
+      return NumericRangeQuery.newFloatRange(field, floatPrecisionStep, floatMin, floatMax, startInclusive,
+          endInclusive);
+
+    case LONG:
+      int longPrecisionStep = fieldType.numericPrecisionStep();
+      long longMin = Long.parseLong(part1);
+      long longMax = Long.parseLong(part2);
+      return NumericRangeQuery.newLongRange(field, longPrecisionStep, longMin, longMax, startInclusive, endInclusive);
+
+    default:
+      return null;
+    }
+
+  }
+
+  public boolean isFullTextField(String fieldName) {
+    Boolean b = _fullTextFields.get(fieldName);
+    if (b != null) {
+      return b;
+    }
+    String cf = getColumnFamily(fieldName);
+    if (cf == null) {
+      return false;
+    }
+    b = _fullTextColumnFamilies.get(cf);
+    if (b != null) {
+      return b;
+    }
+    ColumnDefinition defaultDefinition = _analyzerDefinition.getDefaultDefinition();
+    if (defaultDefinition != null && defaultDefinition.fullTextIndex) {
+      return true;
+    }
+    return false;
+  }
+
+  /**
+   * This method decides on the field type for the given field by name. Sub
+   * fields will also be passed in the fieldName such as fam1.col.sub1.
+   * 
+   * @param fieldName
+   * @return the {@link FieldType}
+   */
+  public FieldType getFieldType(String field) {
+    FieldType fieldType = _fieldTypes.get(field);
+    if (fieldType == null) {
+      fieldType = new FieldType(TextField.TYPE_STORED);
+      fieldType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+    }
+    if (isSubFieldName(field)) {
+      fieldType.setStored(false);
+    }
+    return fieldType;
+  }
+
+  /**
+   * Checks if the fieldName is a sub field or not.
+   * 
+   * @param fieldName
+   *          the field name to check.
+   * @return boolean
+   */
+  public boolean isSubFieldName(String fieldName) {
+    return _subIndexNames.contains(fieldName);
+  }
+
+  /**
+   * Get field will return the proper field for the given {@link FieldType}.
+   * 
+   * @param fieldName
+   *          the field name.
+   * @param value
+   *          the value.
+   * @param fieldType
+   *          the {@link FieldType}.
+   * @return the new {@link Field}.
+   */
+  public Field getField(String fieldName, String value, FieldType fieldType) {
+    TYPE type = _typeLookup.get(fieldName);
+    if (type == null) {
+      return new Field(fieldName, value, fieldType);
+    }
+    switch (type) {
+    case INTEGER:
+      return new IntField(fieldName, Integer.parseInt(value), fieldType);
+    case DOUBLE:
+      return new DoubleField(fieldName, Double.parseDouble(value), fieldType);
+    case FLOAT:
+      return new FloatField(fieldName, Float.parseFloat(value), fieldType);
+    case LONG:
+      return new LongField(fieldName, Long.parseLong(value), fieldType);
+    default:
+      return new Field(fieldName, value, fieldType);
+    }
+  }
+
+  public String toJSON() {
+    TMemoryBuffer trans = new TMemoryBuffer(1024);
+    TJSONProtocol protocol = new TJSONProtocol(trans);
+    try {
+      _analyzerDefinition.write(protocol);
+    } catch (TException e) {
+      throw new RuntimeException(e);
+    }
+    trans.close();
+    byte[] array = trans.getArray();
+    return new String(array, 0, trans.length());
+  }
+
+  private String getColumnFamily(String fieldName) {
+    int index = fieldName.indexOf('.');
+    if (index < 0) {
+      return null;
+    }
+    return fieldName.substring(0, index);
+  }
+
+  public AnalyzerDefinition getAnalyzerDefinition() {
+    return _analyzerDefinition;
+  }
+
+  public void close() {
+
+  }
+
+  @Override
+  protected Analyzer getWrappedAnalyzer(String fieldName) {
+    Analyzer analyzer = getAnalyzer(fieldName);
+    return (analyzer != null) ? analyzer : _defaultAnalyzer;
+  }
+
+  @Override
+  protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) {
+    return components;
+  }
+
+  public static BlurAnalyzer create(File file) throws IOException {
+    FileInputStream inputStream = new FileInputStream(file);
+    try {
+      return create(inputStream);
+    } finally {
+      inputStream.close();
+    }
+  }
+
+  public static BlurAnalyzer create(InputStream inputStream) throws IOException {
+    TMemoryInputTransport trans = new TMemoryInputTransport(getBytes(inputStream));
+    TJSONProtocol protocol = new TJSONProtocol(trans);
+    AnalyzerDefinition analyzerDefinition = new AnalyzerDefinition();
+    try {
+      analyzerDefinition.read(protocol);
+    } catch (TException e) {
+      throw new RuntimeException(e);
+    }
+    trans.close();
+    return new BlurAnalyzer(analyzerDefinition);
+  }
+
+  public static BlurAnalyzer create(String jsonStr) throws IOException {
+    InputStream inputStream = new ByteArrayInputStream(jsonStr.getBytes());
+    try {
+      return create(inputStream);
+    } finally {
+      inputStream.close();
+    }
+  }
+
+  public static BlurAnalyzer create(Path path) throws IOException {
+    FileSystem fileSystem = FileSystem.get(path.toUri(), new Configuration());
+    FSDataInputStream inputStream = fileSystem.open(path);
+    try {
+      return create(inputStream);
+    } finally {
+      inputStream.close();
+    }
+  }
+
+  private static byte[] getBytes(InputStream inputStream) throws IOException {
+    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
+    byte[] buffer = new byte[1024];
+    int num;
+    while ((num = inputStream.read(buffer)) != -1) {
+      outputStream.write(buffer, 0, num);
+    }
+    inputStream.close();
+    outputStream.close();
+    return outputStream.toByteArray();
+  }
+
+  private static void load(Map<String, Analyzer> analyzers, Map<String, ColumnFamilyDefinition> familyDefinitions,
+      Map<String, Boolean> fullTextFields, Map<String, Set<String>> subIndexNameLookups, Set<String> subIndexNames,
+      Map<String, Boolean> fullTextColumnFamilies, Map<String, TYPE> typeLookup, Map<String, FieldType> fieldTypes) {
+    if (familyDefinitions != null) {
+      for (String family : familyDefinitions.keySet()) {
+        ColumnFamilyDefinition familyDefinition = familyDefinitions.get(family);
+        ColumnDefinition defaultDefinition = familyDefinition.getDefaultDefinition();
+        if (defaultDefinition != null) {
+          fullTextColumnFamilies.put(family, defaultDefinition.isFullTextIndex());
+        }
+        load(family, familyDefinition, analyzers, fullTextFields, subIndexNameLookups, subIndexNames, typeLookup,
+            fieldTypes);
+      }
+    }
+  }
+
+  private static void load(String family, ColumnFamilyDefinition familyDefinition, Map<String, Analyzer> analyzers,
+      Map<String, Boolean> fullTextFields, Map<String, Set<String>> subIndexNameLookups, Set<String> subIndexNames,
+      Map<String, TYPE> typeLookup, Map<String, FieldType> fieldTypes) {
+    Map<String, ColumnDefinition> columnDefinitions = familyDefinition.getColumnDefinitions();
+    if (columnDefinitions != null) {
+      for (String column : columnDefinitions.keySet()) {
+        ColumnDefinition columnDefinition = columnDefinitions.get(column);
+        load(family, familyDefinition, column, columnDefinition, analyzers, fullTextFields, subIndexNameLookups,
+            subIndexNames, typeLookup, fieldTypes);
+      }
+    }
+  }
+
+  private static void load(String family, ColumnFamilyDefinition familyDefinition, String column,
+      ColumnDefinition columnDefinition, Map<String, Analyzer> analyzers, Map<String, Boolean> fullTextFields,
+      Map<String, Set<String>> subIndexNameLookups, Set<String> subIndexNames, Map<String, TYPE> typeLookup,
+      Map<String, FieldType> fieldTypes) {
+    Map<String, AlternateColumnDefinition> alternateColumnDefinitions = columnDefinition
+        .getAlternateColumnDefinitions();
+    if (alternateColumnDefinitions != null) {
+      for (String subColumn : alternateColumnDefinitions.keySet()) {
+        AlternateColumnDefinition alternateColumnDefinition = alternateColumnDefinitions.get(subColumn);
+        load(family, familyDefinition, column, columnDefinition, subColumn, alternateColumnDefinition, analyzers,
+            subIndexNameLookups, subIndexNames, typeLookup, fieldTypes);
+      }
+    }
+    String fieldName = family + "." + column;
+    Analyzer analyzer = getAnalyzerByClassName(columnDefinition.getAnalyzerClassName(), aliases, fieldName, typeLookup,
+        fieldTypes);
+    analyzers.put(fieldName, analyzer);
+    if (columnDefinition.isFullTextIndex()) {
+      fullTextFields.put(fieldName, Boolean.TRUE);
+    } else {
+      fullTextFields.put(fieldName, Boolean.FALSE);
+    }
+  }
+
+  private static void load(String family, ColumnFamilyDefinition familyDefinition, String column,
+      ColumnDefinition columnDefinition, String subColumn, AlternateColumnDefinition alternateColumnDefinition,
+      Map<String, Analyzer> analyzers, Map<String, Set<String>> subIndexNameLookups, Set<String> subIndexNames,
+      Map<String, TYPE> typeLookup, Map<String, FieldType> fieldTypes) {
+    String fieldName = family + "." + column + "." + subColumn;
+    Analyzer analyzer = getAnalyzerByClassName(alternateColumnDefinition.getAnalyzerClassName(), aliases, fieldName,
+        typeLookup, fieldTypes);
+    analyzers.put(fieldName, analyzer);
+    addSubField(fieldName, subIndexNameLookups);
+    subIndexNames.add(fieldName);
+  }
+
+  @SuppressWarnings("unchecked")
+  private static Analyzer getAnalyzerByClassName(String className, Map<String, Class<? extends Analyzer>> aliases,
+      String fieldName, Map<String, TYPE> typeLookup, Map<String, FieldType> fieldTypes) {
+    TYPE type = getType(className, fieldName, fieldTypes);
+    if (fieldName != null) {
+      typeLookup.put(fieldName, type);
+    }
+    if (type != TYPE.TEXT) {
+      return null;
+    }
+    try {
+      Class<? extends Analyzer> clazz = aliases.get(className);
+      if (clazz == null) {
+        clazz = (Class<? extends Analyzer>) Class.forName(className);
+      }
+      try {
+        return (Analyzer) clazz.newInstance();
+      } catch (Exception e) {
+        Constructor<?> constructor = clazz.getConstructor(new Class[] { Version.class });
+        return (Analyzer) constructor.newInstance(LUCENE_VERSION);
+      }
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private static TYPE getType(String typeStr, String fieldName, Map<String, FieldType> fieldTypes) {
+    if (typeStr == null) {
+      return null;
+    }
+    String[] types = typeStr.split(",");
+    String type = types[0];
+    String typeUpper = type.toUpperCase();
+
+    TYPE t = null;
+    if (!typeNameCache.contains(typeUpper)) {
+      t = TYPE.TEXT;
+    } else {
+      t = TYPE.valueOf(typeUpper);
+    }
+
+    FieldType fieldType;
+    switch (t) {
+    case LONG:
+      fieldType = new FieldType(LongField.TYPE_STORED);
+      if (types.length > 1) {
+        fieldType.setNumericPrecisionStep(Integer.parseInt(types[1]));
+      }
+      break;
+    case INTEGER:
+      fieldType = new FieldType(IntField.TYPE_STORED);
+      if (types.length > 1) {
+        fieldType.setNumericPrecisionStep(Integer.parseInt(types[1]));
+      }
+      break;
+    case FLOAT:
+      fieldType = new FieldType(FloatField.TYPE_STORED);
+      if (types.length > 1) {
+        fieldType.setNumericPrecisionStep(Integer.parseInt(types[1]));
+      }
+      break;
+    case DOUBLE:
+      fieldType = new FieldType(DoubleField.TYPE_STORED);
+      if (types.length > 1) {
+        fieldType.setNumericPrecisionStep(Integer.parseInt(types[1]));
+      }
+      break;
+    default:
+      fieldType = new FieldType(TextField.TYPE_STORED);
+      fieldType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+      break;
+    }
+    fieldTypes.put(fieldName, fieldType);
+    return t;
+  }
+
+  private static void addSubField(String name, Map<String, Set<String>> subIndexNameLookups) {
+    int lastIndexOf = name.lastIndexOf('.');
+    String mainFieldName = name.substring(0, lastIndexOf);
+    Set<String> set = subIndexNameLookups.get(mainFieldName);
+    if (set == null) {
+      set = new TreeSet<String>();
+      subIndexNameLookups.put(mainFieldName, set);
+    }
+    set.add(name);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/b0e26648/blur-query/src/main/java/org/apache/blur/lucene/search/AbstractWrapperQuery.java
----------------------------------------------------------------------
diff --git a/blur-query/src/main/java/org/apache/blur/lucene/search/AbstractWrapperQuery.java b/blur-query/src/main/java/org/apache/blur/lucene/search/AbstractWrapperQuery.java
new file mode 100644
index 0000000..8f2b2cc
--- /dev/null
+++ b/blur-query/src/main/java/org/apache/blur/lucene/search/AbstractWrapperQuery.java
@@ -0,0 +1,106 @@
+package org.apache.blur.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.IOException;
+import java.util.Set;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.search.similarities.Similarity;
+
+public abstract class AbstractWrapperQuery extends Query {
+  protected Query _query;
+  protected boolean _rewritten;
+
+  public AbstractWrapperQuery(Query query) {
+    this(query, false);
+  }
+
+  public AbstractWrapperQuery(Query query, boolean rewritten) {
+    this._query = query;
+    this._rewritten = rewritten;
+  }
+
+  public Query getQuery() {
+    return _query;
+  }
+
+  public boolean isRewritten() {
+    return _rewritten;
+  }
+
+  public abstract Query clone();
+
+  public abstract Weight createWeight(IndexSearcher searcher) throws IOException;
+
+  public void extractTerms(Set<Term> terms) {
+    _query.extractTerms(terms);
+  }
+
+  public float getBoost() {
+    return _query.getBoost();
+  }
+
+  public Similarity getSimilarity(IndexSearcher searcher) {
+    return searcher.getSimilarity();
+  }
+
+  public abstract Query rewrite(IndexReader reader) throws IOException;
+
+  public void setBoost(float b) {
+    _query.setBoost(b);
+  }
+
+  
+  
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = super.hashCode();
+    result = prime * result + ((_query == null) ? 0 : _query.hashCode());
+    result = prime * result + (_rewritten ? 1231 : 1237);
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    if (!super.equals(obj))
+      return false;
+    if (getClass() != obj.getClass())
+      return false;
+    AbstractWrapperQuery other = (AbstractWrapperQuery) obj;
+    if (_query == null) {
+      if (other._query != null)
+        return false;
+    } else if (!_query.equals(other._query))
+      return false;
+    if (_rewritten != other._rewritten)
+      return false;
+    return true;
+  }
+
+  public abstract String toString();
+
+  public abstract String toString(String field);
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/b0e26648/blur-query/src/main/java/org/apache/blur/lucene/search/FacetQuery.java
----------------------------------------------------------------------
diff --git a/blur-query/src/main/java/org/apache/blur/lucene/search/FacetQuery.java b/blur-query/src/main/java/org/apache/blur/lucene/search/FacetQuery.java
new file mode 100644
index 0000000..06fb712
--- /dev/null
+++ b/blur-query/src/main/java/org/apache/blur/lucene/search/FacetQuery.java
@@ -0,0 +1,205 @@
+package org.apache.blur.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicLongArray;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.util.Bits;
+
/**
 * Wraps a base query and, while it is scored, counts how many of its hits
 * also match each of the given facet queries. Counts accumulate into the
 * shared {@link AtomicLongArray} (one slot per facet query).
 *
 * NOTE(review): equals/hashCode are inherited from AbstractWrapperQuery and
 * ignore {@code facets}/{@code counts}; two FacetQuerys over the same base
 * query compare equal even with different facets - confirm callers never
 * rely on query identity for caching.
 */
public class FacetQuery extends AbstractWrapperQuery {

  // One facet query per slot of counts; rewritten in place by rewrite().
  private Query[] facets;
  // Shared accumulator, indexed in lockstep with facets.
  private AtomicLongArray counts;

  public FacetQuery(Query query, Query[] facets, AtomicLongArray counts) {
    super(query, false);
    this.facets = facets;
    this.counts = counts;
  }

  public FacetQuery(Query query, Query[] facets, AtomicLongArray counts, boolean rewritten) {
    super(query, rewritten);
    this.facets = facets;
    this.counts = counts;
  }

  public String toString() {
    return "facet:{" + _query.toString() + "}";
  }

  public String toString(String field) {
    return "facet:{" + _query.toString(field) + "}";
  }

  @Override
  public Query clone() {
    return new FacetQuery((Query) _query.clone(), facets, counts, _rewritten);
  }

  @Override
  public Query rewrite(IndexReader reader) throws IOException {
    if (_rewritten) {
      return this;
    }
    // Facets are rewritten in place; the base query's rewrite goes into a
    // fresh FacetQuery flagged as rewritten.
    for (int i = 0; i < facets.length; i++) {
      facets[i] = facets[i].rewrite(reader);
    }
    return new FacetQuery(_query.rewrite(reader), facets, counts, true);
  }

  @Override
  public Weight createWeight(IndexSearcher searcher) throws IOException {
    Weight weight = _query.createWeight(searcher);
    return new FacetWeight(weight, getWeights(searcher), counts);
  }

  // Creates one Weight per facet query against the same searcher.
  private Weight[] getWeights(IndexSearcher searcher) throws IOException {
    Weight[] weights = new Weight[facets.length];
    for (int i = 0; i < weights.length; i++) {
      weights[i] = facets[i].createWeight(searcher);
    }
    return weights;
  }

  /** Weight that pairs the base query's weight with one weight per facet. */
  public static class FacetWeight extends Weight {

    private Weight weight;
    private Weight[] facets;
    private AtomicLongArray counts;

    public FacetWeight(Weight weight, Weight[] facets, AtomicLongArray counts) {
      this.weight = weight;
      this.facets = facets;
      this.counts = counts;
    }

    @Override
    public Explanation explain(AtomicReaderContext reader, int doc) throws IOException {
      // Facets do not contribute to the score; explain the base query only.
      return weight.explain(reader, doc);
    }

    @Override
    public Query getQuery() {
      return weight.getQuery();
    }

    @Override
    public void normalize(float norm, float topLevelBoost) {
      weight.normalize(norm, topLevelBoost);
    }

    @Override
    public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException {
      // NOTE(review): scoreDocsInOrder is deliberately overridden to true -
      // FacetScorer advances the facet scorers monotonically, which requires
      // in-order doc IDs. Confirm this matches the caller's expectation.
      Scorer scorer = weight.scorer(context, true, topScorer, acceptDocs);
      if (scorer == null) {
        // Base query matches nothing in this segment; no facet counting.
        return null;
      }
      return new FacetScorer(scorer, getScorers(context, true, topScorer, acceptDocs), counts);
    }

    // Creates one scorer per facet; entries may be null when a facet matches
    // nothing in this segment.
    private Scorer[] getScorers(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException {
      Scorer[] scorers = new Scorer[facets.length];
      for (int i = 0; i < scorers.length; i++) {
        scorers[i] = facets[i].scorer(context, scoreDocsInOrder, topScorer, acceptDocs);
      }
      return scorers;
    }

    @Override
    public float getValueForNormalization() throws IOException {
      return weight.getValueForNormalization();
    }
  }

  /**
   * Scorer that forwards all scoring to the base scorer and, on every doc
   * advance, bumps the count of each facet whose scorer also matches that doc.
   */
  public static class FacetScorer extends Scorer {

    private Scorer baseScorer;
    private Scorer[] facets;
    private AtomicLongArray counts;
    private int facetLength;

    public FacetScorer(Scorer scorer, Scorer[] facets, AtomicLongArray counts) {
      super(scorer.getWeight());
      this.baseScorer = scorer;
      this.facets = facets;
      this.counts = counts;
      this.facetLength = facets.length;
    }

    // Counts, for the current base doc, every facet that matches it. Facet
    // scorers are only ever advanced forward, so docs must arrive in order.
    private int processFacets(int doc) throws IOException {
      if (doc == NO_MORE_DOCS) {
        return doc;
      }
      for (int i = 0; i < facetLength; i++) {
        Scorer facet = facets[i];
        if (facet == null) {
          // Facet matched nothing in this segment.
          continue;
        }
        int docID = facet.docID();
        if (docID == NO_MORE_DOCS) {
          continue;
        }
        if (docID == doc) {
          counts.incrementAndGet(i);
        } else if (docID < doc) {
          // Catch the facet scorer up to the current doc; count on exact hit.
          if (facet.advance(doc) == doc) {
            counts.incrementAndGet(i);
          }
        }
      }
      return doc;
    }

    @Override
    public float score() throws IOException {
      return baseScorer.score();
    }

    @Override
    public int advance(int target) throws IOException {
      return processFacets(baseScorer.advance(target));
    }

    @Override
    public int docID() {
      return baseScorer.docID();
    }

    @Override
    public int nextDoc() throws IOException {
      return processFacets(baseScorer.nextDoc());
    }

    @Override
    public int freq() throws IOException {
      return baseScorer.freq();
    }

    @Override
    public long cost() {
      return baseScorer.cost();
    }
  }
}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/b0e26648/blur-query/src/main/java/org/apache/blur/lucene/search/FairSimilarity.java
----------------------------------------------------------------------
diff --git a/blur-query/src/main/java/org/apache/blur/lucene/search/FairSimilarity.java b/blur-query/src/main/java/org/apache/blur/lucene/search/FairSimilarity.java
new file mode 100644
index 0000000..8c8b945
--- /dev/null
+++ b/blur-query/src/main/java/org/apache/blur/lucene/search/FairSimilarity.java
@@ -0,0 +1,61 @@
+package org.apache.blur.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import org.apache.lucene.index.FieldInvertState;
+import org.apache.lucene.search.similarities.TFIDFSimilarity;
+import org.apache.lucene.util.BytesRef;
+
+/**
+ * A {@link TFIDFSimilarity} that neutralizes every tf-idf scoring factor:
+ * coord, idf, query norm, sloppy freq, tf and payload score all return 1, so
+ * each matching document contributes the same base score regardless of term
+ * statistics.
+ */
+public class FairSimilarity extends TFIDFSimilarity {
+
+  @Override
+  public float coord(int overlap, int maxOverlap) {
+    return 1;
+  }
+
+  @Override
+  public float idf(long docFreq, long numDocs) {
+    return 1;
+  }
+
+  @Override
+  public float queryNorm(float sumOfSquaredWeights) {
+    return 1;
+  }
+
+  @Override
+  public float sloppyFreq(int distance) {
+    return 1;
+  }
+
+  @Override
+  public float tf(float freq) {
+    return 1;
+  }
+
+  @Override
+  public float scorePayload(int doc, int start, int end, BytesRef payload) {
+    return 1;
+  }
+
+  @Override
+  public float lengthNorm(FieldInvertState fieldInvertState) {
+    // Length normalization has no defined behavior for this similarity (the
+    // original placeholder threw RuntimeException("not sure")); fail fast with
+    // a clearer, more specific unchecked exception instead of returning a
+    // guessed value. UnsupportedOperationException is a RuntimeException
+    // subclass, so existing catch blocks still work.
+    throw new UnsupportedOperationException("lengthNorm is not supported by FairSimilarity");
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/b0e26648/blur-query/src/main/java/org/apache/blur/lucene/search/IterablePaging.java
----------------------------------------------------------------------
diff --git a/blur-query/src/main/java/org/apache/blur/lucene/search/IterablePaging.java b/blur-query/src/main/java/org/apache/blur/lucene/search/IterablePaging.java
new file mode 100644
index 0000000..586b68a
--- /dev/null
+++ b/blur-query/src/main/java/org/apache/blur/lucene/search/IterablePaging.java
@@ -0,0 +1,227 @@
+package org.apache.blur.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.TopScoreDocCollector;
+
+/**
+ * The {@link IterablePaging} class allows for easy paging through lucene hits.
+ */
+public class IterablePaging implements Iterable<ScoreDoc> {
+
+  private final IndexSearcher searcher;
+  private final Query query;
+  private final AtomicBoolean running;
+  private final int numHitsToCollect;
+
+  private TotalHitsRef totalHitsRef;
+  private ProgressRef progressRef;
+  private int skipTo;
+  private int gather = -1;
+
+  public IterablePaging(AtomicBoolean running, IndexSearcher searcher, Query query,
+      int numHitsToCollect, TotalHitsRef totalHitsRef, ProgressRef progressRef) throws IOException {
+    this.running = running;
+    this.query = searcher.rewrite(query);
+    this.searcher = searcher;
+    this.numHitsToCollect = numHitsToCollect;
+    this.totalHitsRef = totalHitsRef == null ? new TotalHitsRef() : totalHitsRef;
+    this.progressRef = progressRef == null ? new ProgressRef() : progressRef;
+
+  }
+
+  public static class TotalHitsRef {
+    // This is an atomic integer because more than likely if there is
+    // any status sent to the user, it will be done in another thread.
+    protected AtomicInteger totalHits = new AtomicInteger(0);
+
+    public int totalHits() {
+      return totalHits.get();
+    }
+  }
+
+  public static class ProgressRef {
+    // These are atomic integers because more than likely if there is
+    // any status sent to the user, it will be done in another thread.
+    protected AtomicInteger skipTo = new AtomicInteger(0);
+    protected AtomicInteger currentHitPosition = new AtomicInteger(0);
+    protected AtomicInteger searchesPerformed = new AtomicInteger(0);
+    protected AtomicLong queryTime = new AtomicLong(0);
+
+    public int skipTo() {
+      return skipTo.get();
+    }
+
+    public int currentHitPosition() {
+      return currentHitPosition.get();
+    }
+
+    public int searchesPerformed() {
+      return searchesPerformed.get();
+    }
+
+    public long queryTime() {
+      return queryTime.get();
+    }
+  }
+
+  /**
+   * Gets the total hits of the search.
+   * 
+   * @return the total hits.
+   */
+  public int getTotalHits() {
+    return totalHitsRef.totalHits();
+  }
+
+  /**
+   * Allows for gathering of the total hits of this search.
+   * 
+   * @param ref
+   *          {@link TotalHitsRef}.
+   * @return this.
+   */
+  public IterablePaging totalHits(TotalHitsRef ref) {
+    totalHitsRef = ref;
+    return this;
+  }
+
+  /**
+   * Skips the first x number of hits.
+   * 
+   * @param skipTo
+   *          the number hits to skip.
+   * @return this.
+   */
+  public IterablePaging skipTo(int skipTo) {
+    this.skipTo = skipTo;
+    return this;
+  }
+
+  /**
+   * Only gather up to x number of hits.
+   * 
+   * @param gather
+   *          the number of hits to gather.
+   * @return this.
+   */
+  public IterablePaging gather(int gather) {
+    this.gather = gather;
+    return this;
+  }
+
+  /**
+   * Allows for gathering the progress of the paging.
+   * 
+   * @param ref
+   *          the {@link ProgressRef}.
+   * @return this.
+   */
+  public IterablePaging progress(ProgressRef ref) {
+    this.progressRef = ref;
+    return this;
+  }
+
+  /**
+   * The {@link ScoreDoc} iterator.
+   */
+  @Override
+  public Iterator<ScoreDoc> iterator() {
+    return skipHits(new PagingIterator());
+  }
+
+  class PagingIterator implements Iterator<ScoreDoc> {
+    private ScoreDoc[] scoreDocs;
+    private int counter = 0;
+    private int offset = 0;
+    private int endPosition = gather == -1 ? Integer.MAX_VALUE : skipTo + gather;
+    private ScoreDoc lastScoreDoc;
+
+    PagingIterator() {
+      search();
+    }
+
+    void search() {
+      long s = System.currentTimeMillis();
+      progressRef.searchesPerformed.incrementAndGet();
+      try {
+        TopScoreDocCollector collector = TopScoreDocCollector.create(numHitsToCollect, lastScoreDoc, true);
+        StopExecutionCollector stopExecutionCollector = new StopExecutionCollector(collector, running);
+        searcher.search(query, stopExecutionCollector);
+        totalHitsRef.totalHits.set(collector.getTotalHits());
+        TopDocs topDocs = collector.topDocs();
+        scoreDocs = topDocs.scoreDocs;
+      } catch (IOException e) {
+        e.printStackTrace();
+        throw new RuntimeException(e);
+      }
+      if (scoreDocs.length > 0) {
+        lastScoreDoc = scoreDocs[scoreDocs.length - 1];
+      } else {
+        lastScoreDoc = null;
+      }
+      long e = System.currentTimeMillis();
+      progressRef.queryTime.addAndGet(e - s);
+    }
+
+    @Override
+    public boolean hasNext() {
+      return counter < totalHitsRef.totalHits() && counter < endPosition ? true : false;
+    }
+
+    @Override
+    public ScoreDoc next() {
+      if (isCurrentCollectorExhausted()) {
+        search();
+        offset = 0;
+      }
+      progressRef.currentHitPosition.set(counter);
+      counter++;
+      return scoreDocs[offset++];
+    }
+
+    private boolean isCurrentCollectorExhausted() {
+      return offset < scoreDocs.length ? false : true;
+    }
+
+    @Override
+    public void remove() {
+      throw new RuntimeException("read only");
+    }
+  }
+
+  private Iterator<ScoreDoc> skipHits(Iterator<ScoreDoc> iterator) {
+    progressRef.skipTo.set(skipTo);
+    for (int i = 0; i < skipTo && iterator.hasNext(); i++) {
+      // eats the hits, and moves the iterator to the desired skip to position.
+      progressRef.currentHitPosition.set(i);
+      iterator.next();
+    }
+    return iterator;
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/b0e26648/blur-query/src/main/java/org/apache/blur/lucene/search/PagingCollector.java
----------------------------------------------------------------------
diff --git a/blur-query/src/main/java/org/apache/blur/lucene/search/PagingCollector.java b/blur-query/src/main/java/org/apache/blur/lucene/search/PagingCollector.java
new file mode 100644
index 0000000..fec888c
--- /dev/null
+++ b/blur-query/src/main/java/org/apache/blur/lucene/search/PagingCollector.java
@@ -0,0 +1,116 @@
+package org.apache.blur.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.IOException;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.TopDocsCollector;
+import org.apache.lucene.util.PriorityQueue;
+
+/**
+ * The {@link PagingCollector} allows for paging through lucene hits.
+ */
+public class PagingCollector extends TopDocsCollector<ScoreDoc> {
+
+  // Current weakest (lowest ranked) entry in the priority queue.
+  private ScoreDoc pqTop;
+  // Doc id offset of the segment currently being collected.
+  private int docBase;
+  private Scorer scorer;
+  // Lowest ranked hit handed out by the previous page; hits ranked at or
+  // above it are skipped so each page resumes where the last one stopped.
+  private ScoreDoc previousPassLowest;
+  private int numHits;
+
+  public PagingCollector(int numHits) {
+    // creates an empty score doc so that i don't have to check for null
+    // each time.
+    this(numHits, new ScoreDoc(-1, Float.MAX_VALUE));
+  }
+
+  public PagingCollector(int numHits, ScoreDoc previousPassLowest) {
+    super(new HitQueue(numHits, true));
+    this.pqTop = pq.top();
+    this.numHits = numHits;
+    this.previousPassLowest = previousPassLowest;
+  }
+
+  @Override
+  public boolean acceptsDocsOutOfOrder() {
+    return true;
+  }
+
+  @Override
+  public void collect(int doc) throws IOException {
+    float score = scorer.score();
+    totalHits++;
+    doc += docBase;
+    if (score > previousPassLowest.score) {
+      // this hit was gathered on a previous page.
+      return;
+    } else if (score == previousPassLowest.score && doc <= previousPassLowest.doc) {
+      // if the scores are the same and the doc is less than or equal to the
+      // previous pass lowest hit doc then skip because this collector favors
+      // lower number documents.
+      return;
+    } else if (score < pqTop.score || (score == pqTop.score && doc > pqTop.doc)) {
+      // Not competitive with the weakest hit currently queued for this page.
+      return;
+    }
+    // Competitive: overwrite the weakest queue entry in place and re-heapify.
+    pqTop.doc = doc;
+    pqTop.score = score;
+    pqTop = pq.updateTop();
+  }
+
+  @Override
+  public void setNextReader(AtomicReaderContext context) throws IOException {
+    this.docBase = context.docBase;
+  }
+
+  @Override
+  public void setScorer(Scorer scorer) throws IOException {
+    this.scorer = scorer;
+  }
+
+  // NOTE(review): both getLastScoreDoc variants index with
+  // (min(totalHits, numHits) - 1) and so throw when nothing was collected;
+  // callers appear to be expected to check for empty results first — confirm.
+  public ScoreDoc getLastScoreDoc(TopDocs topDocs) {
+    return topDocs.scoreDocs[(totalHits < numHits ? totalHits : numHits) - 1];
+  }
+
+  public ScoreDoc getLastScoreDoc(ScoreDoc[] scoreDocs) {
+    return scoreDocs[(totalHits < numHits ? totalHits : numHits) - 1];
+  }
+
+  public static class HitQueue extends PriorityQueue<ScoreDoc> {
+
+    HitQueue(int size, boolean prePopulate) {
+      super(size, prePopulate);
+    }
+
+    @Override
+    protected ScoreDoc getSentinelObject() {
+      // Pre-populated sentinels rank below every real hit (lowest possible
+      // score, highest possible doc), so they are displaced first.
+      return new ScoreDoc(Integer.MAX_VALUE, Float.NEGATIVE_INFINITY);
+    }
+
+    @Override
+    protected final boolean lessThan(ScoreDoc hitA, ScoreDoc hitB) {
+      // Order by score, breaking ties in favor of lower doc ids.
+      if (hitA.score == hitB.score)
+        return hitA.doc > hitB.doc;
+      else
+        return hitA.score < hitB.score;
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/b0e26648/blur-query/src/main/java/org/apache/blur/lucene/search/PrimeDocCache.java
----------------------------------------------------------------------
diff --git a/blur-query/src/main/java/org/apache/blur/lucene/search/PrimeDocCache.java b/blur-query/src/main/java/org/apache/blur/lucene/search/PrimeDocCache.java
new file mode 100644
index 0000000..133522f
--- /dev/null
+++ b/blur-query/src/main/java/org/apache/blur/lucene/search/PrimeDocCache.java
@@ -0,0 +1,100 @@
+package org.apache.blur.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.IOException;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.blur.log.Log;
+import org.apache.blur.log.LogFactory;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.ReaderClosedListener;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.util.OpenBitSet;
+
+public class PrimeDocCache {
+
+  private static final Log LOG = LogFactory.getLog(PrimeDocCache.class);
+
+  // NOTE: shared mutable instance — callers must treat it as read only.
+  public static final OpenBitSet EMPTY_BIT_SET = new OpenBitSet();
+
+  // Prime doc bitsets keyed first by prime doc term, then by the segment
+  // reader's core cache key. Declared as ConcurrentHashMap (not just Map) so
+  // putIfAbsent can be used below.
+  private static ConcurrentHashMap<Term, Map<Object, OpenBitSet>> termPrimeDocMap = new ConcurrentHashMap<Term, Map<Object, OpenBitSet>>();
+
+  /**
+   * Gets (building and caching on a miss) the bitset that marks the prime
+   * docs of the given segment reader; the cached entry is evicted when the
+   * reader closes.
+   * 
+   * The way this method is called via warm up methods the likelihood of
+   * creating multiple bitsets during a race condition is very low, that's why
+   * this method is not synced.
+   */
+  public static OpenBitSet getPrimeDocBitSet(Term primeDocTerm, IndexReader reader) throws IOException {
+    Object key = reader.getCoreCacheKey();
+    final Map<Object, OpenBitSet> primeDocMap = getPrimeDocMap(primeDocTerm);
+    OpenBitSet bitSet = primeDocMap.get(key);
+    if (bitSet == null) {
+      // Drop the cached bitset once the segment goes away.
+      reader.addReaderClosedListener(new ReaderClosedListener() {
+        @Override
+        public void onClose(IndexReader reader) {
+          Object key = reader.getCoreCacheKey();
+          LOG.debug("Current size [" + primeDocMap.size() + "] Prime Doc BitSet removing for segment [" + reader + "]");
+          primeDocMap.remove(key);
+        }
+      });
+      LOG.debug("Prime Doc BitSet missing for segment [" + reader + "] current size [" + primeDocMap.size() + "]");
+      final OpenBitSet bs = new OpenBitSet(reader.maxDoc());
+      primeDocMap.put(key, bs);
+      // Populate the bitset with every document matching the prime doc term.
+      IndexSearcher searcher = new IndexSearcher(reader);
+      searcher.search(new TermQuery(primeDocTerm), new Collector() {
+
+        @Override
+        public void setScorer(Scorer scorer) throws IOException {
+
+        }
+
+        @Override
+        public void setNextReader(AtomicReaderContext atomicReaderContext) throws IOException {
+        }
+
+        @Override
+        public void collect(int doc) throws IOException {
+          bs.set(doc);
+        }
+
+        @Override
+        public boolean acceptsDocsOutOfOrder() {
+          return false;
+        }
+      });
+      return bs;
+    }
+    return bitSet;
+  }
+
+  private static Map<Object, OpenBitSet> getPrimeDocMap(Term primeDocTerm) {
+    Map<Object, OpenBitSet> map = termPrimeDocMap.get(primeDocTerm);
+    if (map == null) {
+      // putIfAbsent ensures two racing threads agree on one per-term map; the
+      // previous check-then-put could silently drop another thread's entries.
+      Map<Object, OpenBitSet> newMap = new ConcurrentHashMap<Object, OpenBitSet>();
+      Map<Object, OpenBitSet> existing = termPrimeDocMap.putIfAbsent(primeDocTerm, newMap);
+      return existing == null ? newMap : existing;
+    }
+    return map;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/b0e26648/blur-query/src/main/java/org/apache/blur/lucene/search/SlowQuery.java
----------------------------------------------------------------------
diff --git a/blur-query/src/main/java/org/apache/blur/lucene/search/SlowQuery.java b/blur-query/src/main/java/org/apache/blur/lucene/search/SlowQuery.java
new file mode 100644
index 0000000..7ec5263
--- /dev/null
+++ b/blur-query/src/main/java/org/apache/blur/lucene/search/SlowQuery.java
@@ -0,0 +1,142 @@
+package org.apache.blur.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.IOException;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.util.Bits;
+
+/**
+ * A debugging {@link Query} wrapper intended to slow scoring down by calling
+ * {@link #highCpuWait(long)} on every document transition.
+ */
+public class SlowQuery extends Query {
+
+  private Query query;
+  // Guards against rewriting the wrapped query more than once; the original
+  // code declared this flag but never set it.
+  private boolean rewritten = false;
+
+  public SlowQuery(Query query) {
+    this.query = query;
+  }
+
+  @Override
+  public Weight createWeight(IndexSearcher searcher) throws IOException {
+    Weight weight = query.createWeight(searcher);
+    return new SlowWeight(this, weight);
+  }
+
+  @Override
+  public Query rewrite(IndexReader reader) throws IOException {
+    if (!rewritten) {
+      // Rewrite the wrapped query to a fixed point: a single pass may return
+      // another query that itself still needs rewriting, and since this
+      // method always returns "this" the searcher's own rewrite loop would
+      // stop after one round.
+      Query result = query.rewrite(reader);
+      while (result != query) {
+        query = result;
+        result = query.rewrite(reader);
+      }
+      rewritten = true;
+    }
+    return this;
+  }
+
+  @Override
+  public String toString(String field) {
+    return query.toString(field);
+  }
+
+  public static class SlowWeight extends Weight {
+
+    private final Weight weight;
+    private final Query query;
+
+    public SlowWeight(Query query, Weight weight) {
+      this.query = query;
+      this.weight = weight;
+    }
+
+    @Override
+    public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
+      return weight.explain(context, doc);
+    }
+
+    @Override
+    public Query getQuery() {
+      return query;
+    }
+
+    @Override
+    public float getValueForNormalization() throws IOException {
+      return weight.getValueForNormalization();
+    }
+
+    @Override
+    public void normalize(float norm, float topLevelBoost) {
+      weight.normalize(norm, topLevelBoost);
+    }
+
+    @Override
+    public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException {
+      Scorer scorer = weight.scorer(context, scoreDocsInOrder, topScorer, acceptDocs);
+      if (scorer == null) {
+        // Still burn a little time even for segments with no matches.
+        highCpuWait(1);
+        return null;
+      }
+      return new SlowScorer(weight, scorer);
+    }
+
+  }
+
+  public static class SlowScorer extends Scorer {
+
+    private final Scorer scorer;
+
+    protected SlowScorer(Weight weight, Scorer scorer) {
+      super(weight);
+      this.scorer = scorer;
+    }
+
+    @Override
+    public int docID() {
+      return scorer.docID();
+    }
+
+    @Override
+    public int nextDoc() throws IOException {
+      highCpuWait(1);
+      return scorer.nextDoc();
+    }
+
+    @Override
+    public int advance(int target) throws IOException {
+      highCpuWait(1);
+      return scorer.advance(target);
+    }
+
+    @Override
+    public float score() throws IOException {
+      return scorer.score();
+    }
+
+    @Override
+    public int freq() throws IOException {
+      return scorer.freq();
+    }
+
+    @Override
+    public long cost() {
+      return scorer.cost();
+    }
+
+  }
+
+  /**
+   * Placeholder for a busy wait of roughly {@code ms} milliseconds.
+   * NOTE(review): currently a no-op — presumably meant to spin/burn CPU for
+   * testing; confirm before relying on any slowdown behavior.
+   */
+  public static void highCpuWait(long ms) {
+
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/b0e26648/blur-query/src/main/java/org/apache/blur/lucene/search/StopExecutionCollector.java
----------------------------------------------------------------------
diff --git a/blur-query/src/main/java/org/apache/blur/lucene/search/StopExecutionCollector.java b/blur-query/src/main/java/org/apache/blur/lucene/search/StopExecutionCollector.java
new file mode 100644
index 0000000..d97a14f
--- /dev/null
+++ b/blur-query/src/main/java/org/apache/blur/lucene/search/StopExecutionCollector.java
@@ -0,0 +1,67 @@
+package org.apache.blur.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.Scorer;
+
+/**
+ * A {@link Collector} wrapper that aborts a running search by throwing
+ * {@link StopExecutionCollectorException} once the supplied running flag is
+ * cleared. The flag is polled at most once every 5 ms of wall time to keep
+ * the per-hit overhead of System.nanoTime() low.
+ */
+public class StopExecutionCollector extends Collector {
+
+  private static final long _5MS = TimeUnit.MILLISECONDS.toNanos(5);
+
+  private final Collector _collector;
+  private final AtomicBoolean _running;
+  // Nano timestamp of the last time the running flag was checked.
+  private long last;
+
+  public StopExecutionCollector(Collector collector, AtomicBoolean running) {
+    _collector = collector;
+    _running = running;
+  }
+
+  public static class StopExecutionCollectorException extends RuntimeException {
+    private static final long serialVersionUID = 5753875017543945163L;
+  }
+
+  @Override
+  public boolean acceptsDocsOutOfOrder() {
+    return _collector.acceptsDocsOutOfOrder();
+  }
+
+  @Override
+  public void collect(int doc) throws IOException {
+    long now = System.nanoTime();
+    if (last + _5MS < now) {
+      if (!_running.get()) {
+        throw new StopExecutionCollectorException();
+      }
+      last = now;
+    }
+    _collector.collect(doc);
+  }
+
+  @Override
+  public void setNextReader(AtomicReaderContext context) throws IOException {
+    _collector.setNextReader(context);
+  }
+
+  @Override
+  public void setScorer(Scorer scorer) throws IOException {
+    _collector.setScorer(scorer);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/b0e26648/blur-query/src/main/java/org/apache/blur/lucene/search/SuperParser.java
----------------------------------------------------------------------
diff --git a/blur-query/src/main/java/org/apache/blur/lucene/search/SuperParser.java b/blur-query/src/main/java/org/apache/blur/lucene/search/SuperParser.java
new file mode 100644
index 0000000..c3a7910
--- /dev/null
+++ b/blur-query/src/main/java/org/apache/blur/lucene/search/SuperParser.java
@@ -0,0 +1,304 @@
+package org.apache.blur.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.blur.analysis.BlurAnalyzer;
+import org.apache.blur.analysis.BlurAnalyzer.TYPE;
+import org.apache.blur.thrift.generated.ScoreType;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queryparser.classic.ParseException;
+import org.apache.lucene.queryparser.classic.QueryParser;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.FilteredQuery;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.MultiPhraseQuery;
+import org.apache.lucene.search.PhraseQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.Version;
+
+public class SuperParser extends QueryParser {
+
+  private static final String MUST_NOT_STRING = "-";
+  private static final String MUST_STRING = "+";
+  // Matches an optional +/- occur prefix followed by super:<...>, with the
+  // prefix in group 1 and the embedded query text in group 2.
+  private static final Pattern PATTERN = Pattern.compile("([-+]{0,1})\\s*?super\\s*?\\:\\s*?\\<(.*?)\\>");
+  // Used to reject a super clause nested inside another super clause.
+  private static final Pattern CHECK = Pattern.compile("super\\s*?\\:\\s*?\\<");
+  private static final String SUPER = "super";
+  private final Map<Query, String> fieldNames = new HashMap<Query, String>();
+  private final boolean superSearch;
+  private final Filter queryFilter;
+  private final ScoreType scoreType;
+  private final BlurAnalyzer blurAnalyzer;
+  private final Version matchVersion;
+  private final Term defaultPrimeDocTerm;
+
+  /**
+   * @param matchVersion lucene compatibility version.
+   * @param a analyzer supplying per-field types and typed range queries.
+   * @param superSearch whether queries should be wrapped as super queries.
+   * @param queryFilter optional filter applied to wrapped queries, may be null.
+   * @param scoreType scoring mode used for generated super queries.
+   * @param defaultPrimeDocTerm term marking prime documents.
+   */
+  public SuperParser(Version matchVersion, BlurAnalyzer a, boolean superSearch, Filter queryFilter, ScoreType scoreType, Term defaultPrimeDocTerm) {
+    super(matchVersion, "super", a);
+    this.matchVersion = matchVersion;
+    this.setAutoGeneratePhraseQueries(true);
+    this.setAllowLeadingWildcard(true);
+    this.superSearch = superSearch;
+    this.queryFilter = queryFilter;
+    this.scoreType = scoreType;
+    this.blurAnalyzer = a;
+    this.defaultPrimeDocTerm = defaultPrimeDocTerm;
+  }
+
+  /**
+   * Parses a query containing zero or more {@code super:<...>} clauses. When
+   * at least one is present, each clause is parsed separately and wrapped as a
+   * SuperQuery inside a boolean query; otherwise the whole text is handed to
+   * the normal {@link QueryParser}.
+   * NOTE(review): when super clauses are present, query text outside them is
+   * neither parsed nor included in the result — confirm this is intended.
+   */
+  @Override
+  public Query parse(String query) throws ParseException {
+    Matcher matcher = PATTERN.matcher(query);
+    BooleanQuery booleanQuery = null;
+    while (matcher.find()) {
+      int count = matcher.groupCount();
+      // Groups come in pairs: the occur prefix, then the embedded query text.
+      for (int i = 0; i < count; i++) {
+        String occurString = matcher.group(i + 1);
+        i++;
+        String superQueryStr = matcher.group(i + 1);
+        Matcher matcherCheck = CHECK.matcher(superQueryStr);
+        if (matcherCheck.find()) {
+          // A super clause inside another super clause is not supported.
+          throw new ParseException(
+              "Embedded super queries are not allowed [" + query
+                  + "].");
+        }
+
+        if (booleanQuery == null) {
+          booleanQuery = new BooleanQuery();
+        }
+
+        Occur occur = getOccur(occurString);
+        QueryParser parser = new QueryParser(matchVersion, SUPER, blurAnalyzer);
+
+        Query superQuery = parser.parse(superQueryStr);
+        booleanQuery.add(new SuperQuery(superQuery, scoreType, defaultPrimeDocTerm), occur);
+      }
+    }
+    if (booleanQuery == null) {
+      return super.parse(query);
+    }
+    return booleanQuery;
+  }
+
+  /**
+   * Maps a leading occur token ("+" or "-") onto the matching boolean clause
+   * occur; anything else (including the empty prefix) is optional (SHOULD).
+   */
+  private Occur getOccur(String occurString) {
+    if (MUST_STRING.equals(occurString)) {
+      return Occur.MUST;
+    } else if (MUST_NOT_STRING.equals(occurString)) {
+      return Occur.MUST_NOT;
+    } else {
+      return Occur.SHOULD;
+    }
+  }
+
+  @Override
+  protected Query newFuzzyQuery(Term term, float minimumSimilarity,
+      int prefixLength) {
+    String field = term.field();
+    TYPE type = blurAnalyzer.getTypeLookup(field);
+    if (type != TYPE.TEXT) {
+      throw new RuntimeException("Field [" + field + "] is type [" + type
+          + "] which does not support fuzzy queries.");
+    }
+    return addField(
+        super.newFuzzyQuery(term, minimumSimilarity, prefixLength),
+        term.field());
+  }
+
+  @Override
+  protected Query newMatchAllDocsQuery() {
+    return addField(super.newMatchAllDocsQuery(), UUID.randomUUID()
+        .toString());
+  }
+
+  @Override
+  protected MultiPhraseQuery newMultiPhraseQuery() {
+    return new MultiPhraseQuery() {
+
+      @Override
+      public void add(Term[] terms, int position) {
+        super.add(terms, position);
+        for (Term term : terms) {
+          addField(this, term.field());
+        }
+      }
+    };
+  }
+
+  @Override
+  protected PhraseQuery newPhraseQuery() {
+    return new PhraseQuery() {
+
+      @Override
+      public void add(Term term, int position) {
+        super.add(term, position);
+        addField(this, term.field());
+      }
+    };
+  }
+
+  @Override
+  protected Query newPrefixQuery(Term prefix) {
+    String field = prefix.field();
+    TYPE type = blurAnalyzer.getTypeLookup(field);
+    if (type != TYPE.TEXT) {
+      throw new RuntimeException("Field [" + field + "] is type [" + type
+          + "] which does not support prefix queries.");
+    }
+    return addField(super.newPrefixQuery(prefix), field);
+  }
+
+  @Override
+  protected Query newRangeQuery(String field, String part1, String part2,
+      boolean startInclusive, boolean endInclusive) {
+    Query q = blurAnalyzer.getNewRangeQuery(field, part1, part2,
+        startInclusive, endInclusive);
+    if (q != null) {
+      return addField(q, field);
+    }
+    return addField(super.newRangeQuery(field, part1, part2,
+        startInclusive, endInclusive), field);
+  }
+
+  @Override
+  protected Query newTermQuery(Term term) {
+    String field = term.field();
+    String text = term.text();
+    // For fields with a type-specific range implementation, an exact match is
+    // expressed as a single-valued inclusive range; otherwise use the
+    // standard term query.
+    Query exactQuery = blurAnalyzer.getNewRangeQuery(field, text, text, true, true);
+    if (exactQuery == null) {
+      exactQuery = super.newTermQuery(term);
+    }
+    return addField(exactQuery, field);
+  }
+
+  @Override
+  protected Query newWildcardQuery(Term t) {
+    String field = t.field();
+    // "super:*" is treated as shorthand for matching every document.
+    if (SUPER.equals(field) && "*".equals(t.text())) {
+      return new MatchAllDocsQuery();
+    }
+    // Wildcard queries are only valid against analyzed text fields.
+    TYPE type = blurAnalyzer.getTypeLookup(field);
+    if (type != TYPE.TEXT) {
+      throw new RuntimeException("Field [" + field + "] is type [" + type
+          + "] which does not support wildcard queries.");
+    }
+    // Use the already-extracted local instead of re-calling t.field(),
+    // matching the style of newPrefixQuery above.
+    return addField(super.newWildcardQuery(t), field);
+  }
+
+  // Wraps the given query (with the optional query filter applied) in a
+  // SuperQuery using the configured score type and prime doc term.
+  private SuperQuery newSuperQuery(Query query) {
+    return new SuperQuery(wrapFilter(query), scoreType, defaultPrimeDocTerm);
+  }
+
+  // Applies the optional query filter; returns the query unchanged when no
+  // filter is configured.
+  private Query wrapFilter(Query query) {
+    return queryFilter == null ? query : new FilteredQuery(query, queryFilter);
+  }
+
+  // private boolean isSameGroupName(BooleanQuery booleanQuery) {
+  // String groupName = findFirstGroupName(booleanQuery);
+  // if (groupName == null) {
+  // return false;
+  // }
+  // return isSameGroupName(booleanQuery, groupName);
+  // }
+  //
+  // private boolean isSameGroupName(Query query, String groupName) {
+  // if (query instanceof BooleanQuery) {
+  // BooleanQuery booleanQuery = (BooleanQuery) query;
+  // for (BooleanClause clause : booleanQuery.clauses()) {
+  // if (!isSameGroupName(clause.getQuery(), groupName)) {
+  // return false;
+  // }
+  // }
+  // return true;
+  // } else {
+  // String fieldName = fieldNames.get(query);
+  // String currentGroupName = getGroupName(fieldName);
+  // if (groupName.equals(currentGroupName)) {
+  // return true;
+  // }
+  // return false;
+  // }
+  // }
+  //
+  // private String getGroupName(String fieldName) {
+  // if (fieldName == null) {
+  // return null;
+  // }
+  // int index = fieldName.indexOf(SEP);
+  // if (index < 0) {
+  // return null;
+  // }
+  // return fieldName.substring(0, index);
+  // }
+  //
+  // private String findFirstGroupName(Query query) {
+  // if (query instanceof BooleanQuery) {
+  // BooleanQuery booleanQuery = (BooleanQuery) query;
+  // for (BooleanClause clause : booleanQuery.clauses()) {
+  // return findFirstGroupName(clause.getQuery());
+  // }
+  // return null;
+  // } else {
+  // String fieldName = fieldNames.get(query);
+  // return getGroupName(fieldName);
+  // }
+  // }
+  /**
+   * Rewrites the parsed query for execution.  When super-search mode is off
+   * (or the query is null) only the optional filter is applied; otherwise
+   * each clause of a boolean query -- or the query itself -- is wrapped in a
+   * SuperQuery so records are scored per row.
+   */
+  private Query reprocess(Query query) {
+    if (query == null || !isSuperSearch()) {
+      return wrapFilter(query);
+    }
+    if (!(query instanceof BooleanQuery)) {
+      return newSuperQuery(query);
+    }
+    BooleanQuery booleanQuery = (BooleanQuery) query;
+    for (BooleanClause clause : booleanQuery.clauses()) {
+      clause.setQuery(newSuperQuery(clause.getQuery()));
+    }
+    return booleanQuery;
+  }
+  // Records the field that produced the given query in the fieldNames map
+  // and returns the query for convenient call chaining.
+  private Query addField(Query q, String field) {
+    fieldNames.put(q, field);
+    return q;
+  }
+
+  // Returns true when super-search mode is enabled (see reprocess).
+  public boolean isSuperSearch() {
+    return superSearch;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/b0e26648/blur-query/src/main/java/org/apache/blur/lucene/search/SuperQuery.java
----------------------------------------------------------------------
diff --git a/blur-query/src/main/java/org/apache/blur/lucene/search/SuperQuery.java b/blur-query/src/main/java/org/apache/blur/lucene/search/SuperQuery.java
new file mode 100644
index 0000000..f3cee51
--- /dev/null
+++ b/blur-query/src/main/java/org/apache/blur/lucene/search/SuperQuery.java
@@ -0,0 +1,324 @@
+package org.apache.blur.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.IOException;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.OpenBitSet;
+import org.apache.blur.thrift.generated.ScoreType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Wraps another query so that hits on individual documents are gathered and
+ * scored together per "super" document (row).  The prime doc bitset marks the
+ * first document of each row; every document up to the next prime doc belongs
+ * to the same row.
+ */
+public class SuperQuery extends AbstractWrapperQuery {
+
+  // How per-document scores are combined into the row score (see
+  // SuperScorer.score()).
+  private final ScoreType scoreType;
+  // Term whose bitset identifies the prime (first) document of each row.
+  private final Term primeDocTerm;
+
+  public SuperQuery(Query query, ScoreType scoreType, Term primeDocTerm) {
+    super(query, false);
+    this.scoreType = scoreType;
+    this.primeDocTerm = primeDocTerm;
+  }
+
+  public SuperQuery(Query query, ScoreType scoreType, Term primeDocTerm, boolean rewritten) {
+    super(query, rewritten);
+    this.scoreType = scoreType;
+    this.primeDocTerm = primeDocTerm;
+  }
+
+  public ScoreType getScoreType() {
+    return scoreType;
+  }
+
+  public Term getPrimeDocTerm() {
+    return primeDocTerm;
+  }
+
+  public Query clone() {
+    return new SuperQuery((Query) _query.clone(), scoreType, primeDocTerm, _rewritten);
+  }
+
+  public Weight createWeight(IndexSearcher searcher) throws IOException {
+    Weight weight = _query.createWeight(searcher);
+    return new SuperWeight(weight, _query.toString(), this, scoreType, primeDocTerm);
+  }
+
+  // Rewrites the wrapped query exactly once; subsequent calls return this
+  // instance unchanged.
+  public Query rewrite(IndexReader reader) throws IOException {
+    if (_rewritten) {
+      return this;
+    }
+    return new SuperQuery(_query.rewrite(reader), scoreType, primeDocTerm, true);
+  }
+
+  public String toString() {
+    return "super:<" + _query.toString() + ">";
+  }
+
+  public String toString(String field) {
+    return "super:<" + _query.toString(field) + ">";
+  }
+
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = super.hashCode();
+    result = prime * result + ((primeDocTerm == null) ? 0 : primeDocTerm.hashCode());
+    result = prime * result + ((scoreType == null) ? 0 : scoreType.hashCode());
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    if (!super.equals(obj))
+      return false;
+    if (getClass() != obj.getClass())
+      return false;
+    SuperQuery other = (SuperQuery) obj;
+    if (primeDocTerm == null) {
+      if (other.primeDocTerm != null)
+        return false;
+    } else if (!primeDocTerm.equals(other.primeDocTerm))
+      return false;
+    if (scoreType != other.scoreType)
+      return false;
+    return true;
+  }
+
+  /** Weight wrapper that produces a {@link SuperScorer} per segment. */
+  public static class SuperWeight extends Weight {
+
+    private final Weight weight;
+    // Original query string, used only for debug output in SuperScorer.
+    private final String originalQueryStr;
+    private final Query query;
+    private final ScoreType scoreType;
+    private final Term primeDocTerm;
+
+    public SuperWeight(Weight weight, String originalQueryStr, Query query, ScoreType scoreType, Term primeDocTerm) {
+      this.weight = weight;
+      this.originalQueryStr = originalQueryStr;
+      this.query = query;
+      this.scoreType = scoreType;
+      this.primeDocTerm = primeDocTerm;
+    }
+
+    @Override
+    public Query getQuery() {
+      return query;
+    }
+
+    // Explanations are not supported for super queries.
+    @Override
+    public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
+      throw new RuntimeException("not supported");
+    }
+
+    /*
+     * NOTE(review): getValue() needs to be implemented some other way --
+     * Weight no longer provides this method in this Lucene version.  The old
+     * implementation is kept here for reference:
+     *
+     * @Override public float getValue() { return weight.getValue(); }
+     */
+
+    @Override
+    public void normalize(float norm, float topLevelBoost) {
+      weight.normalize(norm, topLevelBoost);
+    }
+
+    // NOTE: the scoreDocsInOrder argument is ignored -- true is always
+    // passed to the inner weight, since the super scorer consumes the hits
+    // of each row sequentially (see gatherAllHitsSuperDoc).
+    @Override
+    public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs)
+        throws IOException {
+      Scorer scorer = weight.scorer(context, true, topScorer, acceptDocs);
+      if (scorer == null) {
+        return null;
+      }
+      OpenBitSet primeDocBitSet = PrimeDocCache.getPrimeDocBitSet(primeDocTerm, context.reader());
+      return new SuperScorer(scorer, primeDocBitSet, originalQueryStr, scoreType);
+    }
+
+    @Override
+    public float getValueForNormalization() throws IOException {
+      return weight.getValueForNormalization();
+    }
+
+  }
+
+  /**
+   * Scorer that iterates prime docs (row starts) rather than individual
+   * documents.  For each row it consumes every hit of the inner scorer
+   * belonging to that row and aggregates their scores per the ScoreType.
+   */
+  @SuppressWarnings("unused")
+  public static class SuperScorer extends Scorer {
+
+    private static final Logger LOG = LoggerFactory.getLogger(SuperScorer.class);
+
+    private static final String DOC_ID = "docId";
+    private static final String NEXT_DOC = "nextDoc";
+    private static final String ADVANCE = "advance";
+    private static final double SUPER_POWER_CONSTANT = 2;
+    // Compile-time switch for the print() debug tracing below.
+    private static final boolean debug = false;
+    private final Scorer scorer;
+    // Prime doc bitset: a set bit marks the first document of a row.
+    private final OpenBitSet bitSet;
+    private final String originalQueryStr;
+    private final ScoreType scoreType;
+    // Bounds of the current row: [primeDoc, nextPrimeDoc).
+    private int nextPrimeDoc;
+    private int primeDoc = -1;
+    // Per-row aggregates, reset at the start of each row.
+    private int numDocs;
+    private float bestScore;
+    private float aggregateScore;
+    private int hitsInEntity;
+
+    protected SuperScorer(Scorer scorer, OpenBitSet bitSet, String originalQueryStr, ScoreType scoreType) {
+      super(scorer.getWeight());
+      this.scorer = scorer;
+      this.bitSet = bitSet;
+      this.originalQueryStr = originalQueryStr;
+      this.scoreType = scoreType;
+    }
+
+    // Combines the per-document scores of the current row into one score
+    // according to the configured ScoreType.
+    @Override
+    public float score() throws IOException {
+      switch (scoreType) {
+      case AGGREGATE:
+        return aggregateScore;
+      case BEST:
+        return bestScore;
+      case CONSTANT:
+        return 1;
+      case SUPER:
+        // Blend of total relevance (log of the sum) and average hit quality,
+        // re-normalized by the SUPER_POWER_CONSTANT root.
+        double log = Math.log10(aggregateScore) + 1.0;
+        double avg = aggregateScore / hitsInEntity;
+        double pow = Math.pow(avg, SUPER_POWER_CONSTANT);
+        return (float) Math.pow(log + pow, 1.0 / SUPER_POWER_CONSTANT);
+      }
+      throw new RuntimeException("Unknown Score type[" + scoreType + "]");
+    }
+
+    @Override
+    public int docID() {
+      return print(DOC_ID, primeDoc);
+    }
+
+    @Override
+    public int advance(int target) throws IOException {
+      if (target == NO_MORE_DOCS) {
+        return print(ADVANCE, primeDoc = scorer.advance(NO_MORE_DOCS));
+      }
+      int doc = scorer.docID();
+      int odoc = doc; // NOTE(review): unused; retained for debugging.
+      if (isScorerExhausted(doc)) {
+        return print(ADVANCE, primeDoc = doc);
+      }
+      if (target > doc || doc == -1) {
+        doc = scorer.advance(target);
+        if (isScorerExhausted(doc)) {
+          return print(ADVANCE, primeDoc = doc);
+        }
+      } else if (isScorerExhausted(doc)) {
+        // NOTE(review): unreachable -- doc was already checked for
+        // exhaustion above and has not changed on this path.
+        return print(ADVANCE, primeDoc == -1 ? primeDoc = doc : primeDoc);
+      }
+      return print(ADVANCE, gatherAllHitsSuperDoc(doc));
+    }
+
+    // Debug helper: echoes the value being returned when debug is enabled.
+    private int print(String message, int i) {
+      if (debug) {
+        System.out.println(message + " [" + i + "] " + originalQueryStr);
+      }
+      return i;
+    }
+
+    @Override
+    public int nextDoc() throws IOException {
+      int doc = scorer.docID();
+      int odoc = doc; // NOTE(review): unused; retained for debugging.
+      if (isScorerExhausted(doc)) {
+        return primeDoc = doc;
+      }
+      if (doc == -1) {
+        doc = scorer.nextDoc();
+        if (isScorerExhausted(doc)) {
+          return print(NEXT_DOC, primeDoc = doc);
+        }
+      } else if (isScorerExhausted(doc)) {
+        // NOTE(review): unreachable -- doc was already checked for
+        // exhaustion above and has not changed on this path.
+        return print(NEXT_DOC, primeDoc == -1 ? primeDoc = doc : primeDoc);
+      }
+
+      return print(NEXT_DOC, gatherAllHitsSuperDoc(doc));
+    }
+
+    // Consumes every hit of the row containing doc, accumulating the
+    // aggregate/best scores and the hit count, and leaves the inner scorer
+    // positioned on the first hit past the row.  Returns the row's prime doc.
+    private int gatherAllHitsSuperDoc(int doc) throws IOException {
+      reset();
+      primeDoc = getPrimeDoc(doc);
+      nextPrimeDoc = getNextPrimeDoc(doc);
+      numDocs = nextPrimeDoc - primeDoc;
+      float currentDocScore = 0;
+      while (doc < nextPrimeDoc) {
+        currentDocScore = scorer.score();
+        aggregateScore += currentDocScore;
+        if (currentDocScore > bestScore) {
+          bestScore = currentDocScore;
+        }
+        hitsInEntity++;
+        doc = scorer.nextDoc();
+      }
+      return primeDoc;
+    }
+
+    // Clears the per-row aggregates before gathering a new row.
+    private void reset() {
+      numDocs = 0;
+      bestScore = 0;
+      aggregateScore = 0;
+      hitsInEntity = 0;
+    }
+
+    // First prime doc strictly after doc, or NO_MORE_DOCS at end of segment.
+    private int getNextPrimeDoc(int doc) {
+      int nextSetBit = bitSet.nextSetBit(doc + 1);
+      return nextSetBit == -1 ? NO_MORE_DOCS : nextSetBit;
+    }
+
+    // Start of the row containing doc: doc itself if it is a prime doc,
+    // otherwise the nearest preceding prime doc.
+    private int getPrimeDoc(int doc) {
+      if (bitSet.fastGet(doc)) {
+        return doc;
+      }
+      return bitSet.prevSetBit(doc);
+    }
+
+    private boolean isScorerExhausted(int doc) {
+      return doc == NO_MORE_DOCS ? true : false;
+    }
+
+    @Override
+    public int freq() throws IOException {
+      return scorer.freq();
+    }
+
+    @Override
+    public long cost() {
+      // @TODO may be better to return the cardinality of the prime doc bitset,
+      // if not too costly to calculate.
+      return scorer.cost();
+    }
+  }
+
+  public Query getQuery() {
+    return _query;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/b0e26648/blur-query/src/main/java/org/apache/blur/lucene/serializer/AbtractQueryWritable.java
----------------------------------------------------------------------
diff --git a/blur-query/src/main/java/org/apache/blur/lucene/serializer/AbtractQueryWritable.java b/blur-query/src/main/java/org/apache/blur/lucene/serializer/AbtractQueryWritable.java
new file mode 100644
index 0000000..68a62c2
--- /dev/null
+++ b/blur-query/src/main/java/org/apache/blur/lucene/serializer/AbtractQueryWritable.java
@@ -0,0 +1,40 @@
+package org.apache.blur.lucene.serializer;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import org.apache.hadoop.io.Writable;
+import org.apache.lucene.search.Query;
+
+/**
+ * Base class for Hadoop {@link Writable} wrappers around concrete Lucene
+ * {@link Query} implementations.  (The "Abtract" spelling is kept as-is:
+ * the class name is part of the public interface.)
+ */
+public abstract class AbtractQueryWritable<T extends Query> implements Writable, Cloneable {
+
+  /** Returns the wrapped query instance. */
+  public abstract T getQuery();
+
+  /** Replaces the wrapped query instance. */
+  public abstract void setQuery(T query);
+
+  /** Returns the concrete query class this writable (de)serializes. */
+  public abstract Class<T> getType();
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public AbtractQueryWritable<T> clone() {
+    try {
+      return (AbtractQueryWritable<T>) super.clone();
+    } catch (CloneNotSupportedException impossible) {
+      // Cannot happen: this class implements Cloneable.
+      throw new RuntimeException(impossible);
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/b0e26648/blur-query/src/main/java/org/apache/blur/lucene/serializer/BooleanClauseWritable.java
----------------------------------------------------------------------
diff --git a/blur-query/src/main/java/org/apache/blur/lucene/serializer/BooleanClauseWritable.java b/blur-query/src/main/java/org/apache/blur/lucene/serializer/BooleanClauseWritable.java
new file mode 100644
index 0000000..e96a19b
--- /dev/null
+++ b/blur-query/src/main/java/org/apache/blur/lucene/serializer/BooleanClauseWritable.java
@@ -0,0 +1,87 @@
+package org.apache.blur.lucene.serializer;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.io.Writable;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanClause.Occur;
+
+/**
+ * Hadoop Writable wrapper for a Lucene {@link BooleanClause}.  The wire
+ * format is one occur byte (0=MUST, 1=MUST_NOT, 2=SHOULD) followed by the
+ * serialized query.
+ */
+public class BooleanClauseWritable implements Writable {
+
+  private BooleanClause booleanClause;
+
+  public BooleanClauseWritable() {
+  }
+
+  public BooleanClauseWritable(BooleanClause booleanClause) {
+    this.booleanClause = booleanClause;
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    out.write(encode(booleanClause.getOccur()));
+    new QueryWritable(booleanClause.getQuery()).write(out);
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    Occur occur = decode(in.readByte());
+    QueryWritable queryWritable = new QueryWritable();
+    queryWritable.readFields(in);
+    booleanClause = new BooleanClause(queryWritable.getQuery(), occur);
+  }
+
+  // Maps an Occur to its single-byte wire code.
+  private static int encode(Occur occur) {
+    if (occur == Occur.MUST) {
+      return 0;
+    } else if (occur == Occur.MUST_NOT) {
+      return 1;
+    } else if (occur == Occur.SHOULD) {
+      return 2;
+    }
+    throw new RuntimeException("Occur [" + occur + "] not supported");
+  }
+
+  // Maps a single-byte wire code back to its Occur.
+  private static Occur decode(byte o) {
+    if (o == 0) {
+      return Occur.MUST;
+    } else if (o == 1) {
+      return Occur.MUST_NOT;
+    } else if (o == 2) {
+      return Occur.SHOULD;
+    }
+    throw new RuntimeException("Occur [" + o + "] not supported");
+  }
+
+  public BooleanClause getBooleanClause() {
+    return booleanClause;
+  }
+
+  public void setBooleanClause(BooleanClause booleanClause) {
+    this.booleanClause = booleanClause;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/b0e26648/blur-query/src/main/java/org/apache/blur/lucene/serializer/BooleanQueryWritable.java
----------------------------------------------------------------------
diff --git a/blur-query/src/main/java/org/apache/blur/lucene/serializer/BooleanQueryWritable.java b/blur-query/src/main/java/org/apache/blur/lucene/serializer/BooleanQueryWritable.java
new file mode 100644
index 0000000..a21afce
--- /dev/null
+++ b/blur-query/src/main/java/org/apache/blur/lucene/serializer/BooleanQueryWritable.java
@@ -0,0 +1,78 @@
+package org.apache.blur.lucene.serializer;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+
+/**
+ * Hadoop Writable wrapper for a Lucene {@link BooleanQuery}.  Serializes the
+ * coord flag, boost, minimum-should-match and every clause, in that order.
+ */
+public class BooleanQueryWritable extends AbtractQueryWritable<BooleanQuery> {
+
+  private BooleanQuery query;
+
+  public BooleanQueryWritable() {
+  }
+
+  public BooleanQueryWritable(BooleanQuery booleanQuery) {
+    this.query = booleanQuery;
+  }
+
+  @Override
+  public BooleanQuery getQuery() {
+    return query;
+  }
+
+  @Override
+  public void setQuery(BooleanQuery query) {
+    this.query = query;
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    out.writeBoolean(query.isCoordDisabled());
+    out.writeFloat(query.getBoost());
+    out.writeInt(query.getMinimumNumberShouldMatch());
+    BooleanClause[] clauses = query.getClauses();
+    out.writeInt(clauses.length);
+    for (BooleanClause clause : clauses) {
+      new BooleanClauseWritable(clause).write(out);
+    }
+  }
+
+  // Rebuilds the query reading fields in the exact order written above.
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    query = new BooleanQuery(in.readBoolean());
+    query.setBoost(in.readFloat());
+    query.setMinimumNumberShouldMatch(in.readInt());
+    int length = in.readInt();
+    for (int i = 0; i < length; i++) {
+      BooleanClauseWritable clauseWritable = new BooleanClauseWritable();
+      clauseWritable.readFields(in);
+      query.add(clauseWritable.getBooleanClause());
+    }
+  }
+
+  @Override
+  public Class<BooleanQuery> getType() {
+    return BooleanQuery.class;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/b0e26648/blur-query/src/main/java/org/apache/blur/lucene/serializer/FuzzyQueryWritable.java
----------------------------------------------------------------------
diff --git a/blur-query/src/main/java/org/apache/blur/lucene/serializer/FuzzyQueryWritable.java b/blur-query/src/main/java/org/apache/blur/lucene/serializer/FuzzyQueryWritable.java
new file mode 100644
index 0000000..fe1d920
--- /dev/null
+++ b/blur-query/src/main/java/org/apache/blur/lucene/serializer/FuzzyQueryWritable.java
@@ -0,0 +1,110 @@
+package org.apache.blur.lucene.serializer;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.lang.reflect.Field;
+
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.FuzzyQuery;
+
+/**
+ * Hadoop Writable wrapper for a Lucene {@link FuzzyQuery}.
+ *
+ * maxExpansions and transpositions are read via reflection -- presumably
+ * this version of FuzzyQuery exposes no public getters for them (TODO
+ * confirm against the Lucene API in use).
+ */
+public class FuzzyQueryWritable extends AbtractQueryWritable<FuzzyQuery> {
+
+  private FuzzyQuery query;
+  // Cached accessible Field handles, initialized once in the static block.
+  private static Field maxExpansionsField;
+  private static Field transpositionsField;
+
+  static {
+    try {
+      maxExpansionsField = FuzzyQuery.class.getDeclaredField("maxExpansions");
+      transpositionsField = FuzzyQuery.class.getDeclaredField("transpositions");
+      maxExpansionsField.setAccessible(true);
+      transpositionsField.setAccessible(true);
+    } catch (SecurityException e) {
+      throw new RuntimeException(e);
+    } catch (NoSuchFieldException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  public FuzzyQueryWritable() {
+
+  }
+
+  public FuzzyQueryWritable(FuzzyQuery query) {
+    this.query = query;
+  }
+
+  public FuzzyQuery getQuery() {
+    return query;
+  }
+
+  public void setQuery(FuzzyQuery query) {
+    this.query = query;
+  }
+
+  // Wire format: boost, term, maxEdits, prefixLength, maxExpansions,
+  // transpositions -- readFields must read in exactly this order.
+  @Override
+  public void write(DataOutput out) throws IOException {
+    out.writeFloat(query.getBoost());
+    new TermWritable(query.getTerm()).write(out);
+    out.writeInt(query.getMaxEdits());
+    out.writeInt(query.getPrefixLength());
+    out.writeInt(getMaxExpansions(query));
+    out.writeBoolean(getTranspositions(query));
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    float boost = in.readFloat();
+    TermWritable termWritable = new TermWritable();
+    termWritable.readFields(in);
+    Term term = termWritable.getTerm();
+    int maxEdits = in.readInt();
+    int prefixLength = in.readInt();
+    int maxExpansions = in.readInt();
+    boolean transpositions = in.readBoolean();
+    query = new FuzzyQuery(term, maxEdits, prefixLength, maxExpansions, transpositions);
+    query.setBoost(boost);
+  }
+
+  // Reflectively reads FuzzyQuery.transpositions (no public getter assumed).
+  private static boolean getTranspositions(FuzzyQuery query) {
+    try {
+      return transpositionsField.getBoolean(query);
+    } catch (IllegalArgumentException e) {
+      throw new RuntimeException(e);
+    } catch (IllegalAccessException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  // Reflectively reads FuzzyQuery.maxExpansions (no public getter assumed).
+  private static int getMaxExpansions(FuzzyQuery query) {
+    try {
+      return maxExpansionsField.getInt(query);
+    } catch (IllegalArgumentException e) {
+      throw new RuntimeException(e);
+    } catch (IllegalAccessException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  @Override
+  public Class<FuzzyQuery> getType() {
+    return FuzzyQuery.class;
+  }
+}


Mime
View raw message