lucene-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From rm...@apache.org
Subject svn commit: r1139458 [19/24] - in /lucene/dev/branches/solr2193: ./ dev-tools/eclipse/ dev-tools/idea/.idea/ dev-tools/idea/lucene/ dev-tools/idea/solr/ dev-tools/idea/solr/contrib/clustering/ dev-tools/maven/ dev-tools/maven/modules/analysis/icu/ dev-...
Date Fri, 24 Jun 2011 23:07:38 GMT
Modified: lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/Grouping.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/Grouping.java?rev=1139458&r1=1139457&r2=1139458&view=diff
==============================================================================
--- lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/Grouping.java (original)
+++ lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/Grouping.java Fri Jun 24 23:07:10 2011
@@ -17,80 +17,524 @@
 
 package org.apache.solr.search;
 
+import org.apache.commons.lang.ArrayUtils;
+import org.apache.lucene.document.Fieldable;
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.queryParser.ParseException;
 import org.apache.lucene.search.*;
+import org.apache.lucene.search.grouping.*;
 import org.apache.lucene.util.BytesRef;
+import org.apache.solr.common.SolrException;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.schema.StrFieldSource;
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.schema.*;
 import org.apache.solr.search.function.DocValues;
-import org.apache.solr.search.function.StringIndexDocValues;
+import org.apache.solr.search.function.FunctionQuery;
+import org.apache.solr.search.function.QueryValueSource;
 import org.apache.solr.search.function.ValueSource;
-import org.apache.solr.util.SentinelIntSet;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.*;
 
+/**
+ * Basic Solr Grouping infrastructure.
+ * Warning: NOT thread safe!
+ *
+ * @lucene.experimental
+ */
 public class Grouping {
 
-  public enum Format {Grouped, Simple}
+  private final static Logger logger = LoggerFactory.getLogger(Grouping.class);
+
+  private final SolrIndexSearcher searcher;
+  private final SolrIndexSearcher.QueryResult qr;
+  private final SolrIndexSearcher.QueryCommand cmd;
+  private final List<Command> commands = new ArrayList<Command>();
+  private final boolean main;
+  private final boolean cacheSecondPassSearch;
+  private final int maxDocsPercentageToCache;
+
+  private Sort sort;
+  private Sort groupSort;
+  private int limitDefault;
+  private int docsPerGroupDefault;
+  private int groupOffsetDefault;
+  private Format defaultFormat;
+  private TotalCount defaultTotalCount;
+
+  private int maxDoc;
+  private boolean needScores;
+  private boolean getDocSet;
+  private boolean getDocList; // doclist needed for debugging or highlighting
+  private Query query;
+  private DocSet filter;
+  private Filter luceneFilter;
+  private NamedList grouped = new SimpleOrderedMap();
+  private Set<Integer> idSet = new LinkedHashSet<Integer>();  // used for tracking unique docs when we need a doclist
+  private int maxMatches;  // max number of matches from any grouping command
+  private float maxScore = Float.NEGATIVE_INFINITY;  // max score seen in any doclist
+  private boolean signalCacheWarning = false;
+
+
+  public DocList mainResult;  // output if one of the grouping commands should be used as the main result.
+
+  /**
+   * @param searcher
+   * @param qr
+   * @param cmd
+   * @param cacheSecondPassSearch Whether to cache the documents and scores from the first pass search for the second
+   *                              pass search.
+   * @param maxDocsPercentageToCache The maximum number of documents, as a percentage of maxDoc,
+   *                                 that is allowed in the cache. When this threshold is met,
+   *                                 the cache is not used in the second pass search.
+   */
+  public Grouping(SolrIndexSearcher searcher,
+                  SolrIndexSearcher.QueryResult qr,
+                  SolrIndexSearcher.QueryCommand cmd,
+                  boolean cacheSecondPassSearch,
+                  int maxDocsPercentageToCache,
+                  boolean main) {
+    this.searcher = searcher;
+    this.qr = qr;
+    this.cmd = cmd;
+    this.cacheSecondPassSearch = cacheSecondPassSearch;
+    this.maxDocsPercentageToCache = maxDocsPercentageToCache;
+    this.main = main;
+  }
+
+  public void add(Grouping.Command groupingCommand) {
+    commands.add(groupingCommand);
+  }
+
+  /**
+   * Adds a field command based on the specified field.
+   * If the field is not compatible with {@link CommandField} it invokes the
+   * {@link #addFunctionCommand(String, org.apache.solr.request.SolrQueryRequest)} method.
+   *
+   * @param field The fieldname to group by.
+   */
+  public void addFieldCommand(String field, SolrQueryRequest request) throws ParseException {
+    SchemaField schemaField = searcher.getSchema().getField(field); // Throws an exception when field doesn't exist. Bad request.
+    FieldType fieldType = schemaField.getType();
+    ValueSource valueSource = fieldType.getValueSource(schemaField, null);
+    if (!(valueSource instanceof StrFieldSource)) {
+      addFunctionCommand(field, request);
+      return;
+    }
+
+    Grouping.CommandField gc = new CommandField();
+    gc.groupSort = groupSort;
+    gc.groupBy = field;
+    gc.key = field;
+    gc.numGroups = limitDefault;
+    gc.docsPerGroup = docsPerGroupDefault;
+    gc.groupOffset = groupOffsetDefault;
+    gc.offset = cmd.getOffset();
+    gc.sort = sort;
+    gc.format = defaultFormat;
+    gc.totalCount = defaultTotalCount;
+
+    if (main) {
+      gc.main = true;
+      gc.format = Grouping.Format.simple;
+    }
+
+    if (gc.format == Grouping.Format.simple) {
+      gc.groupOffset = 0;  // doesn't make sense
+    }
+    commands.add(gc);
+  }
+
+  public void addFunctionCommand(String groupByStr, SolrQueryRequest request) throws ParseException {
+    QParser parser = QParser.getParser(groupByStr, "func", request);
+    Query q = parser.getQuery();
+    final Grouping.Command gc;
+    if (q instanceof FunctionQuery) {
+      ValueSource valueSource = ((FunctionQuery)q).getValueSource();
+      if (valueSource instanceof StrFieldSource) {
+        String field = ((StrFieldSource) valueSource).getField();
+        CommandField commandField = new CommandField();
+        commandField.groupBy = field;
+        gc = commandField;
+      } else {
+        CommandFunc commandFunc = new CommandFunc();
+        commandFunc.groupBy = valueSource;
+        gc = commandFunc;
+      }
+    } else {
+      CommandFunc commandFunc = new CommandFunc();
+      commandFunc.groupBy = new QueryValueSource(q, 0.0f);
+      gc = commandFunc;
+    }
+    gc.groupSort = groupSort;
+    gc.key = groupByStr;
+    gc.numGroups = limitDefault;
+    gc.docsPerGroup = docsPerGroupDefault;
+    gc.groupOffset = groupOffsetDefault;
+    gc.offset = cmd.getOffset();
+    gc.sort = sort;
+    gc.format = defaultFormat;
+    gc.totalCount = defaultTotalCount;
+
+    if (main) {
+      gc.main = true;
+      gc.format = Grouping.Format.simple;
+    }
+
+    if (gc.format == Grouping.Format.simple) {
+      gc.groupOffset = 0;  // doesn't make sense
+    }
+
+    commands.add(gc);
+  }
+
+  public void addQueryCommand(String groupByStr, SolrQueryRequest request) throws ParseException {
+    QParser parser = QParser.getParser(groupByStr, null, request);
+    Query gq = parser.getQuery();
+    Grouping.CommandQuery gc = new CommandQuery();
+    gc.query = gq;
+    gc.groupSort = groupSort;
+    gc.key = groupByStr;
+    gc.numGroups = limitDefault;
+    gc.docsPerGroup = docsPerGroupDefault;
+    gc.groupOffset = groupOffsetDefault;
+
+    // these two params will only be used if this is for the main result set
+    gc.offset = cmd.getOffset();
+    gc.numGroups = limitDefault;
+    gc.format = defaultFormat;
+
+    if (main) {
+      gc.main = true;
+      gc.format = Grouping.Format.simple;
+    }
+    if (gc.format == Grouping.Format.simple) {
+      gc.docsPerGroup = gc.numGroups;  // doesn't make sense to limit to one
+      gc.groupOffset = gc.offset;
+    }
+
+    commands.add(gc);
+  }
+
+  public Grouping setSort(Sort sort) {
+    this.sort = sort;
+    return this;
+  }
+
+  public Grouping setGroupSort(Sort groupSort) {
+    this.groupSort = groupSort;
+    return this;
+  }
+
+  public Grouping setLimitDefault(int limitDefault) {
+    this.limitDefault = limitDefault;
+    return this;
+  }
+
+  public Grouping setDocsPerGroupDefault(int docsPerGroupDefault) {
+    this.docsPerGroupDefault = docsPerGroupDefault;
+    return this;
+  }
+
+  public Grouping setGroupOffsetDefault(int groupOffsetDefault) {
+    this.groupOffsetDefault = groupOffsetDefault;
+    return this;
+  }
+
+  public Grouping setDefaultFormat(Format defaultFormat) {
+    this.defaultFormat = defaultFormat;
+    return this;
+  }
+
+  public Grouping setDefaultTotalCount(TotalCount defaultTotalCount) {
+    this.defaultTotalCount = defaultTotalCount;
+    return this;
+  }
+
+  public List<Command> getCommands() {
+    return commands;
+  }
+
+  public void execute() throws IOException {
+    if (commands.isEmpty()) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Specify at least one field, function or query to group by.");
+    }
+
+    DocListAndSet out = new DocListAndSet();
+    qr.setDocListAndSet(out);
+
+    filter = cmd.getFilter() != null ? cmd.getFilter() : searcher.getDocSet(cmd.getFilterList());
+    luceneFilter = filter == null ? null : filter.getTopFilter();
+    maxDoc = searcher.maxDoc();
+
+    needScores = (cmd.getFlags() & SolrIndexSearcher.GET_SCORES) != 0;
+    boolean cacheScores = false;
+    // NOTE: Change this when groupSort can be specified per group
+    if (!needScores && !commands.isEmpty()) {
+      if (commands.get(0).groupSort == null) {
+        cacheScores = true;
+      } else {
+        for (SortField field : commands.get(0).groupSort.getSort()) {
+          if (field.getType() == SortField.Type.SCORE) {
+            cacheScores = true;
+            break;
+          }
+        }
+      }
+    } else if (needScores) {
+      cacheScores = needScores;
+    }
+    getDocSet = (cmd.getFlags() & SolrIndexSearcher.GET_DOCSET) != 0;
+    getDocList = (cmd.getFlags() & SolrIndexSearcher.GET_DOCLIST) != 0;
+    query = QueryUtils.makeQueryable(cmd.getQuery());
+
+    for (Command cmd : commands) {
+      cmd.prepare();
+    }
+
+    List<Collector> collectors = new ArrayList<Collector>(commands.size());
+    for (Command cmd : commands) {
+      Collector collector = cmd.createFirstPassCollector();
+      if (collector != null)
+        collectors.add(collector);
+    }
+
+    Collector allCollectors = MultiCollector.wrap(collectors.toArray(new Collector[collectors.size()]));
+    DocSetCollector setCollector = null;
+    if (getDocSet) {
+      setCollector = new DocSetDelegateCollector(maxDoc >> 6, maxDoc, allCollectors);
+      allCollectors = setCollector;
+    }
+
+    CachingCollector cachedCollector = null;
+    if (cacheSecondPassSearch && allCollectors != null) {
+      int maxDocsToCache = (int) Math.round(maxDoc * (maxDocsPercentageToCache / 100.0d));
+      // Only makes sense to cache if we cache more than zero.
+      // Maybe we should have a minimum and a maximum, that defines the window we would like caching for.
+      if (maxDocsToCache > 0) {
+        allCollectors = cachedCollector = CachingCollector.create(allCollectors, cacheScores, maxDocsToCache);
+      }
+    }
+
+    if (allCollectors != null) {
+      searcher.search(query, luceneFilter, allCollectors);
+    }
+
+    if (getDocSet) {
+      qr.setDocSet(setCollector.getDocSet());
+    }
+
+    collectors.clear();
+    for (Command cmd : commands) {
+      Collector collector = cmd.createSecondPassCollector();
+      if (collector != null)
+        collectors.add(collector);
+    }
+
+    if (!collectors.isEmpty()) {
+      Collector secondPhaseCollectors = MultiCollector.wrap(collectors.toArray(new Collector[collectors.size()]));
+      if (collectors.size() > 0) {
+        if (cachedCollector != null) {
+          if (cachedCollector.isCached()) {
+            cachedCollector.replay(secondPhaseCollectors);
+          } else {
+            signalCacheWarning = true;
+            logger.warn(String.format("The grouping cache is active, but not used because it exceeded the max cache limit of %d percent", maxDocsPercentageToCache));
+            logger.warn("Please increase cache size or disable group caching.");
+            searcher.search(query, luceneFilter, secondPhaseCollectors);
+          }
+        } else {
+          searcher.search(query, luceneFilter, secondPhaseCollectors);
+        }
+      }
+    }
+
+    for (Command cmd : commands) {
+      cmd.finish();
+    }
+
+    qr.groupedResults = grouped;
+
+    if (getDocList) {
+      int sz = idSet.size();
+      int[] ids = new int[sz];
+      int idx = 0;
+      for (int val : idSet) {
+        ids[idx++] = val;
+      }
+      qr.setDocList(new DocSlice(0, sz, ids, null, maxMatches, maxScore));
+    }
+  }
+
+  /**
+   * Returns offset + len when len is zero or higher and offset + len stays within max.
+   * Otherwise returns max.
+   *
+   * @param offset The offset
+   * @param len The number of documents to return
+   * @param max The value to return if len < 0 or if offset + len is negative or exceeds max
+   * @return offset + len if len is zero or higher and within bounds; otherwise max
+   */
+  int getMax(int offset, int len, int max) {
+    int v = len < 0 ? max : offset + len;
+    if (v < 0 || v > max) v = max;
+    return v;
+  }
+
+  /**
+   * Returns whether a cache warning should be sent to the client.
+   * The value <code>true</code> is returned when the cache is emptied because the caching limits were met, otherwise
+   * <code>false</code> is returned.
+   *
+   * @return whether a cache warning should be sent to the client
+   */
+  public boolean isSignalCacheWarning() {
+    return signalCacheWarning;
+  }
+
+  //======================================   Inner classes =============================================================
+
+  public static enum Format {
+
+    /**
+     * Grouped result. Each group has its own result set.
+     */
+    grouped,
+
+    /**
+     * Flat result. All documents of all groups are put in one list.
+     */
+    simple
+  }
+
+  public static enum TotalCount {
+    /**
+     * Computations should be based on groups.
+     */
+    grouped,
+
+    /**
+     * Computations should be based on plain documents, so not taking grouping into account.
+     */
+    ungrouped
+  }
+
+  /**
+   * General group command. A group command is responsible for creating the first and second pass collectors.
+   * A group command is also responsible for creating the response structure.
+   * <p/>
+ * Note: Maybe creating the response structure should be done in something like a ResponseBuilder?
+ * Warning: NOT thread safe!
+   */
+  public abstract class Command<GROUP_VALUE_TYPE> {
 
-  public abstract class Command {
     public String key;       // the name to use for this group in the response
     public Sort groupSort;   // the sort of the documents *within* a single group.
     public Sort sort;        // the sort between groups
     public int docsPerGroup; // how many docs in each group - from "group.limit" param, default=1
     public int groupOffset;  // the offset within each group (for paging within each group)
     public int numGroups;    // how many groups - defaults to the "rows" parameter
+    int actualGroupsToFind;  // How many groups should actually be found. Based on groupOffset and numGroups.
     public int offset;       // offset into the list of groups
     public Format format;
     public boolean main;     // use as the main result in simple format (grouped.main=true param)
+    public TotalCount totalCount = TotalCount.ungrouped;
+
+    TopGroups<GROUP_VALUE_TYPE> result;
 
 
-    abstract void prepare() throws IOException;
-    abstract Collector createCollector() throws IOException;
-    Collector createNextCollector() throws IOException {
+    /**
+     * Prepare this <code>Command</code> for execution.
+     *
+     * @throws IOException If I/O related errors occur
+     */
+    protected abstract void prepare() throws IOException;
+
+    /**
+     * Returns one or more {@link Collector} instances that are needed to perform the first pass search.
+     * If multiple Collectors are returned then these are wrapped in a {@link org.apache.lucene.search.MultiCollector}.
+     *
+     * @return one or more {@link Collector} instances that are needed to perform the first pass search
+     * @throws IOException If I/O related errors occur
+     */
+    protected abstract Collector createFirstPassCollector() throws IOException;
+
+    /**
+     * Returns zero or more {@link Collector} instances that are needed to perform the second pass search.
+     * In the case when no {@link Collector} instances are created <code>null</code> is returned.
+     * If multiple Collectors are returned then these are wrapped in a {@link org.apache.lucene.search.MultiCollector}.
+     *
+     * @return zero or more {@link Collector} instances that are needed to perform the second pass search
+     * @throws IOException If I/O related errors occur
+     */
+    protected Collector createSecondPassCollector() throws IOException {
       return null;
     }
-    abstract void finish() throws IOException;
 
-    abstract int getMatches();
+    /**
+     * Performs any necessary post actions to prepare the response.
+     *
+     * @throws IOException If I/O related errors occur
+     */
+    protected abstract void finish() throws IOException;
+
+    /**
+     * Returns the number of matches for this <code>Command</code>.
+     *
+     * @return the number of matches for this <code>Command</code>
+     */
+    public abstract int getMatches();
+
+    /**
+     * Returns the number of groups found for this <code>Command</code>.
+     * If the command doesn't support counting the groups <code>null</code> is returned.
+     *
+     * @return the number of groups found for this <code>Command</code>
+     */
+    protected Integer getNumberOfGroups() {
+      return null;
+    }
 
-    NamedList commonResponse() {
+    protected NamedList commonResponse() {
       NamedList groupResult = new SimpleOrderedMap();
       grouped.add(key, groupResult);  // grouped={ key={
 
-      int this_matches = getMatches();
-      groupResult.add("matches", this_matches);
-      maxMatches = Math.max(maxMatches, this_matches);
+      int matches = getMatches();
+      groupResult.add("matches", matches);
+      if (totalCount == TotalCount.grouped) {
+        Integer totalNrOfGroups = getNumberOfGroups();
+        groupResult.add("ngroups", totalNrOfGroups == null ? 0 : totalNrOfGroups);
+      }
+      maxMatches = Math.max(maxMatches, matches);
       return groupResult;
     }
 
-    DocList getDocList(TopDocsCollector collector) {
-      int max = collector.getTotalHits();
+    protected DocList getDocList(GroupDocs groups) {
+      int max = groups.totalHits;
       int off = groupOffset;
       int len = docsPerGroup;
-      if (format == Format.Simple) {
+      if (format == Format.simple) {
         off = offset;
         len = numGroups;
       }
       int docsToCollect = getMax(off, len, max);
 
       // TODO: implement a DocList impl that doesn't need to start at offset=0
-      TopDocs topDocs = collector.topDocs(0, Math.max(docsToCollect,1));  // 0 isn't supported as a valid value
-      int docsCollected = Math.min(docsToCollect, topDocs.scoreDocs.length);
+      int docsCollected = Math.min(docsToCollect, groups.scoreDocs.length);
 
       int ids[] = new int[docsCollected];
       float[] scores = needScores ? new float[docsCollected] : null;
-      for (int i=0; i<ids.length; i++) {
-        ids[i] = topDocs.scoreDocs[i].doc;
+      for (int i = 0; i < ids.length; i++) {
+        ids[i] = groups.scoreDocs[i].doc;
         if (scores != null)
-          scores[i] = topDocs.scoreDocs[i].score;
+          scores[i] = groups.scoreDocs[i].score;
       }
 
-      float score = topDocs.getMaxScore();
+      float score = groups.maxScore;
       maxScore = Math.max(maxScore, score);
-      DocSlice docs = new DocSlice(off, Math.max(0, ids.length - off), ids, scores, topDocs.totalHits, score);
+      DocSlice docs = new DocSlice(off, Math.max(0, ids.length - off), ids, scores, groups.totalHits, score);
 
       if (getDocList) {
         DocIterator iter = docs.iterator();
@@ -100,101 +544,134 @@ public class Grouping {
       return docs;
     }
 
-    void addDocList(NamedList rsp, TopDocsCollector collector) {
-      rsp.add("doclist", getDocList(collector));
+    protected void addDocList(NamedList rsp, GroupDocs groups) {
+      rsp.add("doclist", getDocList(groups));
     }
-  }
 
-  public class CommandQuery extends Command {
-    public Query query;
+    // Flatten the groups and get up offset + rows documents
+    protected DocList createSimpleResponse() {
+      GroupDocs[] groups = result != null ? result.groups : new GroupDocs[0];
+
+      List<Integer> ids = new ArrayList<Integer>();
+      List<Float> scores = new ArrayList<Float>();
+      int docsToGather = getMax(offset, numGroups, maxDoc);
+      int docsGathered = 0;
+      float maxScore = Float.NEGATIVE_INFINITY;
+
+      outer:
+      for (GroupDocs group : groups) {
+        if (group.maxScore > maxScore) {
+          maxScore = group.maxScore;
+        }
 
-    TopDocsCollector topCollector;
-    FilterCollector collector;
+        for (ScoreDoc scoreDoc : group.scoreDocs) {
+          if (docsGathered >= docsToGather) {
+            break outer;
+          }
 
-    @Override
-    void prepare() throws IOException {
-    }
+          ids.add(scoreDoc.doc);
+          scores.add(scoreDoc.score);
+          docsGathered++;
+        }
+      }
 
-    @Override
-    Collector createCollector() throws IOException {
-      int docsToCollect = getMax(groupOffset, docsPerGroup, maxDoc);
-      DocSet groupFilt = searcher.getDocSet(query);
-      topCollector = newCollector(groupSort, docsToCollect, false, needScores);
-      collector = new FilterCollector(groupFilt, topCollector);
-      return collector;
-    }
+      int len = Math.min(numGroups, docsGathered);
+      if (offset > len) {
+        len = 0;
+      }
 
-    @Override
-    void finish() throws IOException {
-      if (main) {
-        mainResult = getDocList((TopDocsCollector)collector.getCollector());
-      } else {
-        NamedList rsp = commonResponse();
-        addDocList(rsp, (TopDocsCollector)collector.getCollector());
+      int[] docs = ArrayUtils.toPrimitive(ids.toArray(new Integer[ids.size()]));
+      float[] docScores = ArrayUtils.toPrimitive(scores.toArray(new Float[scores.size()]));
+      DocSlice docSlice = new DocSlice(offset, len, docs, docScores, getMatches(), maxScore);
+
+      if (getDocList) {
+        for (int i = offset; i < docs.length; i++) {
+          idSet.add(docs[i]);
+        }
       }
-    }
 
-    @Override
-    int getMatches() {
-      return collector.getMatches();
+      return docSlice;
     }
-  }
 
-  
-  public class CommandFunc extends Command {
-    public ValueSource groupBy;
+  }
 
+  /**
+   * A group command for grouping on a field.
+   */
+  public class CommandField extends Command<BytesRef> {
 
-    int maxGroupToFind;
-    Map context;
-    TopGroupCollector collector = null;
-    Phase2GroupCollector collector2;
-    
-    @Override
-    void prepare() throws IOException {
-        Map context = ValueSource.newContext(searcher);
-        groupBy.createWeight(context, searcher);
-    }
+    public String groupBy;
+    TermFirstPassGroupingCollector firstPass;
+    TermSecondPassGroupingCollector secondPass;
 
-    @Override
-    Collector createCollector() throws IOException {
-      maxGroupToFind = getMax(offset, numGroups, maxDoc);
+    TermAllGroupsCollector allGroupsCollector;
 
-      // if we aren't going to return any groups, disregard the offset 
-      if (numGroups == 0) maxGroupToFind = 0;
+    // If offset falls outside the number of documents a group can provide use this collector instead of secondPass
+    TotalHitCountCollector fallBackCollector;
+    Collection<SearchGroup<BytesRef>> topGroups;
 
-      collector = new TopGroupCollector(groupBy, context, searcher.weightSort(normalizeSort(sort)), maxGroupToFind);
+    /**
+     * {@inheritDoc}
+     */
+    protected void prepare() throws IOException {
+      actualGroupsToFind = getMax(offset, numGroups, maxDoc);
+    }
 
-      /*** if we need a different algorithm when sort != group.sort
-      if (compareSorts(sort, groupSort)) {
-        collector = new TopGroupCollector(groupBy, context, normalizeSort(sort), maxGroupToFind);
-      } else {
-        collector = new TopGroupSortCollector(groupBy, context, normalizeSort(sort), normalizeSort(groupSort), maxGroupToFind);
+    /**
+     * {@inheritDoc}
+     */
+    protected Collector createFirstPassCollector() throws IOException {
+      // Ok we don't want groups, but do want a total count
+      if (actualGroupsToFind <= 0) {
+        fallBackCollector = new TotalHitCountCollector();
+        return fallBackCollector;
       }
-      ***/
-      return collector;
-    }
 
-    @Override
-    Collector createNextCollector() throws IOException {
-      if (numGroups == 0) return null;
+      sort = sort == null ? Sort.RELEVANCE : sort;
+      firstPass = new TermFirstPassGroupingCollectorJava6(groupBy, sort, actualGroupsToFind);
+      return firstPass;
+    }
 
-      int docsToCollect = getMax(groupOffset, docsPerGroup, maxDoc);
-      docsToCollect = Math.max(docsToCollect, 1);
+    /**
+     * {@inheritDoc}
+     */
+    protected Collector createSecondPassCollector() throws IOException {
+      if (actualGroupsToFind <= 0) {
+        allGroupsCollector = new TermAllGroupsCollector(groupBy);
+        return totalCount == TotalCount.grouped ? allGroupsCollector : null;
+      }
 
-      // if the format is simple, don't skip groups (since we are counting docs, not groups)
-      int collectorOffset = format==Format.Simple ? 0 : offset;
+      topGroups = format == Format.grouped ? firstPass.getTopGroups(offset, false) : firstPass.getTopGroups(0, false);
+      if (topGroups == null) {
+        if (totalCount == TotalCount.grouped) {
+          allGroupsCollector = new TermAllGroupsCollector(groupBy);
+          fallBackCollector = new TotalHitCountCollector();
+          return MultiCollector.wrap(allGroupsCollector, fallBackCollector);
+        } else {
+          fallBackCollector = new TotalHitCountCollector();
+          return fallBackCollector;
+        }
+      }
 
-      if (groupBy instanceof StrFieldSource) {
-        collector2 = new Phase2StringGroupCollector(collector, groupBy, context, searcher.weightSort(groupSort), docsToCollect, needScores, collectorOffset);
+      int groupedDocsToCollect = getMax(groupOffset, docsPerGroup, maxDoc);
+      groupedDocsToCollect = Math.max(groupedDocsToCollect, 1);
+      secondPass = new TermSecondPassGroupingCollector(
+          groupBy, topGroups, sort, groupSort, groupedDocsToCollect, needScores, needScores, false
+      );
+
+      if (totalCount == TotalCount.grouped) {
+        allGroupsCollector = new TermAllGroupsCollector(groupBy);
+        return MultiCollector.wrap(secondPass, allGroupsCollector);
       } else {
-        collector2 = new Phase2GroupCollector(collector, groupBy, context, searcher.weightSort(groupSort), docsToCollect, needScores, collectorOffset);
+        return secondPass;
       }
-      return collector2;
     }
 
-    @Override
-    void finish() throws IOException {
+    /**
+     * {@inheritDoc}
+     */
+    protected void finish() throws IOException {
+      result = secondPass != null ? secondPass.getTopGroups(0) : null;
       if (main) {
         mainResult = createSimpleResponse();
         return;
@@ -202,7 +679,7 @@ public class Grouping {
 
       NamedList groupResult = commonResponse();
 
-      if (format == Format.Simple) {
+      if (format == Format.simple) {
         groupResult.add("doclist", createSimpleResponse());
         return;
       }
@@ -210,630 +687,403 @@ public class Grouping {
       List groupList = new ArrayList();
       groupResult.add("groups", groupList);        // grouped={ key={ groups=[
 
+      if (result == null) {
+        return;
+      }
+
       // handle case of rows=0
       if (numGroups == 0) return;
 
-      if (collector.orderedGroups == null) collector.buildSet();
-
-      int skipCount = offset;
-      for (SearchGroup group : collector.orderedGroups) {
-        if (skipCount > 0) {
-          skipCount--;
-          continue;
-        }
+      for (GroupDocs<BytesRef> group : result.groups) {
         NamedList nl = new SimpleOrderedMap();
         groupList.add(nl);                         // grouped={ key={ groups=[ {
 
-        nl.add("groupValue", group.groupValue.toObject());
 
-        SearchGroupDocs groupDocs = collector2.groupMap.get(group.groupValue);
-        addDocList(nl, groupDocs.collector);
-      }
-    }
-
-    private DocList createSimpleResponse() {
-      int docCount = numGroups;
-      int docOffset = offset;    
-      int docsToGather = getMax(docOffset, docCount, maxDoc);
-
-      float maxScore = Float.NEGATIVE_INFINITY; 
-      List<TopDocs> topDocsList = new ArrayList<TopDocs>();
-      int numDocs = 0;
-      for (SearchGroup group : collector.orderedGroups) {
-        SearchGroupDocs groupDocs = collector2.groupMap.get(group.groupValue);
-        
-        TopDocsCollector collector = groupDocs.collector;
-        int hits = collector.getTotalHits();
-
-        int num = Math.min(docsPerGroup, hits - groupOffset); // how many docs are in this group
-        if (num <= 0) continue;
-
-        TopDocs topDocs = collector.topDocs(groupOffset, Math.min(docsPerGroup,docsToGather-numDocs));
-        topDocsList.add(topDocs);
-        numDocs += topDocs.scoreDocs.length;
-
-        float score = topDocs.getMaxScore();
-        maxScore = Math.max(maxScore, score);
-
-        if (numDocs >= docsToGather) break;
-      }
-      assert numDocs <= docsToGather; // make sure we didn't gather too many
-      
-      int[] ids = new int[numDocs];
-      float[] scores = needScores ? new float[numDocs] : null;
-      int pos = 0;
-
-      for (TopDocs topDocs : topDocsList) {
-        for (ScoreDoc sd : topDocs.scoreDocs) {
-          ids[pos] = sd.doc;
-          if (scores != null) scores[pos] = sd.score;
-          pos++;
+        // To keep the response format compatible with trunk.
+        // In trunk MutableValue can convert an indexed value to its native type. E.g. string to int
+        // The only option I currently see is to use the FieldType for this
+        if (group.groupValue != null) {
+          SchemaField schemaField = searcher.getSchema().getField(groupBy);
+          FieldType fieldType = schemaField.getType();
+          String readableValue = fieldType.indexedToReadable(group.groupValue.utf8ToString());
+          Fieldable field = schemaField.createField(readableValue, 0.0f);
+          nl.add("groupValue", fieldType.toObject(field));
+        } else {
+          nl.add("groupValue", null);
         }
-      }
 
-      DocSlice docs = new DocSlice(docOffset, Math.max(0, ids.length - docOffset), ids, scores, getMatches(), maxScore);
+        addDocList(nl, group);
+      }
+    }
 
-      if (getDocList) {
-        DocIterator iter = docs.iterator();
-        while (iter.hasNext())
-          idSet.add(iter.nextDoc());
+    /**
+     * {@inheritDoc}
+     */
+    public int getMatches() {
+      if (result == null && fallBackCollector == null) {
+        return 0;
       }
 
-      return docs;
+      return result != null ? result.totalHitCount : fallBackCollector.getTotalHits();
     }
 
-    @Override
-    int getMatches() {
-      return collector.getMatches();
+    /**
+     * {@inheritDoc}
+     */
+    protected Integer getNumberOfGroups() {
+      return allGroupsCollector == null ? null : allGroupsCollector.getGroupCount();
     }
   }
 
+  /**
+   * A group command for grouping on a query.
+   */
+  //NOTE: doesn't need to be generic. Maybe Command interface --> First / Second pass abstract impl.
+  public class CommandQuery extends Command {
 
+    public Query query;
+    TopDocsCollector topCollector;
+    FilterCollector collector;
 
-  static Sort byScoreDesc = new Sort();
+    /**
+     * {@inheritDoc}
+     */
+    protected void prepare() throws IOException {
+      actualGroupsToFind = getMax(offset, numGroups, maxDoc);
+    }
 
-  static boolean compareSorts(Sort sort1, Sort sort2) {
-    return sort1 == sort2 || normalizeSort(sort1).equals(normalizeSort(sort2)); 
-  }
+    /**
+     * {@inheritDoc}
+     */
+    protected Collector createFirstPassCollector() throws IOException {
+      DocSet groupFilt = searcher.getDocSet(query);
+      topCollector = newCollector(groupSort, needScores);
+      collector = new FilterCollector(groupFilt, topCollector);
+      return collector;
+    }
 
-  /** returns a sort by score desc if null */
-  static Sort normalizeSort(Sort sort) {
-    return sort==null ? byScoreDesc : sort;
-  } 
+    TopDocsCollector newCollector(Sort sort, boolean needScores) throws IOException {
+      int groupDocsToCollect = getMax(groupOffset, docsPerGroup, maxDoc);
+      if (sort == null || sort == Sort.RELEVANCE) {
+        return TopScoreDocCollector.create(groupDocsToCollect, true);
+      } else {
+        return TopFieldCollector.create(searcher.weightSort(sort), groupDocsToCollect, false, needScores, needScores, true);
+      }
+    }
 
-  static int getMax(int offset, int len, int max) {
-    int v = len<0 ? max : offset + len;
-    if (v < 0 || v > max) v = max;
-    return v;
-  }
+    /**
+     * {@inheritDoc}
+     */
+    protected void finish() throws IOException {
+      TopDocsCollector topDocsCollector = (TopDocsCollector) collector.collector;
+      TopDocs topDocs = topDocsCollector.topDocs();
+      GroupDocs<String> groupDocs = new GroupDocs<String>(topDocs.getMaxScore(), topDocs.totalHits, topDocs.scoreDocs, query.toString(), null);
+      if (main) {
+        mainResult = getDocList(groupDocs);
+      } else {
+        NamedList rsp = commonResponse();
+        addDocList(rsp, groupDocs);
+      }
+    }
 
-  TopDocsCollector newCollector(Sort sort, int numHits, boolean fillFields, boolean needScores) throws IOException {
-    if (sort==null || sort==byScoreDesc) {
-      return TopScoreDocCollector.create(numHits, true);
-    } else {
-      return TopFieldCollector.create(searcher.weightSort(sort), numHits, false, needScores, needScores, true);
+    /**
+     * {@inheritDoc}
+     */
+    public int getMatches() {
+      return collector.matches;
     }
   }
 
+  /**
+   * A command for grouping on a function.
+   */
+  public class CommandFunc extends Command<MutableValue> {
 
-  final SolrIndexSearcher searcher;
-  final SolrIndexSearcher.QueryResult qr;
-  final SolrIndexSearcher.QueryCommand cmd;
-  final List<Command> commands = new ArrayList<Command>();
-
-  public DocList mainResult;  // output if one of the grouping commands should be used as the main result.
-
-  public Grouping(SolrIndexSearcher searcher, SolrIndexSearcher.QueryResult qr, SolrIndexSearcher.QueryCommand cmd) {
-    this.searcher = searcher;
-    this.qr = qr;
-    this.cmd = cmd;
-  }
-
-  public void add(Grouping.Command groupingCommand) {
-    commands.add(groupingCommand);
-  }
+    public ValueSource groupBy;
+    Map context;
 
-  int maxDoc;
-  boolean needScores;
-  boolean getDocSet;
-  boolean getDocList; // doclist needed for debugging or highlighting
-  Query query;
-  DocSet filter;
-  Filter luceneFilter;
-  NamedList grouped = new SimpleOrderedMap();
-  Set<Integer> idSet = new LinkedHashSet<Integer>();  // used for tracking unique docs when we need a doclist
-  int maxMatches;  // max number of matches from any grouping command  
-  float maxScore = Float.NEGATIVE_INFINITY;  // max score seen in any doclist
-  
-  public void execute() throws IOException {
-    DocListAndSet out = new DocListAndSet();
-    qr.setDocListAndSet(out);
+    FunctionFirstPassGroupingCollector firstPass;
+    FunctionSecondPassGroupingCollector secondPass;
+    // If offset falls outside the number of documents a group can provide, use this collector instead of secondPass
+    TotalHitCountCollector fallBackCollector;
+    FunctionAllGroupsCollector allGroupsCollector;
+    Collection<SearchGroup<MutableValue>> topGroups;
+
+    /**
+     * {@inheritDoc}
+     */
+    protected void prepare() throws IOException {
+      Map context = ValueSource.newContext(searcher);
+      groupBy.createWeight(context, searcher);
+      actualGroupsToFind = getMax(offset, numGroups, maxDoc);
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    protected Collector createFirstPassCollector() throws IOException {
+      // Ok we don't want groups, but do want a total count
+      if (actualGroupsToFind <= 0) {
+        fallBackCollector = new TotalHitCountCollector();
+        return fallBackCollector;
+      }
+
+      sort = sort == null ? Sort.RELEVANCE : sort;
+      firstPass = new FunctionFirstPassGroupingCollector(groupBy, context, searcher.weightSort(sort), actualGroupsToFind);
+      return firstPass;
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    protected Collector createSecondPassCollector() throws IOException {
+      if (actualGroupsToFind <= 0) {
+        allGroupsCollector = new FunctionAllGroupsCollector(groupBy, context);
+        return totalCount == TotalCount.grouped ? allGroupsCollector : null;
+      }
+
+      topGroups = format == Format.grouped ? firstPass.getTopGroups(offset, false) : firstPass.getTopGroups(0, false);
+      if (topGroups == null) {
+        if (totalCount == TotalCount.grouped) {
+          allGroupsCollector = new FunctionAllGroupsCollector(groupBy, context);
+          fallBackCollector = new TotalHitCountCollector();
+          return MultiCollector.wrap(allGroupsCollector, fallBackCollector);
+        } else {
+          fallBackCollector = new TotalHitCountCollector();
+          return fallBackCollector;
+        }
+      }
 
-    filter = cmd.getFilter()!=null ? cmd.getFilter() : searcher.getDocSet(cmd.getFilterList());
-    luceneFilter = filter == null ? null : filter.getTopFilter();
+      int groupdDocsToCollect = getMax(groupOffset, docsPerGroup, maxDoc);
+      groupdDocsToCollect = Math.max(groupdDocsToCollect, 1);
+      secondPass = new FunctionSecondPassGroupingCollector(
+          topGroups, sort, groupSort, groupdDocsToCollect, needScores, needScores, false, groupBy, context
+      );
+
+      if (totalCount == TotalCount.grouped) {
+        allGroupsCollector = new FunctionAllGroupsCollector(groupBy, context);
+        return MultiCollector.wrap(secondPass, allGroupsCollector);
+      } else {
+        return secondPass;
+      }
+    }
 
-    maxDoc = searcher.maxDoc();
+    /**
+     * {@inheritDoc}
+     */
+    protected void finish() throws IOException {
+      result = secondPass != null ? secondPass.getTopGroups(0) : null;
+      if (main) {
+        mainResult = createSimpleResponse();
+        return;
+      }
 
-    needScores = (cmd.getFlags() & SolrIndexSearcher.GET_SCORES) != 0;
-    getDocSet = (cmd.getFlags() & SolrIndexSearcher.GET_DOCSET) != 0;
-    getDocList = (cmd.getFlags() & SolrIndexSearcher.GET_DOCLIST) != 0; // doclist needed for debugging or highlighting
-    query = QueryUtils.makeQueryable(cmd.getQuery());
+      NamedList groupResult = commonResponse();
 
-    for (Command cmd : commands) {
-      cmd.prepare();
-    }
-    
-    List<Collector> collectors = new ArrayList<Collector>(commands.size());
-    for (Command cmd : commands) {
-      Collector collector = cmd.createCollector();
-      if (collector != null)
-        collectors.add(collector);
-    }
+      if (format == Format.simple) {
+        groupResult.add("doclist", createSimpleResponse());
+        return;
+      }
 
-    Collector allCollectors = MultiCollector.wrap(collectors.toArray(new Collector[collectors.size()]));
-    DocSetCollector setCollector = null;
-    if (getDocSet) {
-      setCollector = new DocSetDelegateCollector(maxDoc>>6, maxDoc, allCollectors);
-      allCollectors = setCollector;
-    }
+      List groupList = new ArrayList();
+      groupResult.add("groups", groupList);        // grouped={ key={ groups=[
 
-    searcher.search(query, luceneFilter, allCollectors);
+      if (result == null) {
+        return;
+      }
 
-    if (getDocSet) {
-      qr.setDocSet(setCollector.getDocSet());
-    }
+      // handle case of rows=0
+      if (numGroups == 0) return;
 
-    collectors.clear();
-    for (Command cmd : commands) {
-      Collector collector = cmd.createNextCollector();
-      if (collector != null)
-        collectors.add(collector);
+      for (GroupDocs<MutableValue> group : result.groups) {
+        NamedList nl = new SimpleOrderedMap();
+        groupList.add(nl);                         // grouped={ key={ groups=[ {
+        nl.add("groupValue", group.groupValue.toObject());
+        addDocList(nl, group);
+      }
     }
 
-    if (collectors.size() > 0) {
-      searcher.search(query, luceneFilter, MultiCollector.wrap(collectors.toArray(new Collector[collectors.size()])));
-    }
+    /**
+     * {@inheritDoc}
+     */
+    public int getMatches() {
+      if (result == null && fallBackCollector == null) {
+        return 0;
+      }
 
-    for (Command cmd : commands) {
-      cmd.finish();
+      return result != null ? result.totalHitCount : fallBackCollector.getTotalHits();
     }
 
-    qr.groupedResults = grouped;
-
-    if (getDocList) {
-      int sz = idSet.size();
-      int[] ids = new int[sz];
-      int idx = 0;
-      for (int val : idSet) {
-        ids[idx++] = val;
-      }
-      qr.setDocList(new DocSlice(0, sz, ids, null, maxMatches, maxScore));
+    /**
+     * {@inheritDoc}
+     */
+    protected Integer getNumberOfGroups() {
+      return allGroupsCollector == null ? null : allGroupsCollector.getGroupCount();
     }
-  }
-
-}
 
-
-class SearchGroup {
-  public MutableValue groupValue;
-  int matches;
-  int topDoc;
-  // float topDocScore;  // currently unused
-  int comparatorSlot;
-
-  /***
-  @Override
-  public int hashCode() {
-    return super.hashCode();
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    return groupValue.equalsSameType(((SearchGroup)obj).groupValue);
   }
-  ***/
-}
 
-abstract class GroupCollector extends Collector {
-  /** get the number of matches before grouping or limiting have been applied */
-  public abstract int getMatches();
-}
+  /**
+   * A collector that filters incoming doc ids that are not in the filter
+   */
+  static class FilterCollector extends Collector {
 
-class FilterCollector extends GroupCollector {
-  private final DocSet filter;
-  private final Collector collector;
-  private int docBase;
-  private int matches;
+    final DocSet filter;
+    final Collector collector;
+    int docBase;
+    int matches;
 
-  public FilterCollector(DocSet filter, Collector collector) throws IOException {
-    this.filter = filter;
-    this.collector = collector;
-  }
-
-  @Override
-  public void setScorer(Scorer scorer) throws IOException {
-    collector.setScorer(scorer);
-  }
-
-  @Override
-  public void collect(int doc) throws IOException {
-    matches++;
-    if (filter.exists(doc + docBase)) {
-      collector.collect(doc);
+    public FilterCollector(DocSet filter, Collector collector) throws IOException {
+      this.filter = filter;
+      this.collector = collector;
     }
-  }
 
-  @Override
-  public void setNextReader(AtomicReaderContext context) throws IOException {
-    docBase = context.docBase;
-    collector.setNextReader(context);
-  }
+    public void setScorer(Scorer scorer) throws IOException {
+      collector.setScorer(scorer);
+    }
 
-  @Override
-  public boolean acceptsDocsOutOfOrder() {
-    return collector.acceptsDocsOutOfOrder();
-  }
+    public void collect(int doc) throws IOException {
+      matches++;
+      if (filter.exists(doc + docBase)) {
+        collector.collect(doc);
+      }
+    }
 
-  @Override
-  public int getMatches() {
-    return matches;
-  }
+    public void setNextReader(AtomicReaderContext context) throws IOException {
+      this.docBase = context.docBase;
+      collector.setNextReader(context);
+    }
 
-  Collector getCollector() {
-    return collector;
+    public boolean acceptsDocsOutOfOrder() {
+      return collector.acceptsDocsOutOfOrder();
+    }
   }
-}
 
+  static class FunctionFirstPassGroupingCollector extends AbstractFirstPassGroupingCollector<MutableValue> {
 
+    private final ValueSource groupByVS;
+    private final Map vsContext;
 
+    private DocValues docValues;
+    private DocValues.ValueFiller filler;
+    private MutableValue mval;
 
-/** Finds the top set of groups, grouped by groupByVS when sort == group.sort */
-class TopGroupCollector extends GroupCollector {
-  final int nGroups;
-  final HashMap<MutableValue, SearchGroup> groupMap;
-  TreeSet<SearchGroup> orderedGroups;
-  final ValueSource vs;
-  final Map context;
-  final FieldComparator[] comparators;
-  final int[] reversed;
-
-  DocValues docValues;
-  DocValues.ValueFiller filler;
-  MutableValue mval;
-  Scorer scorer;
-  int docBase;
-  int spareSlot;
-
-  int matches;
-
-  public TopGroupCollector(ValueSource groupByVS, Map vsContext, Sort weightedSort, int nGroups) throws IOException {
-    this.vs = groupByVS;
-    this.context = vsContext;
-    this.nGroups = nGroups = Math.max(1,nGroups);  // we need a minimum of 1 for this collector
-
-    SortField[] sortFields = weightedSort.getSort();
-    this.comparators = new FieldComparator[sortFields.length];
-    this.reversed = new int[sortFields.length];
-    for (int i = 0; i < sortFields.length; i++) {
-      SortField sortField = sortFields[i];
-      reversed[i] = sortField.getReverse() ? -1 : 1;
-      // use nGroups + 1 so we have a spare slot to use for comparing (tracked by this.spareSlot)
-      comparators[i] = sortField.getComparator(nGroups + 1, i);
-    }
-    this.spareSlot = nGroups;
-
-    this.groupMap = new HashMap<MutableValue, SearchGroup>(nGroups);
-  }
-
-  @Override
-  public void setScorer(Scorer scorer) throws IOException {
-    this.scorer = scorer;
-    for (FieldComparator fc : comparators)
-      fc.setScorer(scorer);
-  }
-
-  @Override
-  public void collect(int doc) throws IOException {
-    matches++;
-
-    // if orderedGroups != null, then we already have collected N groups and
-    // can short circuit by comparing this document to the smallest group
-    // without having to even find what group this document belongs to.
-    // Even if this document belongs to a group in the top N, we know that
-    // we don't have to update that group.
-    //
-    // Downside: if the number of unique groups is very low, this is
-    // wasted effort as we will most likely be updating an existing group.
-    if (orderedGroups != null) {
-      for (int i = 0;; i++) {
-        final int c = reversed[i] * comparators[i].compareBottom(doc);
-        if (c < 0) {
-          // Definitely not competitive. So don't even bother to continue
-          return;
-        } else if (c > 0) {
-          // Definitely competitive.
-          break;
-        } else if (i == comparators.length - 1) {
-          // Here c=0. If we're at the last comparator, this doc is not
-          // competitive, since docs are visited in doc Id order, which means
-          // this doc cannot compete with any other document in the queue.
-          return;
-        }
-      }
+    FunctionFirstPassGroupingCollector(ValueSource groupByVS, Map vsContext, Sort groupSort, int topNGroups) throws IOException {
+      super(groupSort, topNGroups);
+      this.groupByVS = groupByVS;
+      this.vsContext = vsContext;
     }
 
-    filler.fillValue(doc);
-    SearchGroup group = groupMap.get(mval);
-    if (group == null) {
-      int num = groupMap.size();
-      if (groupMap.size() < nGroups) {
-        SearchGroup sg = new SearchGroup();
-        sg.groupValue = mval.duplicate();
-        sg.comparatorSlot = num++;
-        sg.matches = 1;
-        sg.topDoc = docBase + doc;
-        // sg.topDocScore = scorer.score();
-        for (FieldComparator fc : comparators)
-          fc.copy(sg.comparatorSlot, doc);
-        groupMap.put(sg.groupValue, sg);
-        if (groupMap.size() == nGroups) {
-          buildSet();
-        }
-        return;
-      }
-
-      // we already tested that the document is competitive, so replace
-      // the smallest group with this new group.
-
-      // remove current smallest group
-      SearchGroup smallest = orderedGroups.pollLast();
-      assert orderedGroups.size() == nGroups -1;
-
-      groupMap.remove(smallest.groupValue);
-
-      // reuse the removed SearchGroup
-      smallest.groupValue.copy(mval);
-      smallest.matches = 1;
-      smallest.topDoc = docBase + doc;
-      // smallest.topDocScore = scorer.score();
-      for (FieldComparator fc : comparators)
-        fc.copy(smallest.comparatorSlot, doc);
-
-      groupMap.put(smallest.groupValue, smallest);
-      orderedGroups.add(smallest);
-      assert orderedGroups.size() == nGroups;
-
-      for (FieldComparator fc : comparators)
-        fc.setBottom(orderedGroups.last().comparatorSlot);
-
-      return;
+    @Override
+    protected MutableValue getDocGroupValue(int doc) {
+      filler.fillValue(doc);
+      return mval;
     }
 
-    //
-    // update existing group
-    //
-
-    group.matches++; // TODO: these aren't valid if the group is every discarded then re-added.  keep track if there have been discards?
-
-    for (int i = 0;; i++) {
-      FieldComparator fc = comparators[i];
-      fc.copy(spareSlot, doc);
-
-      final int c = reversed[i] * fc.compare(group.comparatorSlot, spareSlot);
-      if (c < 0) {
-        // Definitely not competitive.
-        return;
-      } else if (c > 0) {
-        // Definitely competitive.
-        // Set remaining comparators
-        for (int j=i+1; j<comparators.length; j++)
-          comparators[j].copy(spareSlot, doc);
-        break;
-      } else if (i == comparators.length - 1) {
-        // Here c=0. If we're at the last comparator, this doc is not
-        // competitive, since docs are visited in doc Id order, which means
-        // this doc cannot compete with any other document in the queue.
-        return;
+    @Override
+    protected MutableValue copyDocGroupValue(MutableValue groupValue, MutableValue reuse) {
+      if (reuse != null) {
+        reuse.copy(groupValue);
+        return reuse;
       }
+      return groupValue.duplicate();
     }
 
-    // remove before updating the group since lookup is done via comparators
-    // TODO: optimize this
+    @Override
+    public void setNextReader(AtomicReaderContext readerContext) throws IOException {
+      super.setNextReader(readerContext);
+      docValues = groupByVS.getValues(vsContext, readerContext);
+      filler = docValues.getValueFiller();
+      mval = filler.getValue();
+    }
 
-    SearchGroup prevLast = null;
-    if (orderedGroups != null) {
-      prevLast = orderedGroups.last();
-      orderedGroups.remove(group);
-      assert orderedGroups.size() == nGroups-1;
-    }
-
-    group.topDoc = docBase + doc;
-    // group.topDocScore = scorer.score();
-    int tmp = spareSlot; spareSlot = group.comparatorSlot; group.comparatorSlot=tmp;  // swap slots
-
-    // re-add the changed group
-    if (orderedGroups != null) {
-      orderedGroups.add(group);
-      assert orderedGroups.size() == nGroups;
-      SearchGroup newLast = orderedGroups.last();
-      // if we changed the value of the last group, or changed which group was last, then update bottom
-      if (group == newLast || prevLast != newLast) {
-        for (FieldComparator fc : comparators)
-          fc.setBottom(newLast.comparatorSlot);
-      }
+    @Override
+    protected CollectedSearchGroup<MutableValue> pollLast() {
+      return orderedGroups.pollLast();
     }
   }
 
-  void buildSet() {
-    Comparator<SearchGroup> comparator = new Comparator<SearchGroup>() {
-      public int compare(SearchGroup o1, SearchGroup o2) {
-        for (int i = 0;; i++) {
-          FieldComparator fc = comparators[i];
-          int c = reversed[i] * fc.compare(o1.comparatorSlot, o2.comparatorSlot);
-          if (c != 0) {
-            return c;
-          } else if (i == comparators.length - 1) {
-            return o1.topDoc - o2.topDoc;
-          }
-        }
-      }
-    };
-
-    orderedGroups = new TreeSet<SearchGroup>(comparator);
-    orderedGroups.addAll(groupMap.values());
-    if (orderedGroups.size() == 0) return;
-    for (FieldComparator fc : comparators)
-      fc.setBottom(orderedGroups.last().comparatorSlot);
-  }
+  static class TermFirstPassGroupingCollectorJava6 extends TermFirstPassGroupingCollector {
+    public TermFirstPassGroupingCollectorJava6(String groupField, Sort groupSort, int topNGroups) throws IOException {
+      super(groupField, groupSort, topNGroups);
+    }
 
-  @Override
-  public void setNextReader(AtomicReaderContext readerContext) throws IOException {
-    this.docBase = readerContext.docBase;
-    docValues = vs.getValues(context, readerContext);
-    filler = docValues.getValueFiller();
-    mval = filler.getValue();
-    for (int i=0; i<comparators.length; i++)
-      comparators[i] = comparators[i].setNextReader(readerContext);
+    @Override
+    protected CollectedSearchGroup<BytesRef> pollLast() {
+      return orderedGroups.pollLast();
+    }
   }
 
-  @Override
-  public boolean acceptsDocsOutOfOrder() {
-    return false;
-  }
+  static class FunctionSecondPassGroupingCollector extends AbstractSecondPassGroupingCollector<MutableValue> {
 
-  @Override
-  public int getMatches() {
-    return matches;
-  }
-}
+    private final ValueSource groupByVS;
+    private final Map vsContext;
 
+    private DocValues docValues;
+    private DocValues.ValueFiller filler;
+    private MutableValue mval;
 
-class Phase2GroupCollector extends Collector {
-  final HashMap<MutableValue, SearchGroupDocs> groupMap;
-  final ValueSource vs;
-  final Map context;
-
-  DocValues docValues;
-  DocValues.ValueFiller filler;
-  MutableValue mval;
-  Scorer scorer;
-  int docBase;
-
-  // TODO: may want to decouple from the phase1 collector
-  public Phase2GroupCollector(TopGroupCollector topGroups, ValueSource groupByVS, Map vsContext, Sort weightedSort, int docsPerGroup, boolean getScores, int offset) throws IOException {
-    boolean getSortFields = false;
-
-    if (topGroups.orderedGroups == null)
-      topGroups.buildSet();
-
-    groupMap = new HashMap<MutableValue, SearchGroupDocs>(topGroups.groupMap.size());
-    for (SearchGroup group : topGroups.orderedGroups) {
-      if (offset > 0) {
-        offset--;
-        continue;
-      }
-      SearchGroupDocs groupDocs = new SearchGroupDocs();
-      groupDocs.groupValue = group.groupValue;
-      if (weightedSort==null)
-        groupDocs.collector = TopScoreDocCollector.create(docsPerGroup, true);        
-      else
-        groupDocs.collector = TopFieldCollector.create(weightedSort, docsPerGroup, getSortFields, getScores, getScores, true);
-      groupMap.put(groupDocs.groupValue, groupDocs);
-    }
-
-    this.vs = groupByVS;
-    this.context = vsContext;
-  }
-
-  @Override
-  public void setScorer(Scorer scorer) throws IOException {
-    this.scorer = scorer;
-    for (SearchGroupDocs group : groupMap.values())
-      group.collector.setScorer(scorer);
-  }
-
-  @Override
-  public void collect(int doc) throws IOException {
-    filler.fillValue(doc);
-    SearchGroupDocs group = groupMap.get(mval);
-    if (group == null) return;
-    group.collector.collect(doc);
-  }
-
-  @Override
-  public void setNextReader(AtomicReaderContext readerContext) throws IOException {
-    this.docBase = readerContext.docBase;
-    docValues = vs.getValues(context, readerContext);
-    filler = docValues.getValueFiller();
-    mval = filler.getValue();
-    for (SearchGroupDocs group : groupMap.values())
-      group.collector.setNextReader(readerContext);
-  }
-
-  @Override
-  public boolean acceptsDocsOutOfOrder() {
-    return false;
-  }
-}
+    FunctionSecondPassGroupingCollector(Collection<SearchGroup<MutableValue>> searchGroups, Sort groupSort, Sort withinGroupSort, int maxDocsPerGroup, boolean getScores, boolean getMaxScores, boolean fillSortFields, ValueSource groupByVS, Map vsContext) throws IOException {
+      super(searchGroups, groupSort, withinGroupSort, maxDocsPerGroup, getScores, getMaxScores, fillSortFields);
+      this.groupByVS = groupByVS;
+      this.vsContext = vsContext;
+    }
 
-// TODO: merge with SearchGroup or not?
-// ad: don't need to build a new hashmap
-// disad: blows up the size of SearchGroup if we need many of them, and couples implementations
-class SearchGroupDocs {
-  public MutableValue groupValue;
-  TopDocsCollector collector;
-}
+    /**
+     * {@inheritDoc}
+     */
+    protected SearchGroupDocs<MutableValue> retrieveGroup(int doc) throws IOException {
+      filler.fillValue(doc);
+      return groupMap.get(mval);
+    }
 
+    /**
+     * {@inheritDoc}
+     */
+    public void setNextReader(AtomicReaderContext readerContext) throws IOException {
+      super.setNextReader(readerContext);
+      docValues = groupByVS.getValues(vsContext, readerContext);
+      filler = docValues.getValueFiller();
+      mval = filler.getValue();
+    }
+  }
 
 
-class Phase2StringGroupCollector extends Phase2GroupCollector {
-  FieldCache.DocTermsIndex index;
-  final SentinelIntSet ordSet;
-  final SearchGroupDocs[] groups;
-  final BytesRef spare = new BytesRef();
+  static class FunctionAllGroupsCollector extends AbstractAllGroupsCollector<MutableValue> {
 
-  public Phase2StringGroupCollector(TopGroupCollector topGroups, ValueSource groupByVS, Map vsContext, Sort weightedSort, int docsPerGroup, boolean getScores, int offset) throws IOException {
-    super(topGroups, groupByVS, vsContext,weightedSort,docsPerGroup,getScores,offset);
-    ordSet = new SentinelIntSet(groupMap.size(), -1);
-    groups = new SearchGroupDocs[ordSet.keys.length];
-  }
+    private final Map vsContext;
+    private final ValueSource groupBy;
+    private final SortedSet<MutableValue> groups = new TreeSet<MutableValue>();
 
-  @Override
-  public void setScorer(Scorer scorer) throws IOException {
-    this.scorer = scorer;
-    for (SearchGroupDocs group : groupMap.values())
-      group.collector.setScorer(scorer);
-  }
+    private DocValues docValues;
+    private DocValues.ValueFiller filler;
+    private MutableValue mval;
 
-  @Override
-  public void collect(int doc) throws IOException {
-    int slot = ordSet.find(index.getOrd(doc));
-    if (slot >= 0) {
-      groups[slot].collector.collect(doc);
+    FunctionAllGroupsCollector(ValueSource groupBy, Map vsContext) {
+      this.vsContext = vsContext;
+      this.groupBy = groupBy;
     }
-  }
 
-  @Override
-  public void setNextReader(AtomicReaderContext context) throws IOException {
-    super.setNextReader(context);
-    index = ((StringIndexDocValues)docValues).getDocTermsIndex();
+    public Collection<MutableValue> getGroups() {
+      return groups;
+    }
 
-    ordSet.clear();
-    for (SearchGroupDocs group : groupMap.values()) {
-      MutableValueStr gv = (MutableValueStr)group.groupValue;
-      int ord = 0;
-      if (gv.exists) {
-        ord = index.binarySearchLookup(((MutableValueStr)group.groupValue).value, spare);
-      }
-      if (ord >= 0) {
-        int slot = ordSet.put(ord);
-        groups[slot] = group;
+    public void collect(int doc) throws IOException {
+      filler.fillValue(doc);
+      if (!groups.contains(mval)) {
+        groups.add(mval.duplicate());
       }
     }
-  }
 
-  @Override
-  public boolean acceptsDocsOutOfOrder() {
-    return false;
+    /**
+     * {@inheritDoc}
+     */
+    public void setNextReader(AtomicReaderContext context) throws IOException {
+      docValues = groupBy.getValues(vsContext, context);
+      filler = docValues.getValueFiller();
+      mval = filler.getValue();
+    }
+
   }
-}
\ No newline at end of file
+
+}

Modified: lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/HashDocSet.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/HashDocSet.java?rev=1139458&r1=1139457&r2=1139458&view=diff
==============================================================================
--- lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/HashDocSet.java (original)
+++ lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/HashDocSet.java Fri Jun 24 23:07:10 2011
@@ -26,7 +26,7 @@ import org.apache.lucene.util.BitUtil;
  * in the set because it takes up less memory and is faster to iterate and take
  * set intersections.
  *
- * @version $Id$
+ *
  * @since solr 0.9
  */
 public final class HashDocSet extends DocSetBase {

Modified: lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/JoinQParserPlugin.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/JoinQParserPlugin.java?rev=1139458&r1=1139457&r2=1139458&view=diff
==============================================================================
--- lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/JoinQParserPlugin.java (original)
+++ lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/JoinQParserPlugin.java Fri Jun 24 23:07:10 2011
@@ -22,7 +22,6 @@ import org.apache.lucene.search.*;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.OpenBitSet;
-import org.apache.lucene.util.StringHelper;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
@@ -290,14 +289,14 @@ class JoinQuery extends Query {
       Bits toDeletedDocs = fromSearcher == toSearcher ? fromDeletedDocs : MultiFields.getDeletedDocs(toSearcher.getIndexReader());
 
       fromDeState = new SolrIndexSearcher.DocsEnumState();
-      fromDeState.fieldName = StringHelper.intern(fromField);
+      fromDeState.fieldName = fromField;
       fromDeState.deletedDocs = fromDeletedDocs;
       fromDeState.termsEnum = termsEnum;
       fromDeState.docsEnum = null;
       fromDeState.minSetSizeCached = minDocFreqFrom;
 
       toDeState = new SolrIndexSearcher.DocsEnumState();
-      toDeState.fieldName = StringHelper.intern(toField);
+      toDeState.fieldName = toField;
       toDeState.deletedDocs = toDeletedDocs;
       toDeState.termsEnum = toTermsEnum;
       toDeState.docsEnum = null;

Modified: lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/LRUCache.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/LRUCache.java?rev=1139458&r1=1139457&r2=1139458&view=diff
==============================================================================
--- lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/LRUCache.java (original)
+++ lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/LRUCache.java Fri Jun 24 23:07:10 2011
@@ -29,7 +29,7 @@ import java.net.URL;
 
 
 /**
- * @version $Id$
+ *
  */
 public class LRUCache<K,V> extends SolrCacheBase implements SolrCache<K,V> {
 

Modified: lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/MissingStringLastComparatorSource.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/MissingStringLastComparatorSource.java?rev=1139458&r1=1139457&r2=1139458&view=diff
==============================================================================
--- lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/MissingStringLastComparatorSource.java (original)
+++ lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/MissingStringLastComparatorSource.java Fri Jun 24 23:07:10 2011
@@ -56,7 +56,7 @@ public class MissingStringLastComparator
 
 // Copied from Lucene's TermOrdValComparator and modified since the Lucene version couldn't
 // be extended.
-class TermOrdValComparator_SML extends FieldComparator {
+class TermOrdValComparator_SML extends FieldComparator<BytesRef> {
   private static final int NULL_ORD = Integer.MAX_VALUE;
 
   private final int[] ords;
@@ -98,7 +98,7 @@ class TermOrdValComparator_SML extends F
   }
 
   @Override
-  public Comparable<?> value(int slot) {
+  public BytesRef value(int slot) {
     throw new UnsupportedOperationException();
   }
 
@@ -111,7 +111,7 @@ class TermOrdValComparator_SML extends F
   // ords) per-segment comparator.  NOTE: this is messy;
   // we do this only because hotspot can't reliably inline
   // the underlying array access when looking up doc->ord
-  private static abstract class PerSegmentComparator extends FieldComparator {
+  private static abstract class PerSegmentComparator extends FieldComparator<BytesRef> {
     protected TermOrdValComparator_SML parent;
     protected final int[] ords;
     protected final BytesRef[] values;
@@ -199,7 +199,7 @@ class TermOrdValComparator_SML extends F
     }
 
     @Override
-    public Comparable<?> value(int slot) {
+    public BytesRef value(int slot) {
       return values==null ? parent.NULL_VAL : values[slot];
     }
   }

Modified: lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/QParser.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/QParser.java?rev=1139458&r1=1139457&r2=1139458&view=diff
==============================================================================
--- lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/QParser.java (original)
+++ lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/QParser.java Fri Jun 24 23:07:10 2011
@@ -31,7 +31,7 @@ import java.util.*;
 /**
  * <b>Note: This API is experimental and may change in non backward-compatible ways in the future</b>
  * 
- * @version $Id$
+ *
  */
 public abstract class QParser {
   protected String qstr;

Modified: lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/QueryParsing.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/QueryParsing.java?rev=1139458&r1=1139457&r2=1139458&view=diff
==============================================================================
--- lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/QueryParsing.java (original)
+++ lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/QueryParsing.java Fri Jun 24 23:07:10 2011
@@ -32,6 +32,8 @@ import org.apache.lucene.search.SortFiel
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TermRangeQuery;
 import org.apache.lucene.search.WildcardQuery;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CharsRef;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.params.MapSolrParams;
 import org.apache.solr.common.params.SolrParams;
@@ -52,7 +54,7 @@ import java.util.Map;
 /**
  * Collection of static utilities useful for query parsing.
  *
- * @version $Id$
+ *
  */
 public class QueryParsing {
   public static final String OP = "q.op";  // the SolrParam used to override the QueryParser "default operator"
@@ -86,11 +88,16 @@ public class QueryParsing {
 
 
   // note to self: something needs to detect infinite recursion when parsing queries
-  static int parseLocalParams(String txt, int start, Map<String, String> target, SolrParams params) throws ParseException {
+  public static int parseLocalParams(String txt, int start, Map<String, String> target, SolrParams params) throws ParseException {
+    return parseLocalParams(txt, start, target, params, LOCALPARAM_START, LOCALPARAM_END);
+  }
+
+
+  public static int parseLocalParams(String txt, int start, Map<String, String> target, SolrParams params, String startString, char endChar) throws ParseException {
     int off = start;
-    if (!txt.startsWith(LOCALPARAM_START, off)) return start;
+    if (!txt.startsWith(startString, off)) return start;
     StrParser p = new StrParser(txt, start, txt.length());
-    p.pos += 2; // skip over "{!"
+    p.pos += startString.length(); // skip over "{!"
 
     for (; ;) {
       /*
@@ -99,13 +106,13 @@ public class QueryParsing {
       }
       */
       char ch = p.peek();
-      if (ch == LOCALPARAM_END) {
+      if (ch == endChar) {
         return p.pos + 1;
       }
 
       String id = p.getId();
       if (id.length() == 0) {
-        throw new ParseException("Expected identifier '}' parsing local params '" + txt + '"');
+        throw new ParseException("Expected ending character '" + endChar + "' parsing local params '" + txt + '"');
 
       }
       String val = null;
@@ -129,7 +136,7 @@ public class QueryParsing {
         if (ch == '\"' || ch == '\'') {
           val = p.getQuotedString();
         } else {
-          // read unquoted literal ended by whitespace or '}'
+          // read unquoted literal ended by whitespace or endChar (normally '}')
           // there is no escaping.
           int valStart = p.pos;
           for (; ;) {
@@ -137,7 +144,7 @@ public class QueryParsing {
               throw new ParseException("Missing end to unquoted value starting at " + valStart + " str='" + txt + "'");
             }
             char c = p.val.charAt(p.pos);
-            if (c == LOCALPARAM_END || Character.isWhitespace(c)) {
+            if (c == endChar || Character.isWhitespace(c)) {
               val = p.val.substring(valStart, p.pos);
               break;
             }
@@ -155,6 +162,7 @@ public class QueryParsing {
     }
   }
 
+
   public static String encodeLocalParamVal(String val) {
     int len = val.length();
     int i = 0;
@@ -313,10 +321,10 @@ public class QueryParsing {
           if (top) {
             lst.add(SortField.FIELD_SCORE);
           } else {
-            lst.add(new SortField(null, SortField.SCORE, true));
+            lst.add(new SortField(null, SortField.Type.SCORE, true));
           }
         } else if (DOCID.equals(field)) {
-          lst.add(new SortField(null, SortField.DOC, top));
+          lst.add(new SortField(null, SortField.Type.DOC, top));
         } else {
           // try to find the field
           SchemaField sf = req.getSchema().getFieldOrNull(field);
@@ -382,6 +390,22 @@ public class QueryParsing {
     }
   }
 
+  static void writeFieldVal(BytesRef val, FieldType ft, Appendable out, int flags) throws IOException {
+    if (ft != null) {
+      try {
+        CharsRef readable = new CharsRef();
+        ft.indexedToReadable(val, readable);
+        out.append(readable);
+      } catch (Exception e) {
+        out.append("EXCEPTION(val=");
+        out.append(val.utf8ToString());
+        out.append(")");
+      }
+    } else {
+      out.append(val.utf8ToString());
+    }
+  }
+
   /**
    * @see #toString(Query,IndexSchema)
    */
@@ -392,14 +416,14 @@ public class QueryParsing {
       TermQuery q = (TermQuery) query;
       Term t = q.getTerm();
       FieldType ft = writeFieldName(t.field(), schema, out, flags);
-      writeFieldVal(t.text(), ft, out, flags);
+      writeFieldVal(t.bytes(), ft, out, flags);
     } else if (query instanceof TermRangeQuery) {
       TermRangeQuery q = (TermRangeQuery) query;
       String fname = q.getField();
       FieldType ft = writeFieldName(fname, schema, out, flags);
       out.append(q.includesLower() ? '[' : '{');
-      String lt = q.getLowerTerm().utf8ToString();
-      String ut = q.getUpperTerm().utf8ToString();
+      BytesRef lt = q.getLowerTerm();
+      BytesRef ut = q.getUpperTerm();
       if (lt == null) {
         out.append('*');
       } else {
@@ -441,7 +465,7 @@ public class QueryParsing {
       BooleanQuery q = (BooleanQuery) query;
       boolean needParens = false;
 
-      if (q.getBoost() != 1.0 || q.getMinimumNumberShouldMatch() != 0) {
+      if (q.getBoost() != 1.0 || q.getMinimumNumberShouldMatch() != 0 || q.isCoordDisabled()) {
         needParens = true;
       }
       if (needParens) {
@@ -487,6 +511,9 @@ public class QueryParsing {
         out.append('~');
         out.append(Integer.toString(q.getMinimumNumberShouldMatch()));
       }
+      if (q.isCoordDisabled()) {
+        out.append("/no_coord");
+      }
 
     } else if (query instanceof PrefixQuery) {
       PrefixQuery q = (PrefixQuery) query;

Modified: lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/QueryResultKey.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/QueryResultKey.java?rev=1139458&r1=1139457&r2=1139458&view=diff
==============================================================================
--- lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/QueryResultKey.java (original)
+++ lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/QueryResultKey.java Fri Jun 24 23:07:10 2011
@@ -24,7 +24,7 @@ import java.io.IOException;
 import java.util.List;
 
 /** A hash key encapsulating a query, a list of filters, and a sort
- * @version $Id$
+ *
  */
 public final class QueryResultKey {
   final Query query;

Modified: lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/QueryUtils.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/QueryUtils.java?rev=1139458&r1=1139457&r2=1139458&view=diff
==============================================================================
--- lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/QueryUtils.java (original)
+++ lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/QueryUtils.java Fri Jun 24 23:07:10 2011
@@ -25,7 +25,7 @@ import org.apache.lucene.search.MatchAll
 import java.util.List;
 
 /**
- * @version $Id$
+ *
  */
 public class QueryUtils {
 

Modified: lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/ReturnFields.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/ReturnFields.java?rev=1139458&r1=1139457&r2=1139458&view=diff
==============================================================================
--- lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/ReturnFields.java (original)
+++ lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/ReturnFields.java Fri Jun 24 23:07:10 2011
@@ -16,20 +16,16 @@
  */
 package org.apache.solr.search;
 
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Set;
+import java.util.*;
 
 import org.apache.commons.io.FilenameUtils;
 import org.apache.lucene.queryParser.ParseException;
 import org.apache.lucene.search.Query;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.params.CommonParams;
+import org.apache.solr.common.params.MapSolrParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.SolrCore;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.response.transform.DocTransformer;
 import org.apache.solr.response.transform.DocTransformers;
@@ -46,7 +42,7 @@ import org.slf4j.LoggerFactory;
 /**
  * A class representing the return fields
  *
- * @version $Id$
+ *
  * @since solr 4.0
  */
 public class ReturnFields
@@ -211,9 +207,37 @@ public class ReturnFields
           sp.pos = start;
         }
 
-        // let's try it as a function instead
         String funcStr = sp.val.substring(start);
 
+        // Is it an augmenter of the form [augmenter_name foo=1 bar=myfield]?
+        // This is identical to localParams syntax except it uses [] instead of {!}
+
+        if (funcStr.startsWith("[")) {
+          Map<String,String> augmenterArgs = new HashMap<String,String>();
+          int end = QueryParsing.parseLocalParams(funcStr, 0, augmenterArgs, req.getParams(), "[", ']');
+          sp.pos += end;
+          
+          // [foo] is short for [type=foo] in localParams syntax
+          String augmenterName = augmenterArgs.remove("type"); 
+          String disp = key;
+          if( disp == null ) {
+            disp = '['+augmenterName+']';
+          }
+
+          TransformerFactory factory = req.getCore().getTransformerFactory( augmenterName );
+          if( factory != null ) {
+            MapSolrParams augmenterParams = new MapSolrParams( augmenterArgs );
+            augmenters.addTransformer( factory.create(disp, augmenterParams, req) );
+          }
+          else {
+            // unknown transformer?
+          }
+          addField(field, disp, augmenters, req);
+          continue;
+        }
+
+
+        // let's try it as a function instead
         QParser parser = QParser.getParser(funcStr, FunctionQParserPlugin.NAME, req);
         Query q = null;
         ValueSource vs = null;
@@ -311,26 +335,6 @@ public class ReturnFields
       _wantsScore = true;
       augmenters.addTransformer( new ScoreAugmenter( disp ) );
     }
-    else if( field.charAt(0)=='_'&& field.charAt(field.length()-1)=='_' ) {
-      String name = field;
-      String args = null;
-      int idx = field.indexOf( ':' );
-      if( idx > 0 ) {
-        name = field.substring(1,idx);
-        args = field.substring(idx+1,field.length()-1);
-      }
-      else {
-        name = field.substring(1,field.length()-1 );
-      }
-
-      TransformerFactory factory = req.getCore().getTransformerFactory( name );
-      if( factory != null ) {
-        augmenters.addTransformer( factory.create(disp, args, req) );
-      }
-      else {
-        // unknown field?
-      }
-    }
   }
 
   public Set<String> getLuceneFieldNames()

Modified: lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/SolrCache.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/SolrCache.java?rev=1139458&r1=1139457&r2=1139458&view=diff
==============================================================================
--- lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/SolrCache.java (original)
+++ lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/SolrCache.java Fri Jun 24 23:07:10 2011
@@ -28,7 +28,7 @@ import java.io.IOException;
 /**
  * Primary API for dealing with Solr's internal caches.
  * 
- * @version $Id$
+ *
  */
 public interface SolrCache<K,V> extends SolrInfoMBean {
   public final static Logger log = LoggerFactory.getLogger(SolrCache.class);

Modified: lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/SolrFieldCacheMBean.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/SolrFieldCacheMBean.java?rev=1139458&r1=1139457&r2=1139458&view=diff
==============================================================================
--- lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/SolrFieldCacheMBean.java (original)
+++ lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/SolrFieldCacheMBean.java Fri Jun 24 23:07:10 2011
@@ -33,7 +33,7 @@ import org.apache.lucene.util.FieldCache
 /**
 * A SolrInfoMBean that provides introspection of the Lucene FieldCache, this is <b>NOT</b> a cache that is managed by Solr.
  *
- * @version $Id$
+ *
  */
 public class SolrFieldCacheMBean implements SolrInfoMBean {
 

Modified: lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/SolrIndexSearcher.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/SolrIndexSearcher.java?rev=1139458&r1=1139457&r2=1139458&view=diff
==============================================================================
--- lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/SolrIndexSearcher.java (original)
+++ lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/SolrIndexSearcher.java Fri Jun 24 23:07:10 2011
@@ -54,7 +54,7 @@ import java.util.concurrent.atomic.Atomi
  * SolrIndexSearcher adds schema awareness and caching functionality
  * over the lucene IndexSearcher.
  *
- * @version $Id$
+ *
  * @since solr 0.9
  */
 public class SolrIndexSearcher extends IndexSearcher implements SolrInfoMBean {
@@ -475,26 +475,7 @@ public class SolrIndexSearcher extends I
 
   /** Returns a weighted sort according to this searcher */
   public Sort weightSort(Sort sort) throws IOException {
-    if (sort == null) return null;
-    SortField[] sorts = sort.getSort();
-
-    boolean needsWeighting = false;
-    for (SortField sf : sorts) {
-      if (sf instanceof SolrSortField) {
-        needsWeighting = true;
-        break;
-      }
-    }
-    if (!needsWeighting) return sort;
-
-    SortField[] newSorts = Arrays.copyOf(sorts, sorts.length);
-    for (int i=0; i<newSorts.length; i++) {
-      if (newSorts[i] instanceof SolrSortField) {
-        newSorts[i] = ((SolrSortField)newSorts[i]).weight(this);
-      }
-    }
-
-    return new Sort(newSorts);
+    return (sort != null) ? sort.rewrite(this) : null;
   }
 
 
@@ -734,7 +715,7 @@ public class SolrIndexSearcher extends I
     TermQuery key = null;
 
     if (useCache) {
-      key = new TermQuery(new Term(deState.fieldName, new BytesRef(deState.termsEnum.term()), false));
+      key = new TermQuery(new Term(deState.fieldName, new BytesRef(deState.termsEnum.term())));
       DocSet result = filterCache.get(key);
       if (result != null) return result;
     }
@@ -1043,7 +1024,7 @@ public class SolrIndexSearcher extends I
       useFilterCache=true;
       SortField[] sfields = cmd.getSort().getSort();
       for (SortField sf : sfields) {
-        if (sf.getType() == SortField.SCORE) {
+        if (sf.getType() == SortField.Type.SCORE) {
           useFilterCache=false;
           break;
         }

Modified: lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/SortSpec.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/SortSpec.java?rev=1139458&r1=1139457&r2=1139458&view=diff
==============================================================================
--- lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/SortSpec.java (original)
+++ lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/SortSpec.java Fri Jun 24 23:07:10 2011
@@ -48,7 +48,7 @@ public class SortSpec 
   public static boolean includesScore(Sort sort) {
     if (sort==null) return true;
     for (SortField sf : sort.getSort()) {
-      if (sf.getType() == SortField.SCORE) return true;
+      if (sf.getType() == SortField.Type.SCORE) return true;
     }
     return false;
   }

Modified: lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/Sorting.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/Sorting.java?rev=1139458&r1=1139457&r2=1139458&view=diff
==============================================================================
--- lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/Sorting.java (original)
+++ lucene/dev/branches/solr2193/solr/src/java/org/apache/solr/search/Sorting.java Fri Jun 24 23:07:10 2011
@@ -22,7 +22,7 @@ import org.apache.lucene.search.*;
 /**
  * Extra lucene sorting utilities & convenience methods
  *
- * @version $Id$
+ *
  *
  */
 
@@ -42,12 +42,12 @@ public class Sorting {
   public static SortField getStringSortField(String fieldName, boolean reverse, boolean nullLast, boolean nullFirst) {
     if (nullLast) {
       if (!reverse) return new SortField(fieldName, nullStringLastComparatorSource);
-      else return new SortField(fieldName, SortField.STRING, true);
+      else return new SortField(fieldName, SortField.Type.STRING, true);
     } else if (nullFirst) {
       if (reverse) return new SortField(fieldName, nullStringLastComparatorSource, true);
-      else return new SortField(fieldName, SortField.STRING, false);
+      else return new SortField(fieldName, SortField.Type.STRING, false);
     } else {
-      return new SortField(fieldName, SortField.STRING, reverse);
+      return new SortField(fieldName, SortField.Type.STRING, reverse);
     }
   }
 



Mime
View raw message