lucene-java-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From busc...@apache.org
Subject svn commit: r822587 [1/2] - in /lucene/java/trunk: ./ contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/ contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ contrib/ant/src/java/org/apache/lucene/ant/ contrib/ant/src/...
Date Wed, 07 Oct 2009 05:08:24 GMT
Author: buschmi
Date: Wed Oct  7 05:08:22 2009
New Revision: 822587

URL: http://svn.apache.org/viewvc?rev=822587&view=rev
Log:
LUCENE-1856: Remove Hits.

Modified:
    lucene/java/trunk/CHANGES.txt
    lucene/java/trunk/common-build.xml
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
    lucene/java/trunk/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java
    lucene/java/trunk/contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java
    lucene/java/trunk/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
    lucene/java/trunk/contrib/lucli/src/java/lucli/LuceneMethods.java
    lucene/java/trunk/contrib/misc/src/test/org/apache/lucene/misc/ChainedFilterTest.java
    lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java
    lucene/java/trunk/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java
    lucene/java/trunk/contrib/regex/src/test/org/apache/lucene/search/regex/TestRegexQuery.java
    lucene/java/trunk/contrib/regex/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java
    lucene/java/trunk/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java
    lucene/java/trunk/contrib/spellchecker/src/java/org/apache/lucene/search/spell/SpellChecker.java
    lucene/java/trunk/contrib/swing/src/java/org/apache/lucene/swing/models/ListSearcher.java
    lucene/java/trunk/contrib/swing/src/java/org/apache/lucene/swing/models/TableSearcher.java
    lucene/java/trunk/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java
    lucene/java/trunk/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynLookup.java
    lucene/java/trunk/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java
    lucene/java/trunk/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestQueryTemplateManager.java
    lucene/java/trunk/src/java/org/apache/lucene/search/Searcher.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestTermRangeFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestTermRangeQuery.java

Modified: lucene/java/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/java/trunk/CHANGES.txt?rev=822587&r1=822586&r2=822587&view=diff
==============================================================================
--- lucene/java/trunk/CHANGES.txt (original)
+++ lucene/java/trunk/CHANGES.txt Wed Oct  7 05:08:22 2009
@@ -28,6 +28,9 @@
   and NumericRangeFilter now have Integer, Long, Float, Double as type param.
   (Uwe Schindler)
 
+* LUCENE-1856: Remove Hits and all references from core and contrib.
+  (Michael Busch)
+
 Bug fixes
 
 New features

Modified: lucene/java/trunk/common-build.xml
URL: http://svn.apache.org/viewvc/lucene/java/trunk/common-build.xml?rev=822587&r1=822586&r2=822587&view=diff
==============================================================================
--- lucene/java/trunk/common-build.xml (original)
+++ lucene/java/trunk/common-build.xml Wed Oct  7 05:08:22 2009
@@ -42,7 +42,7 @@
   <property name="Name" value="Lucene"/>
   <property name="dev.version" value="3.0-dev"/>
   <property name="version" value="${dev.version}"/>
-  <property name="compatibility.tag" value="lucene_2_9_back_compat_tests_20091005"/>
+  <property name="compatibility.tag" value="lucene_2_9_back_compat_tests_20091006"/>
   <property name="spec.version" value="${version}"/>	
   <property name="year" value="2000-${current.year}"/>
   <property name="final.name" value="lucene-${name}-${version}"/>

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java?rev=822587&r1=822586&r2=822587&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java Wed Oct  7 05:08:22 2009
@@ -16,8 +16,12 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import java.io.IOException;
+import java.io.Reader;
+import java.io.StringReader;
+
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.LetterTokenizer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
@@ -30,15 +34,10 @@
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queryParser.ParseException;
 import org.apache.lucene.queryParser.QueryParser;
-import org.apache.lucene.search.Hits;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.store.RAMDirectory;
 
-import java.io.IOException;
-import java.io.Reader;
-import java.io.StringReader;
-
 public class QueryAutoStopWordAnalyzerTest extends BaseTokenStreamTestCase {
   String variedFieldValues[] = {"the", "quick", "brown", "fox", "jumped", "over", "the", "lazy", "boring", "dog"};
   String repetitiveFieldValues[] = {"boring", "boring", "vaguelyboring"};
@@ -72,18 +71,18 @@
   }
 
   //Helper method to query
-  private Hits search(Analyzer a, String queryString) throws IOException, ParseException {
+  private int search(Analyzer a, String queryString) throws IOException, ParseException {
     QueryParser qp = new QueryParser("repetitiveField", a);
     Query q = qp.parse(queryString);
-    return new IndexSearcher(reader).search(q);
+    return new IndexSearcher(reader).search(q, null, 1000).totalHits;
   }
 
   public void testUninitializedAnalyzer() throws Exception {
     //Note: no calls to "addStopWord"
     String query = "variedField:quick repetitiveField:boring";
-    Hits h = search(protectedAnalyzer, query);
-    Hits h2 = search(appAnalyzer, query);
-    assertEquals("No filtering test", h.length(), h2.length());
+    int numHits1 = search(protectedAnalyzer, query);
+    int numHits2 = search(appAnalyzer, query);
+    assertEquals("No filtering test", numHits1, numHits2);
   }
 
   /*
@@ -91,8 +90,8 @@
     */
   public void testDefaultAddStopWordsIndexReader() throws Exception {
     protectedAnalyzer.addStopWords(reader);
-    Hits h = search(protectedAnalyzer, "repetitiveField:boring");
-    assertEquals("Default filter should remove all docs", 0, h.length());
+    int numHits = search(protectedAnalyzer, "repetitiveField:boring");
+    assertEquals("Default filter should remove all docs", 0, numHits);
   }
 
 
@@ -101,26 +100,26 @@
     */
   public void testAddStopWordsIndexReaderInt() throws Exception {
     protectedAnalyzer.addStopWords(reader, 1f / 2f);
-    Hits h = search(protectedAnalyzer, "repetitiveField:boring");
-    assertEquals("A filter on terms in > one half of docs remove boring docs", 0, h.length());
+    int numHits = search(protectedAnalyzer, "repetitiveField:boring");
+    assertEquals("A filter on terms in > one half of docs remove boring docs", 0, numHits);
 
-    h = search(protectedAnalyzer, "repetitiveField:vaguelyboring");
-    assertTrue("A filter on terms in > half of docs should not remove vaguelyBoring docs", h.length() > 1);
+    numHits = search(protectedAnalyzer, "repetitiveField:vaguelyboring");
+    assertTrue("A filter on terms in > half of docs should not remove vaguelyBoring docs", numHits > 1);
 
     protectedAnalyzer.addStopWords(reader, 1f / 4f);
-    h = search(protectedAnalyzer, "repetitiveField:vaguelyboring");
-    assertEquals("A filter on terms in > quarter of docs should remove vaguelyBoring docs", 0, h.length());
+    numHits = search(protectedAnalyzer, "repetitiveField:vaguelyboring");
+    assertEquals("A filter on terms in > quarter of docs should remove vaguelyBoring docs", 0, numHits);
   }
 
 
   public void testAddStopWordsIndexReaderStringFloat() throws Exception {
     protectedAnalyzer.addStopWords(reader, "variedField", 1f / 2f);
-    Hits h = search(protectedAnalyzer, "repetitiveField:boring");
-    assertTrue("A filter on one Field should not affect queris on another", h.length() > 0);
+    int numHits = search(protectedAnalyzer, "repetitiveField:boring");
+    assertTrue("A filter on one Field should not affect queris on another", numHits > 0);
 
     protectedAnalyzer.addStopWords(reader, "repetitiveField", 1f / 2f);
-    h = search(protectedAnalyzer, "repetitiveField:boring");
-    assertEquals("A filter on the right Field should affect queries on it", h.length(), 0);
+    numHits = search(protectedAnalyzer, "repetitiveField:boring");
+    assertEquals("A filter on the right Field should affect queries on it", numHits, 0);
   }
 
   public void testAddStopWordsIndexReaderStringInt() throws Exception {
@@ -138,11 +137,11 @@
 
   public void testNoFieldNamePollution() throws Exception {
     protectedAnalyzer.addStopWords(reader, "repetitiveField", 10);
-    Hits h = search(protectedAnalyzer, "repetitiveField:boring");
-    assertEquals("Check filter set up OK", 0, h.length());
+    int numHits = search(protectedAnalyzer, "repetitiveField:boring");
+    assertEquals("Check filter set up OK", 0, numHits);
 
-    h = search(protectedAnalyzer, "variedField:boring");
-    assertTrue("Filter should not prevent stopwords in one field being used in another ", h.length() > 0);
+    numHits = search(protectedAnalyzer, "variedField:boring");
+    assertTrue("Filter should not prevent stopwords in one field being used in another ", numHits > 0);
 
   }
   
@@ -162,8 +161,8 @@
   public void testLUCENE1678BWComp() throws Exception {
     QueryAutoStopWordAnalyzer a = new QueryAutoStopWordSubclassAnalyzer();
     a.addStopWords(reader, "repetitiveField", 10);
-    Hits h = search(a, "repetitiveField:boring");
-    assertFalse(h.length() == 0);
+    int numHits = search(a, "repetitiveField:boring");
+    assertFalse(numHits == 0);
   }
   
   /*
@@ -183,10 +182,10 @@
   public void testWrappingNonReusableAnalyzer() throws Exception {
     QueryAutoStopWordAnalyzer a = new QueryAutoStopWordAnalyzer(new NonreusableAnalyzer());
     a.addStopWords(reader, 10);
-    Hits h = search(a, "repetitiveField:boring");
-    assertTrue(h.length() == 0);
-    h = search(a, "repetitiveField:vaguelyboring");
-    assertTrue(h.length() == 0);
+    int numHits = search(a, "repetitiveField:boring");
+    assertTrue(numHits == 0);
+    numHits = search(a, "repetitiveField:vaguelyboring");
+    assertTrue(numHits == 0);
   }
   
   public void testTokenStream() throws Exception {

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java?rev=822587&r1=822586&r2=822587&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java Wed Oct  7 05:08:22 2009
@@ -20,26 +20,26 @@
 import java.io.Reader;
 import java.io.StringReader;
 
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.LetterTokenizer;
-import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.analysis.WhitespaceTokenizer;
-import org.apache.lucene.analysis.tokenattributes.*;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.Hits;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 
@@ -82,20 +82,20 @@
     return new IndexSearcher(dir, true);
   }
 
-  protected Hits queryParsingTest(Analyzer analyzer, String qs) throws Exception {
+  protected ScoreDoc[] queryParsingTest(Analyzer analyzer, String qs) throws Exception {
     searcher = setUpSearcher(analyzer);
 
     QueryParser qp = new QueryParser("content", analyzer);
 
     Query q = qp.parse(qs);
 
-    return searcher.search(q);
+    return searcher.search(q, null, 1000).scoreDocs;
   }
 
-  protected void compareRanks(Hits hits, int[] ranks) throws Exception {
-    assertEquals(ranks.length, hits.length());
+  protected void compareRanks(ScoreDoc[] hits, int[] ranks) throws Exception {
+    assertEquals(ranks.length, hits.length);
     for (int i = 0; i < ranks.length; i++) {
-      assertEquals(ranks[i], hits.id(i));
+      assertEquals(ranks[i], hits[i].doc);
     }
   }
 
@@ -104,7 +104,7 @@
    * tokenizes on whitespace.
    */
   public void testShingleAnalyzerWrapperQueryParsing() throws Exception {
-    Hits hits = queryParsingTest(new ShingleAnalyzerWrapper
+    ScoreDoc[] hits = queryParsingTest(new ShingleAnalyzerWrapper
                                      (new WhitespaceAnalyzer(), 2),
                                  "test sentence");
     int[] ranks = new int[] { 1, 2, 0 };
@@ -115,7 +115,7 @@
    * This one fails with an exception.
    */
   public void testShingleAnalyzerWrapperPhraseQueryParsingFails() throws Exception {
-    Hits hits = queryParsingTest(new ShingleAnalyzerWrapper
+    ScoreDoc[] hits = queryParsingTest(new ShingleAnalyzerWrapper
                                      (new WhitespaceAnalyzer(), 2),
                                  "\"this sentence\"");
     int[] ranks = new int[] { 0 };
@@ -126,7 +126,7 @@
    * This one works, actually.
    */
   public void testShingleAnalyzerWrapperPhraseQueryParsing() throws Exception {
-    Hits hits = queryParsingTest(new ShingleAnalyzerWrapper
+    ScoreDoc[] hits = queryParsingTest(new ShingleAnalyzerWrapper
                                      (new WhitespaceAnalyzer(), 2),
                                  "\"test sentence\"");
     int[] ranks = new int[] { 1 };
@@ -137,7 +137,7 @@
    * Same as above, is tokenized without using the analyzer.
    */
   public void testShingleAnalyzerWrapperRequiredQueryParsing() throws Exception {
-    Hits hits = queryParsingTest(new ShingleAnalyzerWrapper
+    ScoreDoc[] hits = queryParsingTest(new ShingleAnalyzerWrapper
                                      (new WhitespaceAnalyzer(), 2),
                                  "+test +sentence");
     int[] ranks = new int[] { 1, 2 };
@@ -166,7 +166,7 @@
       q.add(new Term("content", termText), j);
     }
 
-    Hits hits = searcher.search(q);
+    ScoreDoc[] hits = searcher.search(q, null, 1000).scoreDocs;
     int[] ranks = new int[] { 0 };
     compareRanks(hits, ranks);
   }
@@ -193,7 +193,7 @@
             BooleanClause.Occur.SHOULD);
     }
 
-    Hits hits = searcher.search(q);
+    ScoreDoc[] hits = searcher.search(q, null, 1000).scoreDocs;
     int[] ranks = new int[] { 1, 2, 0 };
     compareRanks(hits, ranks);
   }

Modified: lucene/java/trunk/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java?rev=822587&r1=822586&r2=822587&view=diff
==============================================================================
--- lucene/java/trunk/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java (original)
+++ lucene/java/trunk/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java Wed Oct  7 05:08:22 2009
@@ -17,44 +17,42 @@
  * limitations under the License.
  */
 
+import java.io.File;
+import java.io.IOException;
+import java.text.ParseException;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.Vector;
+
 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.StopAnalyzer;
 import org.apache.lucene.analysis.SimpleAnalyzer;
+import org.apache.lucene.analysis.StopAnalyzer;
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.DateTools;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.DateTools;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.store.FSDirectory;
-import org.apache.lucene.search.Hits;
 import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Searcher;
 import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.FSDirectory;
 import org.apache.tools.ant.BuildException;
-import org.apache.tools.ant.DirectoryScanner;
 import org.apache.tools.ant.DynamicConfigurator;
 import org.apache.tools.ant.Project;
 import org.apache.tools.ant.Task;
-import org.apache.tools.ant.types.FileSet;
 import org.apache.tools.ant.types.EnumeratedAttribute;
+import org.apache.tools.ant.types.FileSet;
 import org.apache.tools.ant.types.Resource;
 import org.apache.tools.ant.types.ResourceCollection;
 import org.apache.tools.ant.types.resources.FileResource;
 
-import java.io.File;
-import java.io.IOException;
-import java.util.Date;
-import java.util.Iterator;
-import java.util.Properties;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.Set;
-import java.util.ArrayList;
-import java.util.Vector;
-import java.text.ParseException;
-
 /**
  *  Ant task to index files with Lucene
  *
@@ -311,14 +309,14 @@
                   new Term("path", file.getPath());
                 TermQuery query =
                   new TermQuery(pathTerm);
-                Hits hits = searcher.search(query);
+                ScoreDoc[] hits = searcher.search(query, null, 1).scoreDocs;
 
                 // if document is found, compare the
                 // indexed last modified time with the
                 // current file
                 // - don't index if up to date
-                if (hits.length() > 0) {
-                  Document doc = hits.doc(0);
+                if (hits.length > 0) {
+                  Document doc = searcher.doc(hits[0].doc);
                   String indexModified =
                     doc.get("modified").trim();
                   if (indexModified != null) {

Modified: lucene/java/trunk/contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java?rev=822587&r1=822586&r2=822587&view=diff
==============================================================================
--- lucene/java/trunk/contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java (original)
+++ lucene/java/trunk/contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java Wed Oct  7 05:08:22 2009
@@ -18,7 +18,6 @@
  */
 
 import java.io.File;
-
 import java.io.IOException;
 
 import junit.framework.TestCase;
@@ -26,12 +25,10 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.StopAnalyzer;
 import org.apache.lucene.queryParser.QueryParser;
-import org.apache.lucene.search.Hits;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Searcher;
 import org.apache.lucene.store.FSDirectory;
-
 import org.apache.tools.ant.Project;
 import org.apache.tools.ant.types.FileSet;
 
@@ -79,9 +76,9 @@
     public void testSearch() throws Exception {
         Query query = new QueryParser("contents",analyzer).parse("test");
 
-        Hits hits = searcher.search(query);
+        int numHits = searcher.search(query, null, 1000).totalHits;
 
-        assertEquals("Find document(s)", 2, hits.length());
+        assertEquals("Find document(s)", 2, numHits);
     }
 
     /**

Modified: lucene/java/trunk/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java?rev=822587&r1=822586&r2=822587&view=diff
==============================================================================
--- lucene/java/trunk/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (original)
+++ lucene/java/trunk/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java Wed Oct  7 05:08:22 2009
@@ -34,6 +34,7 @@
 import javax.xml.parsers.DocumentBuilderFactory;
 
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.LowerCaseTokenizer;
 import org.apache.lucene.analysis.SimpleAnalyzer;
 import org.apache.lucene.analysis.Token;
@@ -56,7 +57,6 @@
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.ConstantScoreRangeQuery;
 import org.apache.lucene.search.FilteredQuery;
-import org.apache.lucene.search.Hits;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MultiPhraseQuery;
 import org.apache.lucene.search.MultiSearcher;
@@ -76,7 +76,6 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.Version;
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.w3c.dom.Element;
 import org.w3c.dom.NodeList;
 
@@ -90,9 +89,9 @@
   private Query query;
   RAMDirectory ramDir;
   public IndexSearcher searcher = null;
-  public Hits hits = null;
   int numHighlights = 0;
   Analyzer analyzer = new StandardAnalyzer();
+  TopDocs hits;
 
   String[] texts = {
       "Hello this is a piece of text that is very long and contains too much preamble and the meat is really here which says kennedy has been shot",
@@ -193,8 +192,8 @@
     QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
     Highlighter highlighter = new Highlighter(scorer);
     
-    for (int i = 0; i < hits.length(); i++) {
-      String text = hits.doc(i).get(FIELD_NAME);
+    for (int i = 0; i < hits.totalHits; i++) {
+      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
       TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME,
           new StringReader(text));
       highlighter.setTextFragmenter(new SimpleFragmenter(40));
@@ -242,8 +241,8 @@
     QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
     Highlighter highlighter = new Highlighter(this, scorer);
     
-    for (int i = 0; i < hits.length(); i++) {
-      String text = hits.doc(i).get(FIELD_NAME);
+    for (int i = 0; i < hits.totalHits; i++) {
+      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
       TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
 
       highlighter.setTextFragmenter(new SimpleFragmenter(40));
@@ -266,8 +265,8 @@
     Highlighter highlighter = new Highlighter(this,scorer);
     highlighter.setTextFragmenter(new SimpleFragmenter(40));
     
-    for (int i = 0; i < hits.length(); i++) {
-      String text = hits.doc(i).get(FIELD_NAME);
+    for (int i = 0; i < hits.totalHits; i++) {
+      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
       TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
 
       String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired,
@@ -284,8 +283,8 @@
 
     int maxNumFragmentsRequired = 2;
 
-    for (int i = 0; i < hits.length(); i++) {
-      String text = hits.doc(i).get(FIELD_NAME);
+    for (int i = 0; i < hits.totalHits; i++) {
+      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
       TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
       QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
       Highlighter highlighter = new Highlighter(this, scorer);
@@ -309,8 +308,8 @@
     QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
     Highlighter highlighter = new Highlighter(this, scorer);
     
-    for (int i = 0; i < hits.length(); i++) {
-      String text = hits.doc(i).get(FIELD_NAME);
+    for (int i = 0; i < hits.totalHits; i++) {
+      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
       TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
 
       highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer, 5));
@@ -328,8 +327,8 @@
     scorer = new QueryScorer(query, FIELD_NAME);
     highlighter = new Highlighter(this, scorer);
 
-    for (int i = 0; i < hits.length(); i++) {
-      String text = hits.doc(i).get(FIELD_NAME);
+    for (int i = 0; i < hits.totalHits; i++) {
+      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
       TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
 
       highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer, 20));
@@ -350,8 +349,8 @@
     QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
     Highlighter highlighter = new Highlighter(this,scorer);
     
-    for (int i = 0; i < hits.length(); i++) {
-      String text = hits.doc(i).get(FIELD_NAME);
+    for (int i = 0; i < hits.totalHits; i++) {
+      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
       TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME,new StringReader(text));
 
       highlighter.setTextFragmenter(new SimpleFragmenter(40));
@@ -405,7 +404,7 @@
 
       public void run() throws Exception {
         mode = QUERY;
-        doStandardHighlights(analyzer, hits, query, HighlighterTest.this);
+        doStandardHighlights(analyzer, searcher, hits, query, HighlighterTest.this);
       }
     };
 
@@ -420,8 +419,8 @@
     Highlighter highlighter = new Highlighter(new QueryTermScorer(query));
     highlighter.setTextFragmenter(new SimpleFragmenter(40));
     int maxNumFragmentsRequired = 2;
-    for (int i = 0; i < hits.length(); i++) {
-      String text = hits.doc(i).get(FIELD_NAME);
+    for (int i = 0; i < hits.totalHits; i++) {
+      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
       TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
 
       String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired,
@@ -447,7 +446,7 @@
 
       public void run() throws Exception {
         mode = QUERY;
-        doStandardHighlights(analyzer, hits, query, HighlighterTest.this);
+        doStandardHighlights(analyzer, searcher, hits, query, HighlighterTest.this);
       }
     };
 
@@ -465,7 +464,7 @@
 
       public void run() throws Exception {
         mode = QUERY;
-        doStandardHighlights(analyzer, hits, query, HighlighterTest.this);
+        doStandardHighlights(analyzer, searcher, hits, query, HighlighterTest.this);
       }
     };
 
@@ -480,7 +479,7 @@
       public void run() throws Exception {
         numHighlights = 0;
         doSearching("Kennedy");
-        doStandardHighlights(analyzer, hits, query, HighlighterTest.this);
+        doStandardHighlights(analyzer, searcher, hits, query, HighlighterTest.this);
         assertTrue("Failed to find correct number of highlights " + numHighlights + " found",
             numHighlights == 4);
       }
@@ -495,7 +494,7 @@
       public void run() throws Exception {
         numHighlights = 0;
         doSearching("Kinnedy~");
-        doStandardHighlights(analyzer, hits, query, HighlighterTest.this, true);
+        doStandardHighlights(analyzer, searcher, hits, query, HighlighterTest.this, true);
         assertTrue("Failed to find correct number of highlights " + numHighlights + " found",
             numHighlights == 5);
       }
@@ -510,7 +509,7 @@
       public void run() throws Exception {
         numHighlights = 0;
         doSearching("K?nnedy");
-        doStandardHighlights(analyzer, hits, query, HighlighterTest.this);
+        doStandardHighlights(analyzer, searcher, hits, query, HighlighterTest.this);
         assertTrue("Failed to find correct number of highlights " + numHighlights + " found",
             numHighlights == 4);
       }
@@ -525,7 +524,7 @@
       public void run() throws Exception {
         numHighlights = 0;
         doSearching("K*dy");
-        doStandardHighlights(analyzer, hits, query, HighlighterTest.this);
+        doStandardHighlights(analyzer, searcher, hits, query, HighlighterTest.this);
         assertTrue("Failed to find correct number of highlights " + numHighlights + " found",
             numHighlights == 5);
       }
@@ -549,7 +548,7 @@
         query = parser.parse(queryString);
         doSearching(query);
 
-        doStandardHighlights(analyzer, hits, query, HighlighterTest.this);
+        doStandardHighlights(analyzer, searcher, hits, query, HighlighterTest.this);
         assertTrue("Failed to find correct number of highlights " + numHighlights + " found",
             numHighlights == 5);
       }
@@ -569,10 +568,10 @@
     // it rewrites to ConstantScoreQuery which cannot be highlighted
     // query = unReWrittenQuery.rewrite(reader);
     System.out.println("Searching for: " + query.toString(FIELD_NAME));
-    hits = searcher.search(query);
+    hits = searcher.search(query, null, 1000);
 
-    for (int i = 0; i < hits.length(); i++) {
-      String text = hits.doc(i).get(HighlighterTest.FIELD_NAME);
+    for (int i = 0; i < hits.totalHits; i++) {
+      String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME);
       int maxNumFragmentsRequired = 2;
       String fragmentSeparator = "...";
       QueryScorer scorer = null;
@@ -605,10 +604,10 @@
     // it rewrites to ConstantScoreQuery which cannot be highlighted
     // query = unReWrittenQuery.rewrite(reader);
     System.out.println("Searching for: " + query.toString(FIELD_NAME));
-    hits = searcher.search(query);
+    hits = searcher.search(query, null, 1000);
 
-    for (int i = 0; i < hits.length(); i++) {
-      String text = hits.doc(i).get(HighlighterTest.FIELD_NAME);
+    for (int i = 0; i < hits.totalHits; i++) {
+      String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME);
       int maxNumFragmentsRequired = 2;
       String fragmentSeparator = "...";
       QueryScorer scorer = null;
@@ -631,12 +630,12 @@
     
     // try null field
     
-    hits = searcher.search(query);
+    hits = searcher.search(query, null, 1000);
     
     numHighlights = 0;
 
-    for (int i = 0; i < hits.length(); i++) {
-      String text = hits.doc(i).get(HighlighterTest.FIELD_NAME);
+    for (int i = 0; i < hits.totalHits; i++) {
+      String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME);
       int maxNumFragmentsRequired = 2;
       String fragmentSeparator = "...";
       QueryScorer scorer = null;
@@ -659,12 +658,12 @@
     
     // try default field
     
-    hits = searcher.search(query);
+    hits = searcher.search(query, null, 1000);
     
     numHighlights = 0;
 
-    for (int i = 0; i < hits.length(); i++) {
-      String text = hits.doc(i).get(HighlighterTest.FIELD_NAME);
+    for (int i = 0; i < hits.totalHits; i++) {
+      String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME);
       int maxNumFragmentsRequired = 2;
       String fragmentSeparator = "...";
       QueryScorer scorer = null;
@@ -692,7 +691,7 @@
       public void run() throws Exception {
         numHighlights = 0;
         doSearching("\"John Kennedy\"");
-        doStandardHighlights(analyzer, hits, query, HighlighterTest.this);
+        doStandardHighlights(analyzer, searcher, hits, query, HighlighterTest.this);
         // Currently highlights "John" and "Kennedy" separately
         assertTrue("Failed to find correct number of highlights " + numHighlights + " found",
             numHighlights == 2);
@@ -712,7 +711,7 @@
 
         SpanNearQuery snq = new SpanNearQuery(clauses, 1, true);
         doSearching(snq);
-        doStandardHighlights(analyzer, hits, query, HighlighterTest.this);
+        doStandardHighlights(analyzer, searcher, hits, query, HighlighterTest.this);
         // Currently highlights "John" and "Kennedy" separately
         assertTrue("Failed to find correct number of highlights " + numHighlights + " found",
             numHighlights == 2);
@@ -752,7 +751,7 @@
         FilteredQuery fq = new FilteredQuery(snq, rf);
 
         doSearching(fq);
-        doStandardHighlights(analyzer, hits, query, HighlighterTest.this);
+        doStandardHighlights(analyzer, searcher, hits, query, HighlighterTest.this);
         // Currently highlights "John" and "Kennedy" separately
         assertTrue("Failed to find correct number of highlights " + numHighlights + " found",
             numHighlights == 2);
@@ -774,7 +773,7 @@
         FilteredQuery fq = new FilteredQuery(pq, rf);
 
         doSearching(fq);
-        doStandardHighlights(analyzer, hits, query, HighlighterTest.this);
+        doStandardHighlights(analyzer, searcher, hits, query, HighlighterTest.this);
         // Currently highlights "John" and "Kennedy" separately
         assertTrue("Failed to find correct number of highlights " + numHighlights + " found",
             numHighlights == 2);
@@ -790,7 +789,7 @@
       public void run() throws Exception {
         numHighlights = 0;
         doSearching("John Kenn*");
-        doStandardHighlights(analyzer, hits, query, HighlighterTest.this);
+        doStandardHighlights(analyzer, searcher, hits, query, HighlighterTest.this);
         assertTrue("Failed to find correct number of highlights " + numHighlights + " found",
             numHighlights == 5);
       }
@@ -805,7 +804,7 @@
       public void run() throws Exception {
         numHighlights = 0;
         doSearching("JFK OR Kennedy");
-        doStandardHighlights(analyzer, hits, query, HighlighterTest.this);
+        doStandardHighlights(analyzer, searcher, hits, query, HighlighterTest.this);
         assertTrue("Failed to find correct number of highlights " + numHighlights + " found",
             numHighlights == 5);
       }
@@ -820,8 +819,8 @@
       public void run() throws Exception {
         doSearching("Kennedy");
         numHighlights = 0;
-        for (int i = 0; i < hits.length(); i++) {
-          String text = hits.doc(i).get(FIELD_NAME);
+        for (int i = 0; i < hits.totalHits; i++) {
+          String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
           TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
 
           Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream,
@@ -834,8 +833,8 @@
             numHighlights == 4);
 
         numHighlights = 0;
-        for (int i = 0; i < hits.length(); i++) {
-          String text = hits.doc(i).get(FIELD_NAME);
+        for (int i = 0; i < hits.totalHits; i++) {
+          String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
           TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
           Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream,
               HighlighterTest.this);
@@ -845,8 +844,8 @@
             numHighlights == 4);
 
         numHighlights = 0;
-        for (int i = 0; i < hits.length(); i++) {
-          String text = hits.doc(i).get(FIELD_NAME);
+        for (int i = 0; i < hits.totalHits; i++) {
+          String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
 
           TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
           Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream,
@@ -950,8 +949,8 @@
         doSearching("Kennedy");
         // new Highlighter(HighlighterTest.this, new QueryTermScorer(query));
 
-        for (int i = 0; i < hits.length(); i++) {
-          String text = hits.doc(i).get(FIELD_NAME);
+        for (int i = 0; i < hits.totalHits; i++) {
+          String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
           TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
           Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream,
               HighlighterTest.this);
@@ -972,8 +971,8 @@
 
         doSearching("Kennedy");
 
-        for (int i = 0; i < hits.length(); i++) {
-          String text = hits.doc(i).get(FIELD_NAME);
+        for (int i = 0; i < hits.totalHits; i++) {
+          String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
           TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
 
           Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream,
@@ -1106,7 +1105,7 @@
         System.out.println("Searching with primitive query");
         // forget to set this and...
         // query=query.rewrite(reader);
-        Hits hits = searcher.search(query);
+        TopDocs hits = searcher.search(query, null, 1000);
 
         // create an instance of the highlighter with the tags used to surround
         // highlighted text
@@ -1116,8 +1115,8 @@
 
         int maxNumFragmentsRequired = 3;
 
-        for (int i = 0; i < hits.length(); i++) {
-          String text = hits.doc(i).get(FIELD_NAME);
+        for (int i = 0; i < hits.totalHits; i++) {
+          String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
           TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
           Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream, HighlighterTest.this, false);
 
@@ -1240,7 +1239,7 @@
     query = parser.parse("multi*");
     System.out.println("Searching for: " + query.toString(FIELD_NAME));
     // at this point the multisearcher calls combine(query[])
-    hits = multiSearcher.search(query);
+    hits = multiSearcher.search(query, null, 1000);
 
     // query = QueryParser.parse("multi*", FIELD_NAME, new StandardAnalyzer());
     Query expandedQueries[] = new Query[2];
@@ -1252,8 +1251,8 @@
     // highlighted text
     Highlighter highlighter = new Highlighter(this, new QueryTermScorer(query));
 
-    for (int i = 0; i < hits.length(); i++) {
-      String text = hits.doc(i).get(FIELD_NAME);
+    for (int i = 0; i < hits.totalHits; i++) {
+      String text = multiSearcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
       TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
       String highlightedText = highlighter.getBestFragment(tokenStream, text);
       System.out.println(highlightedText);
@@ -1549,8 +1548,8 @@
    * Highlighter highlighter = new Highlighter(this,new
    * QueryFragmentScorer(query));
    * 
-   * for (int i = 0; i < hits.length(); i++) { String text =
-   * hits.doc(i).get(FIELD_NAME); TokenStream
+   * for (int i = 0; i < hits.totalHits; i++) { String text =
+   * searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream
    * tokenStream=bigramAnalyzer.tokenStream(FIELD_NAME,new StringReader(text));
    * String highlightedText = highlighter.getBestFragment(tokenStream,text);
    * System.out.println(highlightedText); } }
@@ -1577,13 +1576,13 @@
     // you must use a rewritten query!
     query = unReWrittenQuery.rewrite(reader);
     System.out.println("Searching for: " + query.toString(FIELD_NAME));
-    hits = searcher.search(query);
+    hits = searcher.search(query, null, 1000);
   }
 
   public void assertExpectedHighlightCount(final int maxNumFragmentsRequired,
       final int expectedHighlights) throws Exception {
-    for (int i = 0; i < hits.length(); i++) {
-      String text = hits.doc(i).get(FIELD_NAME);
+    for (int i = 0; i < hits.scoreDocs.length; i++) {
+      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
       TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
       QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
       Highlighter highlighter = new Highlighter(this, scorer);
@@ -1770,16 +1769,16 @@
       }
     }
 
-    void doStandardHighlights(Analyzer analyzer, Hits hits, Query query, Formatter formatter)
+    void doStandardHighlights(Analyzer analyzer, IndexSearcher searcher, TopDocs hits, Query query, Formatter formatter)
     throws Exception {
-      doStandardHighlights(analyzer, hits, query, formatter, false);
+      doStandardHighlights(analyzer, searcher, hits, query, formatter, false);
     }
     
-    void doStandardHighlights(Analyzer analyzer, Hits hits, Query query, Formatter formatter, boolean expandMT)
+    void doStandardHighlights(Analyzer analyzer, IndexSearcher searcher, TopDocs hits, Query query, Formatter formatter, boolean expandMT)
         throws Exception {
 
-      for (int i = 0; i < hits.length(); i++) {
-        String text = hits.doc(i).get(HighlighterTest.FIELD_NAME);
+      for (int i = 0; i < hits.scoreDocs.length; i++) {
+        String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME);
         int maxNumFragmentsRequired = 2;
         String fragmentSeparator = "...";
         Scorer scorer = null;

Modified: lucene/java/trunk/contrib/lucli/src/java/lucli/LuceneMethods.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/lucli/src/java/lucli/LuceneMethods.java?rev=822587&r1=822586&r2=822587&view=diff
==============================================================================
--- lucene/java/trunk/contrib/lucli/src/java/lucli/LuceneMethods.java (original)
+++ lucene/java/trunk/contrib/lucli/src/java/lucli/LuceneMethods.java Wed Oct  7 05:08:22 2009
@@ -17,13 +17,13 @@
  * limitations under the License.
  */
 
+import java.io.File;
 import java.io.IOException;
 import java.io.Reader;
 import java.io.StringReader;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Comparator;
-import java.util.Enumeration;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -31,7 +31,6 @@
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.Map.Entry;
-import java.io.File;
 
 import jline.ConsoleReader;
 
@@ -50,10 +49,12 @@
 import org.apache.lucene.index.IndexReader.FieldOption;
 import org.apache.lucene.queryParser.MultiFieldQueryParser;
 import org.apache.lucene.queryParser.ParseException;
+import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.Explanation;
-import org.apache.lucene.search.Hits;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Searcher;
 import org.apache.lucene.store.FSDirectory;
 
@@ -117,31 +118,33 @@
 
   public void search(String queryString, boolean explain, boolean showTokens, ConsoleReader cr)
   		throws java.io.IOException, org.apache.lucene.queryParser.ParseException {
-    Hits hits = initSearch(queryString);
-    System.out.println(hits.length() + " total matching documents");
+    initSearch(queryString);
+    int numHits = computeCount(query);
+    System.out.println(numHits + " total matching documents");
     if (explain) {
       query = explainQuery(queryString);
     }
 
     final int HITS_PER_PAGE = 10;
     message("--------------------------------------");
-    for (int start = 0; start < hits.length(); start += HITS_PER_PAGE) {
-      int end = Math.min(hits.length(), start + HITS_PER_PAGE);
+    for (int start = 0; start < numHits; start += HITS_PER_PAGE) {
+      int end = Math.min(numHits, start + HITS_PER_PAGE);
+      ScoreDoc[] hits = search(query, end);
       for (int ii = start; ii < end; ii++) {
-        Document doc = hits.doc(ii);
-        message("---------------- " + (ii + 1) + " score:" + hits.score(ii) + "---------------------");
+        Document doc = searcher.doc(hits[ii].doc);
+        message("---------------- " + (ii + 1) + " score:" + hits[ii].score + "---------------------");
         printHit(doc);
         if (showTokens) {
           invertDocument(doc);
         }
         if (explain) {
-          Explanation exp = searcher.explain(query, hits.id(ii));
+          Explanation exp = searcher.explain(query, hits[ii].doc);
           message("Explanation:" + exp.toString());
         }
       }
       message("#################################################");
 
-      if (hits.length() > end) {
+      if (numHits > end) {
       	// TODO: don't let the input end up in the command line history
       	queryString = cr.readLine("more (y/n) ? ");
         if (queryString.length() == 0 || queryString.charAt(0) == 'n')
@@ -201,7 +204,7 @@
   /**
    * TODO: Allow user to specify analyzer
    */
-  private Hits initSearch(String queryString) throws IOException, ParseException {
+  private void initSearch(String queryString) throws IOException, ParseException {
 
     searcher = new IndexSearcher(indexName, true);
     Analyzer analyzer = createAnalyzer();
@@ -215,16 +218,38 @@
     MultiFieldQueryParser parser = new MultiFieldQueryParser(fieldsArray, analyzer);
     query = parser.parse(queryString);
     System.out.println("Searching for: " + query.toString());
-    Hits hits = searcher.search(query);
-    return (hits);
-
+  }
+  
+  final static class CountingCollector extends Collector {
+    public int numHits = 0;
+    
+    public void setScorer(Scorer scorer) throws IOException {}
+    public void collect(int doc) throws IOException {
+      numHits++;
+    }
+
+    public void setNextReader(IndexReader reader, int docBase) {}
+    public boolean acceptsDocsOutOfOrder() {
+      return true;
+    }    
+  }
+  
+  private int computeCount(Query q) throws IOException {
+    CountingCollector countingCollector = new CountingCollector();
+    
+    searcher.search(q, countingCollector);    
+    return countingCollector.numHits;
   }
 
   public void count(String queryString) throws java.io.IOException, ParseException {
-    Hits hits = initSearch(queryString);
-    System.out.println(hits.length() + " total documents");
+    initSearch(queryString);
+    System.out.println(computeCount(query) + " total documents");
     searcher.close();
   }
+  
+  private ScoreDoc[] search(Query q, int numHits) throws IOException {
+    return searcher.search(q, numHits).scoreDocs;
+  }
 
   static public void message(String s) {
     System.out.println(s);

Modified: lucene/java/trunk/contrib/misc/src/test/org/apache/lucene/misc/ChainedFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/misc/src/test/org/apache/lucene/misc/ChainedFilterTest.java?rev=822587&r1=822586&r2=822587&view=diff
==============================================================================
--- lucene/java/trunk/contrib/misc/src/test/org/apache/lucene/misc/ChainedFilterTest.java (original)
+++ lucene/java/trunk/contrib/misc/src/test/org/apache/lucene/misc/ChainedFilterTest.java Wed Oct  7 05:08:22 2009
@@ -132,22 +132,24 @@
       
       ChainedFilter chain = getChainedFilter(new Filter[] {dateFilter}, null, old);
   
-      Hits hits = searcher.search(query, chain);
-      assertEquals(MAX, hits.length());
+      int numHits = searcher.search(query, chain, 1000).totalHits;
+      assertEquals(MAX, numHits);
   
       chain = new ChainedFilter(new Filter[] {bobFilter});
-      hits = searcher.search(query, chain);
-      assertEquals(MAX / 2, hits.length());
+      numHits = searcher.search(query, chain, 1000).totalHits;
+      assertEquals(MAX / 2, numHits);
       
       chain = getChainedFilter(new Filter[] {bobFilter}, new int[] {ChainedFilter.AND}, old);
-      hits = searcher.search(query, chain);
-      assertEquals(MAX / 2, hits.length());
-      assertEquals("bob", hits.doc(0).get("owner"));
+      TopDocs hits = searcher.search(query, chain, 1000);
+      numHits = hits.totalHits;
+      assertEquals(MAX / 2, numHits);
+      assertEquals("bob", searcher.doc(hits.scoreDocs[0].doc).get("owner"));
       
       chain = getChainedFilter(new Filter[] {bobFilter}, new int[] {ChainedFilter.ANDNOT}, old);
-      hits = searcher.search(query, chain);
-      assertEquals(MAX / 2, hits.length());
-      assertEquals("sue", hits.doc(0).get("owner"));
+      hits = searcher.search(query, chain, 1000);
+      numHits = hits.totalHits;
+      assertEquals(MAX / 2, numHits);
+      assertEquals("sue", searcher.doc(hits.scoreDocs[0].doc).get("owner"));
     }
   }
 
@@ -157,8 +159,8 @@
       ChainedFilter chain = getChainedFilter(
         new Filter[] {sueFilter, bobFilter}, null, old);
   
-      Hits hits = searcher.search(query, chain);
-      assertEquals("OR matches all", MAX, hits.length());
+      int numHits = searcher.search(query, chain, 1000).totalHits;
+      assertEquals("OR matches all", MAX, numHits);
     }
   }
 
@@ -168,9 +170,9 @@
       ChainedFilter chain = getChainedFilter(
         new Filter[] {dateFilter, bobFilter}, ChainedFilter.AND, old);
   
-      Hits hits = searcher.search(query, chain);
-      assertEquals("AND matches just bob", MAX / 2, hits.length());
-      assertEquals("bob", hits.doc(0).get("owner"));
+      TopDocs hits = searcher.search(query, chain, 1000);
+      assertEquals("AND matches just bob", MAX / 2, hits.totalHits);
+      assertEquals("bob", searcher.doc(hits.scoreDocs[0].doc).get("owner"));
     }
   }
 
@@ -180,9 +182,9 @@
       ChainedFilter chain = getChainedFilter(
         new Filter[]{dateFilter, bobFilter}, ChainedFilter.XOR, old);
   
-      Hits hits = searcher.search(query, chain);
-      assertEquals("XOR matches sue", MAX / 2, hits.length());
-      assertEquals("sue", hits.doc(0).get("owner"));
+      TopDocs hits = searcher.search(query, chain, 1000);
+      assertEquals("XOR matches sue", MAX / 2, hits.totalHits);
+      assertEquals("sue", searcher.doc(hits.scoreDocs[0].doc).get("owner"));
     }
   }
 
@@ -193,19 +195,19 @@
         new Filter[]{dateFilter, sueFilter},
           new int[] {ChainedFilter.AND, ChainedFilter.ANDNOT}, old);
   
-      Hits hits = searcher.search(query, chain);
+      TopDocs hits = searcher.search(query, chain, 1000);
       assertEquals("ANDNOT matches just bob",
-          MAX / 2, hits.length());
-      assertEquals("bob", hits.doc(0).get("owner"));
+          MAX / 2, hits.totalHits);
+      assertEquals("bob", searcher.doc(hits.scoreDocs[0].doc).get("owner"));
       
       chain = getChainedFilter(
           new Filter[]{bobFilter, bobFilter},
             new int[] {ChainedFilter.ANDNOT, ChainedFilter.ANDNOT}, old);
   
-        hits = searcher.search(query, chain);
+        hits = searcher.search(query, chain, 1000);
         assertEquals("ANDNOT bob ANDNOT bob matches all sues",
-            MAX / 2, hits.length());
-        assertEquals("sue", hits.doc(0).get("owner"));
+            MAX / 2, hits.totalHits);
+        assertEquals("sue", searcher.doc(hits.scoreDocs[0].doc).get("owner"));
     }
   }
 

Modified: lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java?rev=822587&r1=822586&r2=822587&view=diff
==============================================================================
--- lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java (original)
+++ lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java Wed Oct  7 05:08:22 2009
@@ -15,39 +15,40 @@
  */
 package org.apache.lucene.search.similar;
 
-import org.apache.lucene.util.PriorityQueue;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.PrintStream;
+import java.io.Reader;
+import java.io.StringReader;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermFreqVector;
-import org.apache.lucene.search.BooleanClause;	
-import org.apache.lucene.search.DefaultSimilarity;
-import org.apache.lucene.search.Similarity;
-import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.DefaultSimilarity;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Hits;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.document.Document;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.FSDirectory;
-
-import java.util.Set;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Collection;
-import java.util.Iterator;
-import java.io.IOException;
-import java.io.Reader;
-import java.io.File;
-import java.io.PrintStream;
-import java.io.StringReader;
-import java.io.FileReader;
-import java.io.InputStreamReader;
-import java.net.URL;
-import java.util.ArrayList;
+import org.apache.lucene.util.PriorityQueue;
 
 
 /**
@@ -745,14 +746,15 @@
         o.println();
         IndexSearcher searcher = new IndexSearcher(dir, true);
 
-        Hits hits = searcher.search(query);
-        int len = hits.length();
+        TopDocs hits = searcher.search(query, null, 25);
+        int len = hits.totalHits;
         o.println("found: " + len + " documents matching");
         o.println();
+        ScoreDoc[] scoreDocs = hits.scoreDocs;
         for (int i = 0; i < Math.min(25, len); i++) {
-            Document d = hits.doc(i);
+            Document d = searcher.doc(scoreDocs[i].doc);
 			String summary = d.get( "summary");
-            o.println("score  : " + hits.score(i));
+            o.println("score  : " + scoreDocs[i].score);
             o.println("url    : " + d.get("url"));
             o.println("\ttitle  : " + d.get("title"));
 			if ( summary != null)

Modified: lucene/java/trunk/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java?rev=822587&r1=822586&r2=822587&view=diff
==============================================================================
--- lucene/java/trunk/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java (original)
+++ lucene/java/trunk/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java Wed Oct  7 05:08:22 2009
@@ -80,10 +80,10 @@
 	{
 		DuplicateFilter df=new DuplicateFilter(KEY_FIELD);		
 		HashSet results=new HashSet();
-		Hits h = searcher.search(tq,df);
-		for(int i=0;i<h.length();i++)
+		ScoreDoc[] hits = searcher.search(tq,df, 1000).scoreDocs;
+		for(int i=0;i<hits.length;i++)
 		{
-			Document d=h.doc(i);
+			Document d=searcher.doc(hits[i].doc);
 			String url=d.get(KEY_FIELD);
 			assertFalse("No duplicate urls should be returned",results.contains(url));
 			results.add(url);
@@ -92,12 +92,12 @@
 	public void testNoFilter() throws Throwable
 	{
 		HashSet results=new HashSet();
-		Hits h = searcher.search(tq);
-		assertTrue("Default searching should have found some matches",h.length()>0);
+		ScoreDoc[] hits = searcher.search(tq, null, 1000).scoreDocs;
+		assertTrue("Default searching should have found some matches",hits.length>0);
 		boolean dupsFound=false;
-		for(int i=0;i<h.length();i++)
+		for(int i=0;i<hits.length;i++)
 		{
-			Document d=h.doc(i);
+			Document d=searcher.doc(hits[i].doc);
 			String url=d.get(KEY_FIELD);
 			if(!dupsFound)
 				dupsFound=results.contains(url);
@@ -111,11 +111,11 @@
 		DuplicateFilter df=new DuplicateFilter(KEY_FIELD);
 		df.setProcessingMode(DuplicateFilter.PM_FAST_INVALIDATION);
 		HashSet results=new HashSet();
-		Hits h = searcher.search(tq,df);
-		assertTrue("Filtered searching should have found some matches",h.length()>0);
-		for(int i=0;i<h.length();i++)
+		ScoreDoc[] hits = searcher.search(tq,df, 1000).scoreDocs;
+		assertTrue("Filtered searching should have found some matches",hits.length>0);
+		for(int i=0;i<hits.length;i++)
 		{
-			Document d=h.doc(i);
+			Document d=searcher.doc(hits[i].doc);
 			String url=d.get(KEY_FIELD);
 			assertFalse("No duplicate urls should be returned",results.contains(url));
 			results.add(url);
@@ -126,11 +126,11 @@
 	{
 		DuplicateFilter df=new DuplicateFilter(KEY_FIELD);
 		df.setKeepMode(DuplicateFilter.KM_USE_LAST_OCCURRENCE);
-		Hits h = searcher.search(tq,df);
-		assertTrue("Filtered searching should have found some matches",h.length()>0);
-		for(int i=0;i<h.length();i++)
+		ScoreDoc[] hits = searcher.search(tq,df, 1000).scoreDocs;
+		assertTrue("Filtered searching should have found some matches",hits.length>0);
+		for(int i=0;i<hits.length;i++)
 		{
-			Document d=h.doc(i);
+			Document d=searcher.doc(hits[i].doc);
 			String url=d.get(KEY_FIELD);
 			TermDocs td = reader.termDocs(new Term(KEY_FIELD,url));
 			int lastDoc=0;
@@ -138,7 +138,7 @@
 			{
 				lastDoc=td.doc();
 			}
-			assertEquals("Duplicate urls should return last doc",lastDoc, h.id((i)));
+			assertEquals("Duplicate urls should return last doc",lastDoc, hits[i].doc);
 		}
 	}	
 	
@@ -147,17 +147,17 @@
 	{
 		DuplicateFilter df=new DuplicateFilter(KEY_FIELD);
 		df.setKeepMode(DuplicateFilter.KM_USE_FIRST_OCCURRENCE);
-		Hits h = searcher.search(tq,df);
-		assertTrue("Filtered searching should have found some matches",h.length()>0);
-		for(int i=0;i<h.length();i++)
+		ScoreDoc[] hits = searcher.search(tq,df, 1000).scoreDocs;
+		assertTrue("Filtered searching should have found some matches",hits.length>0);
+		for(int i=0;i<hits.length;i++)
 		{
-			Document d=h.doc(i);
+			Document d=searcher.doc(hits[i].doc);
 			String url=d.get(KEY_FIELD);
 			TermDocs td = reader.termDocs(new Term(KEY_FIELD,url));
 			int lastDoc=0;
 			td.next();
 			lastDoc=td.doc();
-			assertEquals("Duplicate urls should return first doc",lastDoc, h.id((i)));
+			assertEquals("Duplicate urls should return first doc",lastDoc, hits[i].doc);
 		}
 	}	
 	

Modified: lucene/java/trunk/contrib/regex/src/test/org/apache/lucene/search/regex/TestRegexQuery.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/regex/src/test/org/apache/lucene/search/regex/TestRegexQuery.java?rev=822587&r1=822586&r2=822587&view=diff
==============================================================================
--- lucene/java/trunk/contrib/regex/src/test/org/apache/lucene/search/regex/TestRegexQuery.java (original)
+++ lucene/java/trunk/contrib/regex/src/test/org/apache/lucene/search/regex/TestRegexQuery.java Wed Oct  7 05:08:22 2009
@@ -67,7 +67,7 @@
     if ( capability != null )
       query.setRegexImplementation(capability);
     
-    return searcher.search(query).length();
+    return searcher.search(query, null, 1000).totalHits;
   }
 
   private int  spanRegexQueryNrHits(String regex1, String regex2, int slop, boolean ordered) throws Exception {
@@ -75,7 +75,7 @@
     SpanRegexQuery srq2 = new SpanRegexQuery( newTerm(regex2));
     SpanNearQuery query = new SpanNearQuery( new SpanQuery[]{srq1, srq2}, slop, ordered);
     
-    return searcher.search(query).length();
+    return searcher.search(query, null, 1000).totalHits;
   }
 
   public void testMatchAll() throws Exception {

Modified: lucene/java/trunk/contrib/regex/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/regex/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java?rev=822587&r1=822586&r2=822587&view=diff
==============================================================================
--- lucene/java/trunk/contrib/regex/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java (original)
+++ lucene/java/trunk/contrib/regex/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java Wed Oct  7 05:08:22 2009
@@ -28,7 +28,6 @@
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.search.Hits;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MultiSearcher;
 import org.apache.lucene.search.spans.SpanFirstQuery;
@@ -66,8 +65,8 @@
     SpanFirstQuery sfq = new SpanFirstQuery(srq, 1);
     // SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {srq, stq}, 6,
     // true);
-    Hits hits = searcher.search(sfq);
-    assertEquals(1, hits.length());
+    int numHits = searcher.search(sfq, null, 1000).totalHits;
+    assertEquals(1, numHits);
   }
 
   public void testSpanRegexBug() throws CorruptIndexException, IOException {
@@ -83,7 +82,7 @@
     arrSearcher[0] = new IndexSearcher(indexStoreA, true);
     arrSearcher[1] = new IndexSearcher(indexStoreB, true);
     MultiSearcher searcher = new MultiSearcher(arrSearcher);
-    Hits hits = searcher.search(query);
+    int numHits = searcher.search(query, null, 1000).totalHits;
     arrSearcher[0].close();
     arrSearcher[1].close();
 
@@ -92,7 +91,7 @@
     // The rewriter function only write it once on the first IndexSearcher
     // So it's using term: a1 b1 to search on the second IndexSearcher
     // As a result, it won't match the document in the second IndexSearcher
-    assertEquals(2, hits.length());
+    assertEquals(2, numHits);
     indexStoreA.close();
     indexStoreB.close();
   }

Modified: lucene/java/trunk/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java?rev=822587&r1=822586&r2=822587&view=diff
==============================================================================
--- lucene/java/trunk/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java (original)
+++ lucene/java/trunk/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java Wed Oct  7 05:08:22 2009
@@ -28,25 +28,26 @@
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.search.Hits;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.search.function.CustomScoreQuery;
 import org.apache.lucene.search.function.FieldScoreQuery;
 import org.apache.lucene.search.function.FieldScoreQuery.Type;
-import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.spatial.geohash.GeoHashUtils;
+import org.apache.lucene.spatial.geometry.DistanceUnits;
+import org.apache.lucene.spatial.geometry.FloatLatLng;
+import org.apache.lucene.spatial.geometry.LatLng;
 import org.apache.lucene.spatial.tier.projections.CartesianTierPlotter;
 import org.apache.lucene.spatial.tier.projections.IProjector;
 import org.apache.lucene.spatial.tier.projections.SinusoidalProjector;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.spatial.geometry.LatLng;
-import org.apache.lucene.spatial.geometry.FloatLatLng;
-import org.apache.lucene.spatial.geometry.DistanceUnits;
+import org.apache.lucene.util.NumericUtils;
 
 /**
  *
@@ -210,10 +211,10 @@
 
     // Perform the search, using the term query, the serial chain filter, and the
     // distance sort
-    Hits hits = searcher.search(customScore,null,sort);
-
-    int results = hits.length();
-
+    TopDocs hits = searcher.search(customScore.createWeight(searcher),null, 1000, sort);
+    int results = hits.totalHits;
+    ScoreDoc[] scoreDocs = hits.scoreDocs; 
+    
     // Get a list of distances
     Map<Integer,Double> distances = dq.distanceFilter.getDistances();
 
@@ -234,12 +235,12 @@
     assertEquals(2, results);
     double lastDistance = 0;
     for(int i =0 ; i < results; i++){
-      Document d = hits.doc(i);
+      Document d = searcher.doc(scoreDocs[i].doc);
 
       String name = d.get("name");
       double rsLat = NumericUtils.prefixCodedToDouble(d.get(latField));
       double rsLng = NumericUtils.prefixCodedToDouble(d.get(lngField));
-      Double geo_distance = distances.get(hits.id(i));
+      Double geo_distance = distances.get(scoreDocs[i].doc);
 
       double distance = DistanceUtils.getInstance().getDistanceMi(lat, lng, rsLat, rsLng);
       double llm = DistanceUtils.getInstance().getLLMDistance(lat, lng, rsLat, rsLng);
@@ -297,9 +298,9 @@
 
     // Perform the search, using the term query, the serial chain filter, and the
     // distance sort
-    Hits hits = searcher.search(customScore,null,sort);
-
-    int results = hits.length();
+    TopDocs hits = searcher.search(customScore.createWeight(searcher),null, 1000, sort);
+    int results = hits.totalHits;
+    ScoreDoc[] scoreDocs = hits.scoreDocs; 
 
     // Get a list of distances
     Map<Integer,Double> distances = dq.distanceFilter.getDistances();
@@ -321,11 +322,11 @@
     assertEquals(18, results);
     double lastDistance = 0;
     for(int i =0 ; i < results; i++){
-      Document d = hits.doc(i);
+      Document d = searcher.doc(scoreDocs[i].doc);
       String name = d.get("name");
       double rsLat = NumericUtils.prefixCodedToDouble(d.get(latField));
       double rsLng = NumericUtils.prefixCodedToDouble(d.get(lngField));
-      Double geo_distance = distances.get(hits.id(i));
+      Double geo_distance = distances.get(scoreDocs[i].doc);
 
       double distance = DistanceUtils.getInstance().getDistanceMi(lat, lng, rsLat, rsLng);
       double llm = DistanceUtils.getInstance().getLLMDistance(lat, lng, rsLat, rsLng);
@@ -386,9 +387,9 @@
     
       // Perform the search, using the term query, the serial chain filter, and the
       // distance sort
-      Hits hits = searcher.search(customScore,null,sort);
-
-      int results = hits.length();
+      TopDocs hits = searcher.search(customScore.createWeight(searcher),null, 1000, sort);
+      int results = hits.totalHits;
+      ScoreDoc[] scoreDocs = hits.scoreDocs; 
     
       // Get a list of distances 
       Map<Integer,Double> distances = dq.distanceFilter.getDistances();
@@ -410,12 +411,12 @@
       assertEquals(expected[x], results);
       double lastDistance = 0;
       for(int i =0 ; i < results; i++){
-        Document d = hits.doc(i);
+        Document d = searcher.doc(scoreDocs[i].doc);
       
         String name = d.get("name");
         double rsLat = NumericUtils.prefixCodedToDouble(d.get(latField));
         double rsLng = NumericUtils.prefixCodedToDouble(d.get(lngField)); 
-        Double geo_distance = distances.get(hits.id(i));
+        Double geo_distance = distances.get(scoreDocs[i].doc);
       
         double distance = DistanceUtils.getInstance().getDistanceMi(lat, lng, rsLat, rsLng);
         double llm = DistanceUtils.getInstance().getLLMDistance(lat, lng, rsLat, rsLng);
@@ -475,9 +476,9 @@
 	    
       // Perform the search, using the term query, the serial chain filter, and the
       // distance sort
-      Hits hits = searcher.search(customScore, dq.getFilter()); //,sort);
-
-      int results = hits.length();
+      TopDocs hits = searcher.search(customScore.createWeight(searcher),dq.getFilter(), 1000); //,sort);
+      int results = hits.totalHits;
+      ScoreDoc[] scoreDocs = hits.scoreDocs; 
 	    
       // Get a list of distances 
       Map<Integer,Double> distances = dq.distanceFilter.getDistances();
@@ -499,16 +500,16 @@
       assertEquals(expected[x], results);
 	    
       for(int i =0 ; i < results; i++){
-        Document d = hits.doc(i);
+        Document d = searcher.doc(scoreDocs[i].doc);
 	      
         String name = d.get("name");
         double rsLat = NumericUtils.prefixCodedToDouble(d.get(latField));
         double rsLng = NumericUtils.prefixCodedToDouble(d.get(lngField)); 
-        Double geo_distance = distances.get(hits.id(i));
+        Double geo_distance = distances.get(scoreDocs[i].doc);
 	      
         double distance = DistanceUtils.getInstance().getDistanceMi(lat, lng, rsLat, rsLng);
         double llm = DistanceUtils.getInstance().getLLMDistance(lat, lng, rsLat, rsLng);
-        System.out.println("Name: "+ name +", Distance (res, ortho, harvesine):"+ distance +" |"+ geo_distance +"|"+ llm +" | score "+ hits.score(i));
+        System.out.println("Name: "+ name +", Distance (res, ortho, harvesine):"+ distance +" |"+ geo_distance +"|"+ llm +" | score "+ scoreDocs[i].score);
         assertTrue(Math.abs((distance - llm)) < 1);
         assertTrue((distance < miles ));
 	      

Modified: lucene/java/trunk/contrib/spellchecker/src/java/org/apache/lucene/search/spell/SpellChecker.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/spellchecker/src/java/org/apache/lucene/search/spell/SpellChecker.java?rev=822587&r1=822586&r2=822587&view=diff
==============================================================================
--- lucene/java/trunk/contrib/spellchecker/src/java/org/apache/lucene/search/spell/SpellChecker.java (original)
+++ lucene/java/trunk/contrib/spellchecker/src/java/org/apache/lucene/search/spell/SpellChecker.java Wed Oct  7 05:08:22 2009
@@ -17,6 +17,9 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+import java.util.Iterator;
+
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -25,15 +28,12 @@
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.Hits;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.Directory;
 
-import java.io.IOException;
-import java.util.Iterator;
-
 /**
  * <p>
  *   Spell Checker class  (Main class) <br/>
@@ -214,17 +214,19 @@
       }
     }
 
+    int maxHits = 10 * numSug;
+    
 //    System.out.println("Q: " + query);
-    Hits hits = searcher.search(query);
+    ScoreDoc[] hits = searcher.search(query, null, maxHits).scoreDocs;
 //    System.out.println("HITS: " + hits.length());
     SuggestWordQueue sugQueue = new SuggestWordQueue(numSug);
 
     // go thru more than 'maxr' matches in case the distance filter triggers
-    int stop = Math.min(hits.length(), 10 * numSug);
+    int stop = Math.min(hits.length, maxHits);
     SuggestWord sugWord = new SuggestWord();
     for (int i = 0; i < stop; i++) {
 
-      sugWord.string = hits.doc(i).get(F_WORD); // get orig word
+      sugWord.string = searcher.doc(hits[i].doc).get(F_WORD); // get orig word
 
       // don't suggest a word for itself, that would be silly
       if (sugWord.string.equals(word)) {

Modified: lucene/java/trunk/contrib/swing/src/java/org/apache/lucene/swing/models/ListSearcher.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/swing/src/java/org/apache/lucene/swing/models/ListSearcher.java?rev=822587&r1=822586&r2=822587&view=diff
==============================================================================
--- lucene/java/trunk/contrib/swing/src/java/org/apache/lucene/swing/models/ListSearcher.java (original)
+++ lucene/java/trunk/contrib/swing/src/java/org/apache/lucene/swing/models/ListSearcher.java Wed Oct  7 05:08:22 2009
@@ -16,22 +16,28 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.store.RAMDirectory;
+import java.io.IOException;
+import java.util.ArrayList;
+
+import javax.swing.AbstractListModel;
+import javax.swing.ListModel;
+import javax.swing.event.ListDataEvent;
+import javax.swing.event.ListDataListener;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.queryParser.MultiFieldQueryParser;
+import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Hits;
-import org.apache.lucene.queryParser.MultiFieldQueryParser;
-
-import javax.swing.*;
-import javax.swing.event.ListDataListener;
-import javax.swing.event.ListDataEvent;
-import java.util.ArrayList;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.store.RAMDirectory;
 
 /**
  * See table searcher explanation.
@@ -163,10 +169,8 @@
             // has some weirdness.
             MultiFieldQueryParser parser = new MultiFieldQueryParser(fields, analyzer);
             Query query =parser.parse(searchString);
-            //run the search
-            Hits hits = is.search(query);
             //reset this list model with the new results
-            resetSearchResults(hits);
+            resetSearchResults(is, query);
         } catch (Exception e){
             e.printStackTrace();
         }
@@ -175,21 +179,41 @@
         fireContentsChanged(this, 0, getSize());
     }
 
+    final static class CountingCollector extends Collector {
+      public int numHits = 0;
+      
+      public void setScorer(Scorer scorer) throws IOException {}
+      public void collect(int doc) throws IOException {
+        numHits++;
+      }
+
+      public void setNextReader(IndexReader reader, int docBase) {}
+      public boolean acceptsDocsOutOfOrder() {
+        return true;
+      }    
+    }
+
+    
     /**
      *
      * @param hits The new result set to set this list to.
      */
-    private void resetSearchResults(Hits hits) {
+    private void resetSearchResults(IndexSearcher searcher, Query query) {
         try {
             //clear our index mapping this list model rows to
             //the decorated inner list model
             rowToModelIndex.clear();
+            
+            CountingCollector countingCollector = new CountingCollector();
+            searcher.search(query, countingCollector);
+            ScoreDoc[] hits = searcher.search(query, countingCollector.numHits).scoreDocs;
+            
             //iterate through the hits
             //get the row number stored at the index
             //that number is the row number of the decorated
             //table model row that we are mapping to
-            for (int t=0; t<hits.length(); t++){
-                Document document = hits.doc(t);
+            for (int t=0; t<hits.length; t++){
+                Document document = searcher.doc(hits[t].doc);
                 Fieldable field = document.getField(ROW_NUMBER);
                 rowToModelIndex.add(Integer.valueOf(field.stringValue()));
             }

Modified: lucene/java/trunk/contrib/swing/src/java/org/apache/lucene/swing/models/TableSearcher.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/swing/src/java/org/apache/lucene/swing/models/TableSearcher.java?rev=822587&r1=822586&r2=822587&view=diff
==============================================================================
--- lucene/java/trunk/contrib/swing/src/java/org/apache/lucene/swing/models/TableSearcher.java (original)
+++ lucene/java/trunk/contrib/swing/src/java/org/apache/lucene/swing/models/TableSearcher.java Wed Oct  7 05:08:22 2009
@@ -16,6 +16,13 @@
  * limitations under the License.
  */
 
+import java.util.ArrayList;
+
+import javax.swing.event.TableModelEvent;
+import javax.swing.event.TableModelListener;
+import javax.swing.table.AbstractTableModel;
+import javax.swing.table.TableModel;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.document.Document;
@@ -23,16 +30,11 @@
 import org.apache.lucene.document.Fieldable;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.queryParser.MultiFieldQueryParser;
-import org.apache.lucene.search.Hits;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.store.RAMDirectory;
-
-import javax.swing.event.TableModelEvent;
-import javax.swing.event.TableModelListener;
-import javax.swing.table.AbstractTableModel;
-import javax.swing.table.TableModel;
-import java.util.ArrayList;
+import org.apache.lucene.swing.models.ListSearcher.CountingCollector;
 
 
 /**
@@ -244,10 +246,8 @@
             // has some weirdness.
             MultiFieldQueryParser parser = new MultiFieldQueryParser(fields, analyzer);
             Query query = parser.parse(searchString);
-            //run the search
-            Hits hits = is.search(query);
             //reset this table model with the new results
-            resetSearchResults(hits);
+            resetSearchResults(is, query);
         } catch (Exception e){
             e.printStackTrace();
         }
@@ -260,17 +260,22 @@
      *
      * @param hits The new result set to set this table to.
      */
-    private void resetSearchResults(Hits hits) {
+    private void resetSearchResults(IndexSearcher searcher, Query query) {
         try {
             //clear our index mapping this table model rows to
             //the decorated inner table model
             rowToModelIndex.clear();
+            
+            CountingCollector countingCollector = new CountingCollector();
+            searcher.search(query, countingCollector);
+            ScoreDoc[] hits = searcher.search(query, countingCollector.numHits).scoreDocs;
+            
             //iterate through the hits
             //get the row number stored at the index
             //that number is the row number of the decorated
             //table model row that we are mapping to
-            for (int t=0; t<hits.length(); t++){
-                Document document = hits.doc(t);
+            for (int t=0; t<hits.length; t++){
+                Document document = searcher.doc(hits[t].doc);
                 Fieldable field = document.getField(ROW_NUMBER);
                 rowToModelIndex.add(Integer.valueOf(field.stringValue()));
             }

Modified: lucene/java/trunk/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java?rev=822587&r1=822586&r2=822587&view=diff
==============================================================================
--- lucene/java/trunk/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java (original)
+++ lucene/java/trunk/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java Wed Oct  7 05:08:22 2009
@@ -17,9 +17,9 @@
  * limitations under the License.
  */
 
+import java.io.File;
 import java.io.IOException;
 import java.io.StringReader;
-import java.io.File;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedList;
@@ -31,12 +31,14 @@
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.Hits;
+import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Searcher;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.FSDirectory;
@@ -103,13 +105,13 @@
 	public static Query expand( String query,
 								Searcher syns,
 								Analyzer a,
-								String field,
-								float boost)
+								String f,
+								final float boost)
 		throws IOException
 	{
-		Set already = new HashSet(); // avoid dups 
+		final Set already = new HashSet(); // avoid dups 
 		List top = new LinkedList(); // needs to be separately listed..
-		if ( field == null) field = "contents";
+		final String field = ( f == null) ? "contents" : f;
 		if ( a == null) a = new StandardAnalyzer();
 
 		// [1] Parse query into separate words so that when we expand we can avoid dups
@@ -121,7 +123,7 @@
 			if ( already.add( word))
 				top.add( word);
 		}
-		BooleanQuery tmp = new BooleanQuery();
+		final BooleanQuery tmp = new BooleanQuery();
 		
 		// [2] form query
 		Iterator it = top.iterator();
@@ -132,24 +134,42 @@
 			TermQuery tq = new TermQuery( new Term( field, word));
 			tmp.add( tq, BooleanClause.Occur.SHOULD);
 
+			syns.search(new TermQuery( new Term(Syns2Index.F_WORD, word)), new Collector() {
+			  IndexReader reader;
+			  
+        @Override
+        public boolean acceptsDocsOutOfOrder() {
+          return true;
+        }
+
+        @Override
+        public void collect(int doc) throws IOException {
+          Document d = reader.document(doc);
+          String[] values = d.getValues( Syns2Index.F_SYN);
+          for ( int j = 0; j < values.length; j++)
+          {
+            String syn = values[ j];
+            if ( already.add( syn)) // avoid dups of top level words and synonyms
+            {
+              TermQuery tq = new TermQuery( new Term( field, syn));
+              if ( boost > 0) // else keep normal 1.0
+                tq.setBoost( boost);
+              tmp.add( tq, BooleanClause.Occur.SHOULD); 
+            }
+          }
+        }
+
+        @Override
+        public void setNextReader(IndexReader reader, int docBase)
+            throws IOException {
+          this.reader = reader;
+        }
+
+        @Override
+        public void setScorer(Scorer scorer) throws IOException {}
+			});
+			
 			// [2b] add in unique synonums
-			Hits hits = syns.search( new TermQuery( new Term(Syns2Index.F_WORD, word)));
-			for (int i = 0; i < hits.length(); i++)
-			{
-				Document doc = hits.doc(i);
-				String[] values = doc.getValues( Syns2Index.F_SYN);
-				for ( int j = 0; j < values.length; j++)
-				{
-					String syn = values[ j];
-					if ( already.add( syn)) // avoid dups of top level words and synonyms
-					{
-						tq = new TermQuery( new Term( field, syn));
-						if ( boost > 0) // else keep normal 1.0
-							tq.setBoost( boost);
-						tmp.add( tq, BooleanClause.Occur.SHOULD); 
-					}
-				}
-			}
 		}
 
 



Mime
View raw message