lucene-commits mailing list archives

From rm...@apache.org
Subject svn commit: r1177888 [2/16] - in /lucene/dev/branches/lucene2621: ./ dev-tools/eclipse/ dev-tools/idea/lucene/contrib/ dev-tools/maven/ lucene/ lucene/contrib/ lucene/contrib/demo/src/java/org/apache/lucene/demo/ lucene/contrib/demo/src/java/org/apache...
Date Sat, 01 Oct 2011 03:05:07 GMT
Modified: lucene/dev/branches/lucene2621/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (original)
+++ lucene/dev/branches/lucene2621/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java Sat Oct  1 03:04:53 2011
@@ -155,7 +155,8 @@ public class HighlighterTest extends Bas
    */
   private static String highlightField(Query query, String fieldName, String text)
       throws IOException, InvalidTokenOffsetsException {
-    TokenStream tokenStream = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true).tokenStream(fieldName, new StringReader(text));
+    TokenStream tokenStream = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)
+        .tokenStream(fieldName, new StringReader(text));
     // Assuming "<B>", "</B>" used to highlight
     SimpleHTMLFormatter formatter = new SimpleHTMLFormatter();
     QueryScorer scorer = new QueryScorer(query, fieldName, FIELD_NAME);
@@ -521,7 +522,7 @@ public class HighlighterTest extends Bas
     
     for (int i = 0; i < hits.totalHits; i++) {
       String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME,new StringReader(text));
+      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
 
       highlighter.setTextFragmenter(new SimpleFragmenter(40));
 
@@ -764,12 +765,8 @@ public class HighlighterTest extends Bas
       String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME);
       int maxNumFragmentsRequired = 2;
       String fragmentSeparator = "...";
-      QueryScorer scorer;
-      TokenStream tokenStream;
-
-      tokenStream = analyzer.tokenStream(HighlighterTest.FIELD_NAME, new StringReader(text));
-      
-      scorer = new QueryScorer(query, HighlighterTest.FIELD_NAME);
+      QueryScorer scorer = new QueryScorer(query, HighlighterTest.FIELD_NAME);
+      TokenStream tokenStream = analyzer.tokenStream(HighlighterTest.FIELD_NAME, new StringReader(text));
 
       Highlighter highlighter = new Highlighter(this, scorer);
 
@@ -792,12 +789,8 @@ public class HighlighterTest extends Bas
       String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME);
       int maxNumFragmentsRequired = 2;
       String fragmentSeparator = "...";
-      QueryScorer scorer;
-      TokenStream tokenStream;
-
-      tokenStream = analyzer.tokenStream(HighlighterTest.FIELD_NAME, new StringReader(text));
-      
-      scorer = new QueryScorer(query, null);
+      QueryScorer scorer = new QueryScorer(query, null);
+      TokenStream tokenStream = analyzer.tokenStream(HighlighterTest.FIELD_NAME, new StringReader(text));
 
       Highlighter highlighter = new Highlighter(this, scorer);
 
@@ -820,12 +813,8 @@ public class HighlighterTest extends Bas
       String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME);
       int maxNumFragmentsRequired = 2;
       String fragmentSeparator = "...";
-      QueryScorer scorer;
-      TokenStream tokenStream;
-
-      tokenStream = analyzer.tokenStream(HighlighterTest.FIELD_NAME, new StringReader(text));
-      
-      scorer = new QueryScorer(query, "random_field", HighlighterTest.FIELD_NAME);
+      QueryScorer scorer = new QueryScorer(query, "random_field", HighlighterTest.FIELD_NAME);
+      TokenStream tokenStream = analyzer.tokenStream(HighlighterTest.FIELD_NAME, new StringReader(text));
 
       Highlighter highlighter = new Highlighter(this, scorer);
 
@@ -998,7 +987,7 @@ public class HighlighterTest extends Bas
           String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
           TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
 
-          Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream,
+          Highlighter highlighter = getHighlighter(query, FIELD_NAME,
               HighlighterTest.this);
           highlighter.setTextFragmenter(new SimpleFragmenter(40));
           String result = highlighter.getBestFragment(tokenStream, text);
@@ -1010,8 +999,7 @@ public class HighlighterTest extends Bas
         numHighlights = 0;
         for (int i = 0; i < hits.totalHits; i++) {
           String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-          TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
-          Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream,
+          Highlighter highlighter = getHighlighter(query, FIELD_NAME,
               HighlighterTest.this);
           highlighter.getBestFragment(analyzer, FIELD_NAME, text);
         }
@@ -1022,8 +1010,7 @@ public class HighlighterTest extends Bas
         for (int i = 0; i < hits.totalHits; i++) {
           String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
 
-          TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
-          Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream,
+          Highlighter highlighter = getHighlighter(query, FIELD_NAME,
               HighlighterTest.this);
           highlighter.getBestFragments(analyzer, FIELD_NAME, text, 10);
         }
@@ -1101,12 +1088,10 @@ public class HighlighterTest extends Bas
         query.add(new TermQuery(new Term("bookid", "soccer")), Occur.SHOULD);
         query.add(new TermQuery(new Term("bookid", "footie")), Occur.SHOULD);
 
-        TokenStream tokenStream = analyzer.tokenStream(null, new StringReader(s));
-
-        Highlighter highlighter = getHighlighter(query, null, tokenStream, HighlighterTest.this);
+        Highlighter highlighter = getHighlighter(query, null, HighlighterTest.this);
 
         // Get 3 best fragments and separate with a "..."
-        tokenStream = analyzer.tokenStream(null, new StringReader(s));
+        TokenStream tokenStream = analyzer.tokenStream(null, new StringReader(s));
 
         String result = highlighter.getBestFragments(tokenStream, s, 3, "...");
         String expectedResult = "<B>football</B>-<B>soccer</B> in the euro 2004 <B>footie</B> competition";
@@ -1132,7 +1117,7 @@ public class HighlighterTest extends Bas
         for (int i = 0; i < hits.totalHits; i++) {
           String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
           TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
-          Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream,
+          Highlighter highlighter = getHighlighter(query, FIELD_NAME,
               HighlighterTest.this);
           String result = highlighter.getBestFragment(tokenStream, text);
           if (VERBOSE) System.out.println("\t" + result);
@@ -1156,7 +1141,7 @@ public class HighlighterTest extends Bas
           String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
           TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
 
-          Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream,
+          Highlighter highlighter = getHighlighter(query, FIELD_NAME,
               HighlighterTest.this);// new Highlighter(this, new
           // QueryTermScorer(query));
           highlighter.setTextFragmenter(new SimpleFragmenter(20));
@@ -1193,7 +1178,7 @@ public class HighlighterTest extends Bas
         numHighlights = 0;
         doSearching(new TermQuery(new Term(FIELD_NAME, "meat")));
         TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(texts[0]));
-        Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream,
+        Highlighter highlighter = getHighlighter(query, FIELD_NAME,
             HighlighterTest.this);// new Highlighter(this, new
         // QueryTermScorer(query));
         highlighter.setMaxDocCharsToAnalyze(30);
@@ -1230,8 +1215,7 @@ public class HighlighterTest extends Bas
           sb.append("stoppedtoken");
         }
         SimpleHTMLFormatter fm = new SimpleHTMLFormatter();
-        Highlighter hg = getHighlighter(query, "data", analyzer.tokenStream(
-            "data", new StringReader(sb.toString())), fm);// new Highlighter(fm,
+        Highlighter hg = getHighlighter(query, "data", fm);// new Highlighter(fm,
         // new
         // QueryTermScorer(query));
         hg.setTextFragmenter(new NullFragmenter());
@@ -1266,7 +1250,9 @@ public class HighlighterTest extends Bas
 
         String text = "this is a text with searchterm in it";
         SimpleHTMLFormatter fm = new SimpleHTMLFormatter();
-        Highlighter hg = getHighlighter(query, "text", new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopWords, true).tokenStream("text", new StringReader(text)), fm);
+        TokenStream tokenStream = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopWords, true)
+            .tokenStream("text", new StringReader(text));
+        Highlighter hg = getHighlighter(query, "text", fm);
         hg.setTextFragmenter(new NullFragmenter());
         hg.setMaxDocCharsToAnalyze(36);
         String match = hg.getBestFragment(new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopWords, true), "text", text);
@@ -1309,7 +1295,7 @@ public class HighlighterTest extends Bas
         for (int i = 0; i < hits.totalHits; i++) {
           String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
           TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
-          Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream, HighlighterTest.this, false);
+          Highlighter highlighter = getHighlighter(query, FIELD_NAME, HighlighterTest.this, false);
 
           highlighter.setTextFragmenter(new SimpleFragmenter(40));
 
@@ -1338,7 +1324,7 @@ public class HighlighterTest extends Bas
 
         for (String text : texts) {
           TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
-          Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream,
+          Highlighter highlighter = getHighlighter(query, FIELD_NAME,
               HighlighterTest.this);
           String result = highlighter.getBestFragment(tokenStream, text);
           assertNull("The highlight result should be null for text with no query terms", result);
@@ -1477,7 +1463,7 @@ public class HighlighterTest extends Bas
       @Override
       public boolean incrementToken() throws IOException {
         if(iter.hasNext()) {
-          Token token =  iter.next();
+          Token token = iter.next();
           clearAttributes();
           termAtt.setEmpty().append(token);
           posIncrAtt.setPositionIncrement(token.getPositionIncrement());
@@ -1486,7 +1472,12 @@ public class HighlighterTest extends Bas
         }
         return false;
       }
-     
+
+      @Override
+      public void reset() throws IOException {
+        super.reset();
+        iter = lst.iterator();
+      }
     };
   }
 
@@ -1532,6 +1523,12 @@ public class HighlighterTest extends Bas
         }
         return false;
       }
+
+      @Override
+      public void reset() throws IOException {
+        super.reset();
+        iter = lst.iterator();
+      }
     };
   }
 
@@ -1547,27 +1544,27 @@ public class HighlighterTest extends Bas
         String result;
 
         query = new TermQuery(new Term("text", "foo"));
-        highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
+        highlighter = getHighlighter(query, "text", HighlighterTest.this);
         result = highlighter.getBestFragments(getTS2(), s, 3, "...");
         assertEquals("Hi-Speed10 <B>foo</B>", result);
 
         query = new TermQuery(new Term("text", "10"));
-        highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
+        highlighter = getHighlighter(query, "text", HighlighterTest.this);
         result = highlighter.getBestFragments(getTS2(), s, 3, "...");
         assertEquals("Hi-Speed<B>10</B> foo", result);
 
         query = new TermQuery(new Term("text", "hi"));
-        highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
+        highlighter = getHighlighter(query, "text", HighlighterTest.this);
         result = highlighter.getBestFragments(getTS2(), s, 3, "...");
         assertEquals("<B>Hi</B>-Speed10 foo", result);
 
         query = new TermQuery(new Term("text", "speed"));
-        highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
+        highlighter = getHighlighter(query, "text", HighlighterTest.this);
         result = highlighter.getBestFragments(getTS2(), s, 3, "...");
         assertEquals("Hi-<B>Speed</B>10 foo", result);
 
         query = new TermQuery(new Term("text", "hispeed"));
-        highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
+        highlighter = getHighlighter(query, "text", HighlighterTest.this);
         result = highlighter.getBestFragments(getTS2(), s, 3, "...");
         assertEquals("<B>Hi-Speed</B>10 foo", result);
 
@@ -1576,39 +1573,39 @@ public class HighlighterTest extends Bas
         booleanQuery.add(new TermQuery(new Term("text", "speed")), Occur.SHOULD);
 
         query = booleanQuery;
-        highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
+        highlighter = getHighlighter(query, "text", HighlighterTest.this);
         result = highlighter.getBestFragments(getTS2(), s, 3, "...");
         assertEquals("<B>Hi-Speed</B>10 foo", result);
 
         // ///////////////// same tests, just put the bigger overlapping token
         // first
         query = new TermQuery(new Term("text", "foo"));
-        highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
+        highlighter = getHighlighter(query, "text", HighlighterTest.this);
         result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
         assertEquals("Hi-Speed10 <B>foo</B>", result);
 
         query = new TermQuery(new Term("text", "10"));
-        highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
+        highlighter = getHighlighter(query, "text", HighlighterTest.this);
         result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
         assertEquals("Hi-Speed<B>10</B> foo", result);
 
         query = new TermQuery(new Term("text", "hi"));
-        highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
+        highlighter = getHighlighter(query, "text", HighlighterTest.this);
         result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
         assertEquals("<B>Hi</B>-Speed10 foo", result);
 
         query = new TermQuery(new Term("text", "speed"));
-        highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
+        highlighter = getHighlighter(query, "text", HighlighterTest.this);
         result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
         assertEquals("Hi-<B>Speed</B>10 foo", result);
 
         query = new TermQuery(new Term("text", "hispeed"));
-        highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
+        highlighter = getHighlighter(query, "text", HighlighterTest.this);
         result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
         assertEquals("<B>Hi-Speed</B>10 foo", result);
 
         query = booleanQuery;
-        highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
+        highlighter = getHighlighter(query, "text", HighlighterTest.this);
         result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
         assertEquals("<B>Hi-Speed</B>10 foo", result);
       }
@@ -1628,7 +1625,7 @@ public class HighlighterTest extends Bas
   
   private Document doc( String f, String v ){
     Document doc = new Document();
-    doc.add( new Field( f, TextField.TYPE_STORED, v));
+    doc.add( new Field( f, v, TextField.TYPE_STORED));
     return doc;
   }
   
@@ -1779,7 +1776,7 @@ public class HighlighterTest extends Bas
   private void addDoc(IndexWriter writer, String text) throws IOException {
     Document d = new Document();
 
-    Field f = new Field(FIELD_NAME, TextField.TYPE_STORED, text);
+    Field f = new Field(FIELD_NAME, text, TextField.TYPE_STORED);
     d.add(f);
     writer.addDocument(d);
 
@@ -1802,7 +1799,7 @@ public class HighlighterTest extends Bas
 // behaviour to synonyms
 // ===================================================================
 
-final class SynonymAnalyzer extends ReusableAnalyzerBase {
+final class SynonymAnalyzer extends Analyzer {
   private Map<String,String> synonyms;
 
   public SynonymAnalyzer(Map<String,String> synonyms) {
@@ -1901,6 +1898,18 @@ final class SynonymTokenizer extends Tok
     this.st = null;
   }
 
+  @Override
+  public void end() throws IOException {
+    super.end();
+    this.realStream.end();
+  }
+
+  @Override
+  public void close() throws IOException {
+    super.close();
+    this.realStream.close();
+  }
+
   static abstract class TestHighlightRunner {
     static final int QUERY = 0;
     static final int QUERY_TERM = 1;
@@ -1908,11 +1917,11 @@ final class SynonymTokenizer extends Tok
     int mode = QUERY;
     Fragmenter frag = new SimpleFragmenter(20);
     
-    public Highlighter getHighlighter(Query query, String fieldName, TokenStream stream, Formatter formatter) {
-      return getHighlighter(query, fieldName, stream, formatter, true);
+    public Highlighter getHighlighter(Query query, String fieldName, Formatter formatter) {
+      return getHighlighter(query, fieldName, formatter, true);
     }
     
-    public Highlighter getHighlighter(Query query, String fieldName, TokenStream stream, Formatter formatter, boolean expanMultiTerm) {
+    public Highlighter getHighlighter(Query query, String fieldName, Formatter formatter, boolean expanMultiTerm) {
       Scorer scorer;
       if (mode == QUERY) {
         scorer = new QueryScorer(query, fieldName);

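Two threads run through the HighlighterTest hunks above: TestHighlightRunner.getHighlighter no longer accepts a TokenStream (each stream is now created, or reset, at the point where it is consumed), and the hand-built token streams gain reset() overrides so one instance survives repeated passes. A minimal sketch of the second point, assuming the branch's TokenStream and attribute API; the class and its token list are illustrative, not part of the commit:

    import java.io.IOException;
    import java.util.Iterator;
    import java.util.List;

    import org.apache.lucene.analysis.Token;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    final class ListTokenStream extends TokenStream {
      private final List<Token> tokens; // illustrative token source
      private Iterator<Token> iter;
      private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);

      ListTokenStream(List<Token> tokens) {
        this.tokens = tokens;
        this.iter = tokens.iterator();
      }

      @Override
      public boolean incrementToken() {
        if (!iter.hasNext()) {
          return false;
        }
        clearAttributes();
        termAtt.setEmpty().append(iter.next());
        return true;
      }

      @Override
      public void reset() throws IOException {
        super.reset();
        iter = tokens.iterator(); // without this, a second consumption yields no tokens
      }
    }

Without the override, the second pass over such a stream would silently produce nothing once the highlighter reuses it.
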
Modified: lucene/dev/branches/lucene2621/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/OffsetLimitTokenFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/OffsetLimitTokenFilterTest.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/OffsetLimitTokenFilterTest.java (original)
+++ lucene/dev/branches/lucene2621/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/OffsetLimitTokenFilterTest.java Sat Oct  1 03:04:53 2011
@@ -49,7 +49,7 @@ public class OffsetLimitTokenFilterTest 
     assertTokenStreamContents(filter, new String[] {"short", "toolong",
         "evenmuchlongertext"});
     
-    checkOneTermReuse(new ReusableAnalyzerBase() {
+    checkOneTermReuse(new Analyzer() {
       
       @Override
       public TokenStreamComponents createComponents(String fieldName, Reader reader) {

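The ReusableAnalyzerBase rename above recurs through the rest of this mail: its contract (implement createComponents once; the framework caches and reuses the components per thread) has been folded into Analyzer itself, so analyzers now extend Analyzer directly. A hedged sketch of the resulting shape, reusing the MockTokenizer these tests already depend on; the analyzer name is illustrative:

    import java.io.Reader;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.MockTokenizer;

    final class KeywordLowercaseAnalyzer extends Analyzer {
      @Override
      public TokenStreamComponents createComponents(String fieldName, Reader reader) {
        // One tokenizer per thread; tokenStream(...) reuses these components.
        return new TokenStreamComponents(new MockTokenizer(reader, MockTokenizer.KEYWORD, true));
      }
    }
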
Modified: lucene/dev/branches/lucene2621/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java (original)
+++ lucene/dev/branches/lucene2621/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java Sat Oct  1 03:04:53 2011
@@ -48,7 +48,7 @@ import org.apache.lucene.util.LuceneTest
 public class TokenSourcesTest extends LuceneTestCase {
   private static final String FIELD = "text";
 
-  private static final class OverlapAnalyzer extends ReusableAnalyzerBase {
+  private static final class OverlapAnalyzer extends Analyzer {
 
     @Override
     public TokenStreamComponents createComponents(String fieldName, Reader reader) {
@@ -109,7 +109,7 @@ public class TokenSourcesTest extends Lu
       FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
       customType.setStoreTermVectors(true);
       customType.setStoreTermVectorOffsets(true);
-      document.add(new Field(FIELD, customType, new TokenStreamOverlap()));
+      document.add(new Field(FIELD, new TokenStreamOverlap(), customType));
       indexWriter.addDocument(document);
     } finally {
       indexWriter.close();
@@ -158,7 +158,7 @@ public class TokenSourcesTest extends Lu
       customType.setStoreTermVectors(true);
       customType.setStoreTermVectorOffsets(true);
       customType.setStoreTermVectorPositions(true);
-      document.add(new Field(FIELD, customType, new TokenStreamOverlap()));
+      document.add(new Field(FIELD, new TokenStreamOverlap(), customType));
       indexWriter.addDocument(document);
     } finally {
       indexWriter.close();
@@ -206,7 +206,7 @@ public class TokenSourcesTest extends Lu
       FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
       customType.setStoreTermVectors(true);
       customType.setStoreTermVectorOffsets(true);
-      document.add(new Field(FIELD, customType, new TokenStreamOverlap()));
+      document.add(new Field(FIELD, new TokenStreamOverlap(), customType));
       indexWriter.addDocument(document);
     } finally {
       indexWriter.close();
@@ -255,7 +255,7 @@ public class TokenSourcesTest extends Lu
       FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
       customType.setStoreTermVectors(true);
       customType.setStoreTermVectorOffsets(true);
-      document.add(new Field(FIELD, customType, new TokenStreamOverlap()));
+      document.add(new Field(FIELD, new TokenStreamOverlap(), customType));
       indexWriter.addDocument(document);
     } finally {
       indexWriter.close();

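The Field constructor edits in TokenSourcesTest (and in the vectorhighlight and instantiated tests below) all apply one mechanical change: the value, whether a String or a TokenStream, now precedes the FieldType instead of following it. A sketch of the new order, assuming the branch's document API:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.FieldType;
    import org.apache.lucene.document.TextField;

    final class FieldOrderExample {
      static Document makeDoc() {
        FieldType vectorType = new FieldType(TextField.TYPE_UNSTORED);
        vectorType.setStoreTermVectors(true);
        vectorType.setStoreTermVectorOffsets(true);

        Document doc = new Document();
        // New order on this branch: name, value, then FieldType.
        doc.add(new Field("text", "a stored, analyzed value", TextField.TYPE_STORED));
        doc.add(new Field("text", "an unstored value with term vectors", vectorType));
        return doc;
      }
    }
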
Modified: lucene/dev/branches/lucene2621/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java (original)
+++ lucene/dev/branches/lucene2621/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java Sat Oct  1 03:04:53 2011
@@ -170,7 +170,7 @@ public abstract class AbstractTestCase e
   protected List<BytesRef> analyze(String text, String field, Analyzer analyzer) throws IOException {
     List<BytesRef> bytesRefs = new ArrayList<BytesRef>();
 
-    TokenStream tokenStream = analyzer.reusableTokenStream(field, new StringReader(text));
+    TokenStream tokenStream = analyzer.tokenStream(field, new StringReader(text));
     TermToBytesRefAttribute termAttribute = tokenStream.getAttribute(TermToBytesRefAttribute.class);
 
     BytesRef bytesRef = termAttribute.getBytesRef();
@@ -194,7 +194,7 @@ public abstract class AbstractTestCase e
     return phraseQuery;
   }
 
-  static final class BigramAnalyzer extends ReusableAnalyzerBase {
+  static final class BigramAnalyzer extends Analyzer {
     @Override
     public TokenStreamComponents createComponents(String fieldName, Reader reader) {
       return new TokenStreamComponents(new BasicNGramTokenizer(reader));
@@ -359,7 +359,7 @@ public abstract class AbstractTestCase e
     customType.setStoreTermVectorOffsets(true);
     customType.setStoreTermVectorPositions(true);
     for( String value: values ) {
-      doc.add( new Field( F, customType, value ) );
+      doc.add( new Field( F, value, customType) );
     }
     writer.addDocument( doc );
     writer.close();
@@ -377,7 +377,7 @@ public abstract class AbstractTestCase e
     customType.setStoreTermVectorOffsets(true);
     customType.setStoreTermVectorPositions(true);
     for( String value: values ) {
-      doc.add( new Field( F, customType, value ));
+      doc.add( new Field( F, value, customType));
       //doc.add( new Field( F, value, Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS ) );
     }
     writer.addDocument( doc );

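AbstractTestCase also drops reusableTokenStream: callers now ask the analyzer for tokenStream(...) and receive the (possibly reused) stream directly. A sketch of the consumer-side lifecycle under that assumption; the helper class is illustrative:

    import java.io.IOException;
    import java.io.StringReader;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    final class StreamConsumer {
      static void printTerms(Analyzer analyzer, String field, String text) throws IOException {
        TokenStream stream = analyzer.tokenStream(field, new StringReader(text));
        CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
        stream.reset(); // position before the first token
        while (stream.incrementToken()) {
          System.out.println(termAtt.toString());
        }
        stream.end();   // record the final offset state
        stream.close();
      }
    }
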
Modified: lucene/dev/branches/lucene2621/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/IndexTimeSynonymTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/IndexTimeSynonymTest.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/IndexTimeSynonymTest.java (original)
+++ lucene/dev/branches/lucene2621/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/IndexTimeSynonymTest.java Sat Oct  1 03:04:53 2011
@@ -290,7 +290,7 @@ public class IndexTimeSynonymTest extend
     return token;
   }
   
-  public static final class TokenArrayAnalyzer extends ReusableAnalyzerBase {
+  public static final class TokenArrayAnalyzer extends Analyzer {
     final Token[] tokens;
     public TokenArrayAnalyzer(Token... tokens) {
       this.tokens = tokens;

Modified: lucene/dev/branches/lucene2621/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java (original)
+++ lucene/dev/branches/lucene2621/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java Sat Oct  1 03:04:53 2011
@@ -142,7 +142,7 @@ public class SimpleFragmentsBuilderTest 
     customType.setStoreTermVectors(true);
     customType.setStoreTermVectorOffsets(true);
     customType.setStoreTermVectorPositions(true);
-    doc.add( new Field( F, customType, "aaa" ) );
+    doc.add( new Field( F, "aaa", customType) );
     //doc.add( new Field( F, "aaa", Store.NO, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS ) );
     writer.addDocument( doc );
     writer.close();

Modified: lucene/dev/branches/lucene2621/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java (original)
+++ lucene/dev/branches/lucene2621/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java Sat Oct  1 03:04:53 2011
@@ -211,39 +211,39 @@ public class TestIndicesEquals extends L
     customType.setStoreTermVectorOffsets(true);
     customType.setStoreTermVectorPositions(true);
     //document.add(new Field("a", i + " Do you really want to go and live in that house all winter?", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-    document.add(new Field("a", customType, i + " Do you really want to go and live in that house all winter?"));
+    document.add(new Field("a", i + " Do you really want to go and live in that house all winter?", customType));
     if (i > 0) {
       //document.add(new Field("b0", i + " All work and no play makes Jack a dull boy", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-      document.add(new Field("b0", customType, i + " All work and no play makes Jack a dull boy"));
+      document.add(new Field("b0", i + " All work and no play makes Jack a dull boy", customType));
 
       //document.add(new Field("b1", i + " All work and no play makes Jack a dull boy", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS, Field.TermVector.NO));
       FieldType customType2 = new FieldType(TextField.TYPE_STORED);
       customType2.setTokenized(false);
       customType2.setOmitNorms(true);
-      document.add(new Field("b1", customType2, i + " All work and no play makes Jack a dull boy"));
+      document.add(new Field("b1", i + " All work and no play makes Jack a dull boy", customType2));
       
       //document.add(new Field("b2", i + " All work and no play makes Jack a dull boy", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.NO));
       FieldType customType3 = new FieldType(TextField.TYPE_UNSTORED);
       customType3.setTokenized(false);
-      document.add(new Field("b1", customType3, i + " All work and no play makes Jack a dull boy"));
+      document.add(new Field("b1", i + " All work and no play makes Jack a dull boy", customType3));
       
       //document.add(new Field("b3", i + " All work and no play makes Jack a dull boy", Field.Store.YES, Field.Index.NO, Field.TermVector.NO));
       FieldType customType4 = new FieldType(TextField.TYPE_STORED);
       customType4.setIndexed(false);
       customType4.setTokenized(false);
-      document.add(new Field("b1", customType4, i + " All work and no play makes Jack a dull boy"));
+      document.add(new Field("b1", i + " All work and no play makes Jack a dull boy", customType4));
       if (i > 1) {
         //document.add(new Field("c", i + " Redrum redrum", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-        document.add(new Field("c", customType, i + " Redrum redrum"));
+        document.add(new Field("c", i + " Redrum redrum", customType));
         if (i > 2) {
           //document.add(new Field("d", i + " Hello Danny, come and play with us... forever and ever. and ever.", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-          document.add(new Field("d", customType, i + " Hello Danny, come and play with us... forever and ever. and ever."));
+          document.add(new Field("d", i + " Hello Danny, come and play with us... forever and ever. and ever.", customType));
           if (i > 3) {
             //Field f = new Field("e", i + " Heres Johnny!", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
             //f.setOmitNorms(true);
             FieldType customType5 = new FieldType(TextField.TYPE_UNSTORED);
             customType5.setOmitNorms(true);
-            Field f = new Field("e", customType5, i + " Heres Johnny!");
+            Field f = new Field("e", i + " Heres Johnny!", customType5);
             document.add(f);
             if (i > 4) {
               final List<Token> tokens = new ArrayList<Token>(2);

Modified: lucene/dev/branches/lucene2621/lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java (original)
+++ lucene/dev/branches/lucene2621/lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java Sat Oct  1 03:04:53 2011
@@ -41,7 +41,6 @@ import org.apache.lucene.index.FieldInve
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.FieldsEnum;
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.OrdTermState;
 import org.apache.lucene.index.StoredFieldVisitor;
@@ -261,7 +260,7 @@ public class MemoryIndex {
     
     TokenStream stream;
     try {
-      stream = analyzer.reusableTokenStream(fieldName, new StringReader(text));
+      stream = analyzer.tokenStream(fieldName, new StringReader(text));
     } catch (IOException ex) {
       throw new RuntimeException(ex);
     }

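MemoryIndex makes the same reusableTokenStream-to-tokenStream switch on its internal analysis path. For context, a brief usage sketch of the class being touched, assuming its long-standing public API:

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.index.memory.MemoryIndex;
    import org.apache.lucene.search.Query;

    final class MemoryIndexExample {
      static float score(Analyzer analyzer, Query query) {
        MemoryIndex index = new MemoryIndex();
        // addField analyzes the text with the given analyzer; this is the
        // call path that now goes through analyzer.tokenStream(...).
        index.addField("content", "readings about salmon fishing", analyzer);
        return index.search(query); // > 0 on a match, 0 otherwise
      }
    }
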
Modified: lucene/dev/branches/lucene2621/lucene/contrib/misc/src/java/org/apache/lucene/document/FieldSelectorVisitor.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/contrib/misc/src/java/org/apache/lucene/document/FieldSelectorVisitor.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/contrib/misc/src/java/org/apache/lucene/document/FieldSelectorVisitor.java (original)
+++ lucene/dev/branches/lucene2621/lucene/contrib/misc/src/java/org/apache/lucene/document/FieldSelectorVisitor.java Sat Oct  1 03:04:53 2011
@@ -90,7 +90,7 @@ public class FieldSelectorVisitor extend
       ft.setStoreTermVectors(fieldInfo.storeTermVector);
       ft.setStoreTermVectorOffsets(fieldInfo.storeOffsetWithTermVector);
       ft.setStoreTermVectorPositions(fieldInfo.storePositionWithTermVector);
-      doc.add(new Field(fieldInfo.name, ft, new String(b, "UTF-8"))); 
+      doc.add(new Field(fieldInfo.name, new String(b, "UTF-8"), ft));
       return accept != FieldSelectorResult.LOAD;
     case LAZY_LOAD:
     case LATENT:

Modified: lucene/dev/branches/lucene2621/lucene/contrib/misc/src/java/org/apache/lucene/index/NRTManager.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/contrib/misc/src/java/org/apache/lucene/index/NRTManager.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/contrib/misc/src/java/org/apache/lucene/index/NRTManager.java (original)
+++ lucene/dev/branches/lucene2621/lucene/contrib/misc/src/java/org/apache/lucene/index/NRTManager.java Sat Oct  1 03:04:53 2011
@@ -19,16 +19,16 @@ package org.apache.lucene.index;
 
 import java.io.Closeable;
 import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.List;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.index.IndexReader;       // javadocs
-import org.apache.lucene.document.Document;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.SearcherWarmer;
 import org.apache.lucene.util.ThreadInterruptedException;
 
 // TODO
@@ -46,7 +46,7 @@ import org.apache.lucene.util.ThreadInte
  * caller is waiting for a specific generation searcher. </p>
  *
  * @lucene.experimental
-*/
+ */
 
 public class NRTManager implements Closeable {
   private final IndexWriter writer;
@@ -54,36 +54,36 @@ public class NRTManager implements Close
   private final AtomicLong indexingGen;
   private final AtomicLong searchingGen;
   private final AtomicLong noDeletesSearchingGen;
+  private final SearcherWarmer warmer;
   private final List<WaitingListener> waitingListeners = new CopyOnWriteArrayList<WaitingListener>();
 
   private volatile IndexSearcher currentSearcher;
   private volatile IndexSearcher noDeletesCurrentSearcher;
 
   /**
-   * Create new NRTManager.  Note that this installs a
-   * merged segment warmer on the provided IndexWriter's
-   * config.
-   * 
-   *  @param writer IndexWriter to open near-real-time
-   *         readers
-  */
-  public NRTManager(IndexWriter writer) throws IOException {
-    this(writer, null);
-  }
-
-  /**
-   * Create new NRTManager.  Note that this installs a
-   * merged segment warmer on the provided IndexWriter's
-   * config.
+   * Create new NRTManager.
    * 
    *  @param writer IndexWriter to open near-real-time
    *         readers
-   *  @param es ExecutorService to pass to the IndexSearcher
-  */
-  public NRTManager(IndexWriter writer, ExecutorService es) throws IOException {
+   *  @param es optional ExecutorService so different segments can
+   *         be searched concurrently (see {@link
+   *         IndexSearcher#IndexSearcher(IndexReader,ExecutorService)}).  Pass null
+   *         to search segments sequentially.
+   *  @param warmer optional {@link SearcherWarmer}.  Pass
+   *         null if you don't require the searcher to be warmed
+   *         before going live.  If this is non-null then a
+   *         merged segment warmer is installed on the
+   *         provided IndexWriter's config.
+   *
+   *  <p><b>NOTE</b>: the provided {@link SearcherWarmer} is
+   *  not invoked for the initial searcher; you should
+   *  warm it yourself if necessary.
+   */
+  public NRTManager(IndexWriter writer, ExecutorService es, SearcherWarmer warmer) throws IOException {
 
     this.writer = writer;
     this.es = es;
+    this.warmer = warmer;
     indexingGen = new AtomicLong(1);
     searchingGen = new AtomicLong(-1);
     noDeletesSearchingGen = new AtomicLong(-1);
@@ -91,13 +91,15 @@ public class NRTManager implements Close
     // Create initial reader:
     swapSearcher(new IndexSearcher(IndexReader.open(writer, true), es), 0, true);
 
-    writer.getConfig().setMergedSegmentWarmer(
+    if (this.warmer != null) {
+      writer.getConfig().setMergedSegmentWarmer(
          new IndexWriter.IndexReaderWarmer() {
            @Override
            public void warm(IndexReader reader) throws IOException {
-             NRTManager.this.warm(reader);
+             NRTManager.this.warmer.warm(new IndexSearcher(reader, NRTManager.this.es));
            }
          });
+    }
   }
 
   /** NRTManager invokes this interface to notify it when a
@@ -120,25 +122,25 @@ public class NRTManager implements Close
     waitingListeners.remove(l);
   }
 
-  public long updateDocument(Term t, Document d, Analyzer a) throws IOException {
+  public long updateDocument(Term t, Iterable<? extends IndexableField> d, Analyzer a) throws IOException {
     writer.updateDocument(t, d, a);
     // Return gen as of when indexing finished:
     return indexingGen.get();
   }
 
-  public long updateDocument(Term t, Document d) throws IOException {
+  public long updateDocument(Term t, Iterable<? extends IndexableField> d) throws IOException {
     writer.updateDocument(t, d);
     // Return gen as of when indexing finished:
     return indexingGen.get();
   }
 
-  public long updateDocuments(Term t, Iterable<Document> docs, Analyzer a) throws IOException {
+  public long updateDocuments(Term t, Iterable<? extends Iterable<? extends IndexableField>> docs, Analyzer a) throws IOException {
     writer.updateDocuments(t, docs, a);
     // Return gen as of when indexing finished:
     return indexingGen.get();
   }
 
-  public long updateDocuments(Term t, Iterable<Document> docs) throws IOException {
+  public long updateDocuments(Term t, Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException {
     writer.updateDocuments(t, docs);
     // Return gen as of when indexing finished:
     return indexingGen.get();
@@ -156,25 +158,25 @@ public class NRTManager implements Close
     return indexingGen.get();
   }
 
-  public long addDocument(Document d, Analyzer a) throws IOException {
+  public long addDocument(Iterable<? extends IndexableField> d, Analyzer a) throws IOException {
     writer.addDocument(d, a);
     // Return gen as of when indexing finished:
     return indexingGen.get();
   }
 
-  public long addDocuments(Iterable<Document> docs, Analyzer a) throws IOException {
+  public long addDocuments(Iterable<? extends Iterable<? extends IndexableField>> docs, Analyzer a) throws IOException {
     writer.addDocuments(docs, a);
     // Return gen as of when indexing finished:
     return indexingGen.get();
   }
 
-  public long addDocument(Document d) throws IOException {
+  public long addDocument(Iterable<? extends IndexableField> d) throws IOException {
     writer.addDocument(d);
     // Return gen as of when indexing finished:
     return indexingGen.get();
   }
 
-  public long addDocuments(Iterable<Document> docs) throws IOException {
+  public long addDocuments(Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException {
     writer.addDocuments(docs);
     // Return gen as of when indexing finished:
     return indexingGen.get();
@@ -262,7 +264,10 @@ public class NRTManager implements Close
   }
 
   /** Release the searcher obtained from {@link
-   *  #get()} or {@link #get(long)}. */
+   *  #get()} or {@link #get(long)}.
+   *
+   *  <p><b>NOTE</b>: it's safe to call this after {@link
+   *  #close}. */
   public void release(IndexSearcher s) throws IOException {
     s.getIndexReader().decRef();
   }
@@ -304,21 +309,28 @@ public class NRTManager implements Close
     final IndexSearcher startSearcher = noDeletesSearchingGen.get() > searchingGen.get() ? noDeletesCurrentSearcher : currentSearcher;
     final IndexReader nextReader = startSearcher.getIndexReader().reopen(writer, applyDeletes);
 
-    warm(nextReader);
-
-    // Transfer reference to swapSearcher:
-    swapSearcher(new IndexSearcher(nextReader, es),
-                 newSearcherGen,
-                 applyDeletes);
-
-    return true;
-  }
+    if (nextReader != startSearcher.getIndexReader()) {
+      final IndexSearcher nextSearcher = new IndexSearcher(nextReader, es);
+      if (warmer != null) {
+        boolean success = false;
+        try {
+          warmer.warm(nextSearcher);
+          success = true;
+        } finally {
+          if (!success) {
+            nextReader.decRef();
+          }
+        }
+      }
 
-  /** Override this to warm the newly opened reader before
-   *  it's swapped in.  Note that this is called both for
-   *  newly merged segments and for new top-level readers
-   *  opened by #reopen. */
-  protected void warm(IndexReader reader) throws IOException {
+      // Transfer reference to swapSearcher:
+      swapSearcher(nextSearcher,
+                   newSearcherGen,
+                   applyDeletes);
+      return true;
+    } else {
+      return false;
+    }
   }
 
   // Steals a reference from newSearcher:
@@ -350,7 +362,12 @@ public class NRTManager implements Close
     //System.out.println(Thread.currentThread().getName() + ": done");
   }
 
-  /** NOTE: caller must separately close the writer. */
+  /** Close this NRTManager to future searching.  Any
+   *  searches still in process in other threads won't be
+   *  affected, and they should still call {@link #release}
+   *  after they are done.
+   *
+   * <p><b>NOTE</b>: caller must separately close the writer. */
   @Override
   public void close() throws IOException {
     swapSearcher(null, indexingGen.getAndIncrement(), true);

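The NRTManager rewrite is the substantive change in this mail: the two old constructors give way to one that takes an optional SearcherWarmer, warming moves off the overridable warm(IndexReader) hook, maybeReopen skips the swap (and returns false) when the reader did not actually change, and the add/update methods accept Iterable<? extends IndexableField> rather than Document. A hedged usage sketch of the new surface, mirroring the TestNRTManager changes later in this mail; variable names and the reopen intervals are illustrative:

    import java.io.IOException;

    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.NRTManager;
    import org.apache.lucene.index.NRTManagerReopenThread;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.SearcherWarmer;
    import org.apache.lucene.search.TermQuery;

    final class NRTManagerSketch {
      static void indexAndSearch(IndexWriter writer, Document doc) throws IOException {
        // es = null searches segments sequentially; the warmer runs a
        // representative query so new readers are hot before going live.
        NRTManager nrt = new NRTManager(writer, null, new SearcherWarmer() {
          @Override
          public void warm(IndexSearcher s) throws IOException {
            s.search(new TermQuery(new Term("body", "united")), 10);
          }
        });

        // Background reopens: at most every 0.1 sec when a caller is
        // waiting for a generation, otherwise every 5.0 sec.
        NRTManagerReopenThread reopenThread = new NRTManagerReopenThread(nrt, 5.0, 0.1);
        reopenThread.setDaemon(true);
        reopenThread.start();

        long gen = nrt.updateDocument(new Term("id", "42"), doc);

        IndexSearcher s = nrt.get(gen, true); // blocks until a searcher covering gen is live
        try {
          // ... run searches against s ...
        } finally {
          nrt.release(s); // decRefs the underlying reader
        }

        reopenThread.close();
        nrt.close(); // the caller must still close the writer separately
      }
    }

Passing a null warmer skips installing the merged-segment warmer entirely, which is why the old no-warmer constructors could be dropped without losing behavior.
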
Modified: lucene/dev/branches/lucene2621/lucene/contrib/misc/src/java/org/apache/lucene/index/codecs/appending/AppendingCodec.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/contrib/misc/src/java/org/apache/lucene/index/codecs/appending/AppendingCodec.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/contrib/misc/src/java/org/apache/lucene/index/codecs/appending/AppendingCodec.java (original)
+++ lucene/dev/branches/lucene2621/lucene/contrib/misc/src/java/org/apache/lucene/index/codecs/appending/AppendingCodec.java Sat Oct  1 03:04:53 2011
@@ -137,22 +137,22 @@ public class AppendingCodec extends Code
     StandardPostingsReader.files(dir, segmentInfo, codecId, files);
     BlockTermsReader.files(dir, segmentInfo, codecId, files);
     FixedGapTermsIndexReader.files(dir, segmentInfo, codecId, files);
-    DefaultDocValuesConsumer.files(dir, segmentInfo, codecId, files, getDocValuesUseCFS());
+    DefaultDocValuesConsumer.files(dir, segmentInfo, codecId, files);
   }
 
   @Override
   public void getExtensions(Set<String> extensions) {
     StandardCodec.getStandardExtensions(extensions);
-    DefaultDocValuesConsumer.getDocValuesExtensions(extensions, getDocValuesUseCFS());
+    DefaultDocValuesConsumer.getExtensions(extensions);
   }
   
   @Override
   public PerDocConsumer docsConsumer(PerDocWriteState state) throws IOException {
-    return new DefaultDocValuesConsumer(state, getDocValuesSortComparator(), getDocValuesUseCFS());
+    return new DefaultDocValuesConsumer(state);
   }
 
   @Override
   public PerDocValues docsProducer(SegmentReadState state) throws IOException {
-    return new DefaultDocValuesProducer(state.segmentInfo, state.dir, state.fieldInfos, state.codecId, getDocValuesUseCFS(), getDocValuesSortComparator(), state.context);
+    return new DefaultDocValuesProducer(state);
   }
 }

Modified: lucene/dev/branches/lucene2621/lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java (original)
+++ lucene/dev/branches/lucene2621/lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java Sat Oct  1 03:04:53 2011
@@ -113,10 +113,10 @@ public class TestIndexSplitter extends L
     Directory fsDir = newFSDirectory(indexPath);
     IndexWriter indexWriter = new IndexWriter(fsDir, iwConfig);
     Document doc = new Document();
-    doc.add(new Field("content", StringField.TYPE_STORED, "doc 1"));
+    doc.add(new Field("content", "doc 1", StringField.TYPE_STORED));
     indexWriter.addDocument(doc);
     doc = new Document();
-    doc.add(new Field("content", StringField.TYPE_STORED, "doc 2"));
+    doc.add(new Field("content", "doc 2", StringField.TYPE_STORED));
     indexWriter.addDocument(doc);
     indexWriter.close();
     fsDir.close();

Modified: lucene/dev/branches/lucene2621/lucene/contrib/misc/src/test/org/apache/lucene/index/TestNRTManager.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/contrib/misc/src/test/org/apache/lucene/index/TestNRTManager.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/contrib/misc/src/test/org/apache/lucene/index/TestNRTManager.java (original)
+++ lucene/dev/branches/lucene2621/lucene/contrib/misc/src/test/org/apache/lucene/index/TestNRTManager.java Sat Oct  1 03:04:53 2011
@@ -17,155 +17,160 @@ package org.apache.lucene.index;
  * limitations under the License.
  */
 
-import java.io.File;
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
 import java.util.List;
-import java.util.Set;
 import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.codecs.CodecProvider;
+
 import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.PhraseQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.Sort;
-import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.SearcherWarmer;
 import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.store.NRTCachingDirectory;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LineFileDocs;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.NamedThreadFactory;
-import org.apache.lucene.util._TestUtil;
-import org.junit.Test;
-
-// TODO
-//   - mix in optimize, addIndexes
-//   - randomoly mix in non-congruent docs
-
-// NOTE: This is a copy of TestNRTThreads, but swapping in
-// NRTManager for adding/updating/searching
-
-public class TestNRTManager extends LuceneTestCase {
-
-  private static class SubDocs {
-    public final String packID;
-    public final List<String> subIDs;
-    public boolean deleted;
-
-    public SubDocs(String packID, List<String> subIDs) {
-      this.packID = packID;
-      this.subIDs = subIDs;
-    }
-  }
 
-  // TODO: is there a pre-existing way to do this!!!
-  private Document cloneDoc(Document doc1) {
-    final Document doc2 = new Document();
-    for(IndexableField f : doc1) {
-      Field field1 = (Field) f;
-      
-      Field field2 = new Field(field1.name(),
-                              ((Field) f).fieldType(),
-                               field1.stringValue());
-      doc2.add(field2);
-    }
+public class TestNRTManager extends ThreadedIndexingAndSearchingTestCase {
 
-    return doc2;
-  }
+  private final ThreadLocal<Long> lastGens = new ThreadLocal<Long>();
+  private boolean warmCalled;
 
-  @Test
   public void testNRTManager() throws Exception {
+    runTest("TestNRTManager");
+  }
 
-    final long t0 = System.currentTimeMillis();
+  @Override
+  protected IndexSearcher getFinalSearcher() throws Exception  {
+    if (VERBOSE) {
+      System.out.println("TEST: finalSearcher maxGen=" + maxGen);
+    }
+    return nrt.get(maxGen, true);
+  }
 
-    if (CodecProvider.getDefault().getDefaultFieldCodec().equals("SimpleText")) {
-      // no
-      CodecProvider.getDefault().setDefaultFieldCodec("Standard");
+  @Override
+  protected Directory getDirectory(Directory in) {
+    // Randomly swap in NRTCachingDir
+    if (random.nextBoolean()) {
+      if (VERBOSE) {
+        System.out.println("TEST: wrap NRTCachingDir");
+      }
+
+      return new NRTCachingDirectory(in, 5.0, 60.0);
+    } else {
+      return in;
     }
+  }
+
+  @Override
+  protected void updateDocuments(Term id, List<? extends Iterable<? extends IndexableField>> docs) throws Exception {
+    final long gen = nrt.updateDocuments(id, docs);
 
-    final LineFileDocs docs = new LineFileDocs(random);
-    final File tempDir = _TestUtil.getTempDir("nrtopenfiles");
-    final MockDirectoryWrapper _dir = newFSDirectory(tempDir);
-    _dir.setCheckIndexOnClose(false);  // don't double-checkIndex, we do it ourselves
-    Directory dir = _dir;
-    final IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(IndexWriterConfig.OpenMode.CREATE);
-
-    if (LuceneTestCase.TEST_NIGHTLY) {
-      // newIWConfig makes smallish max seg size, which
-      // results in tons and tons of segments for this test
-      // when run nightly:
-      MergePolicy mp = conf.getMergePolicy();
-      if (mp instanceof TieredMergePolicy) {
-        ((TieredMergePolicy) mp).setMaxMergedSegmentMB(5000.);
-      } else if (mp instanceof LogByteSizeMergePolicy) {
-        ((LogByteSizeMergePolicy) mp).setMaxMergeMB(1000.);
-      } else if (mp instanceof LogMergePolicy) {
-        ((LogMergePolicy) mp).setMaxMergeDocs(100000);
+    // Randomly verify the update "took":
+    if (random.nextInt(20) == 2) {
+      if (VERBOSE) {
+        System.out.println(Thread.currentThread().getName() + ": nrt: verify " + id);
+      }
+      final IndexSearcher s = nrt.get(gen, true);
+      if (VERBOSE) {
+        System.out.println(Thread.currentThread().getName() + ": nrt: got searcher=" + s);
+      }
+      try {
+        assertEquals(docs.size(), s.search(new TermQuery(id), 10).totalHits);
+      } finally {
+        nrt.release(s);
       }
     }
+    
+    lastGens.set(gen);
+  }
 
-    conf.setMergedSegmentWarmer(new IndexWriter.IndexReaderWarmer() {
-      @Override
-      public void warm(IndexReader reader) throws IOException {
-        if (VERBOSE) {
-          System.out.println("TEST: now warm merged reader=" + reader);
-        }
-        final int maxDoc = reader.maxDoc();
-        final Bits liveDocs = reader.getLiveDocs();
-        int sum = 0;
-        final int inc = Math.max(1, maxDoc/50);
-        for(int docID=0;docID<maxDoc;docID += inc) {
-          if (liveDocs == null || liveDocs.get(docID)) {
-            final Document doc = reader.document(docID);
-            sum += doc.getFields().size();
-          }
-        }
-
-        IndexSearcher searcher = newSearcher(reader);
-        sum += searcher.search(new TermQuery(new Term("body", "united")), 10).totalHits;
-        searcher.close();
-
-        if (VERBOSE) {
-          System.out.println("TEST: warm visited " + sum + " fields");
-        }
+  @Override
+  protected void addDocuments(Term id, List<? extends Iterable<? extends IndexableField>> docs) throws Exception {
+    final long gen = nrt.addDocuments(docs);
+    // Randomly verify the add "took":
+    if (random.nextInt(20) == 2) {
+      if (VERBOSE) {
+        System.out.println(Thread.currentThread().getName() + ": nrt: verify " + id);
+      }
+      final IndexSearcher s = nrt.get(gen, false);
+      if (VERBOSE) {
+        System.out.println(Thread.currentThread().getName() + ": nrt: got searcher=" + s);
       }
-      });
+      try {
+        assertEquals(docs.size(), s.search(new TermQuery(id), 10).totalHits);
+      } finally {
+        nrt.release(s);
+      }
+    }
+    lastGens.set(gen);
+  }
 
-    if (random.nextBoolean()) {
+  @Override
+  protected void addDocument(Term id, Iterable<? extends IndexableField> doc) throws Exception {
+    final long gen = nrt.addDocument(doc);
+
+    // Randomly verify the add "took":
+    if (random.nextInt(20) == 2) {
       if (VERBOSE) {
-        System.out.println("TEST: wrap NRTCachingDir");
+        System.out.println(Thread.currentThread().getName() + ": nrt: verify " + id);
       }
+      final IndexSearcher s = nrt.get(gen, false);
+      if (VERBOSE) {
+        System.out.println(Thread.currentThread().getName() + ": nrt: got searcher=" + s);
+      }
+      try {
+        assertEquals(1, s.search(new TermQuery(id), 10).totalHits);
+      } finally {
+        nrt.release(s);
+      }
+    }
+    lastGens.set(gen);
+  }
 
-      dir = new NRTCachingDirectory(dir, 5.0, 60.0);
+  @Override
+  protected void updateDocument(Term id, Iterable<? extends IndexableField> doc) throws Exception {
+    final long gen = nrt.updateDocument(id, doc);
+    // Randomly verify the update "took":
+    if (random.nextInt(20) == 2) {
+      if (VERBOSE) {
+        System.out.println(Thread.currentThread().getName() + ": nrt: verify " + id);
+      }
+      final IndexSearcher s = nrt.get(gen, true);
+      if (VERBOSE) {
+        System.out.println(Thread.currentThread().getName() + ": nrt: got searcher=" + s);
+      }
+      try {
+        assertEquals(1, s.search(new TermQuery(id), 10).totalHits);
+      } finally {
+        nrt.release(s);
+      }
     }
-    
-    final IndexWriter writer = new IndexWriter(dir, conf);
-    
-    if (VERBOSE) {
-      writer.setInfoStream(System.out);
+    lastGens.set(gen);
+  }
+
+  @Override
+  protected void deleteDocuments(Term id) throws Exception {
+    final long gen = nrt.deleteDocuments(id);
+    // Randomly verify the delete "took":
+    if (random.nextInt(20) == 7) {
+      if (VERBOSE) {
+        System.out.println(Thread.currentThread().getName() + ": nrt: verify del " + id);
+      }
+      final IndexSearcher s = nrt.get(gen, true);
+      if (VERBOSE) {
+        System.out.println(Thread.currentThread().getName() + ": nrt: got searcher=" + s);
+      }
+      try {
+        assertEquals(0, s.search(new TermQuery(id), 10).totalHits);
+      } finally {
+        nrt.release(s);
+      }
     }
-    _TestUtil.reduceOpenFiles(writer);
-    //System.out.println("TEST: conf=" + writer.getConfig());
+    lastGens.set(gen);
+  }
 
-    final ExecutorService es = random.nextBoolean() ? null : Executors.newCachedThreadPool(new NamedThreadFactory("NRT search threads"));
+  private NRTManager nrt;
+  private NRTManagerReopenThread nrtThread;
 
+  @Override
+  protected void doAfterWriter(ExecutorService es) throws Exception {
     final double minReopenSec = 0.01 + 0.05 * random.nextDouble();
     final double maxReopenSec = minReopenSec * (1.0 + 10 * random.nextDouble());
 
@@ -173,506 +178,57 @@ public class TestNRTManager extends Luce
       System.out.println("TEST: make NRTManager maxReopenSec=" + maxReopenSec + " minReopenSec=" + minReopenSec);
     }
 
-    final NRTManager nrt = new NRTManager(writer, es);
-    final NRTManagerReopenThread nrtThread = new NRTManagerReopenThread(nrt, maxReopenSec, minReopenSec);
+    nrt = new NRTManager(writer, es,
+                         new SearcherWarmer() {
+                           @Override
+                           public void warm(IndexSearcher s) throws IOException {
+                             TestNRTManager.this.warmCalled = true;
+                             s.search(new TermQuery(new Term("body", "united")), 10);
+                           }
+                         });
+    nrtThread = new NRTManagerReopenThread(nrt, maxReopenSec, minReopenSec);
     nrtThread.setName("NRT Reopen Thread");
     nrtThread.setPriority(Math.min(Thread.currentThread().getPriority()+2, Thread.MAX_PRIORITY));
     nrtThread.setDaemon(true);
     nrtThread.start();
+  }
 
-    final int NUM_INDEX_THREADS = _TestUtil.nextInt(random, 1, 3);
-    final int NUM_SEARCH_THREADS = _TestUtil.nextInt(random, 1, 3);
-    //final int NUM_INDEX_THREADS = 1;
-    //final int NUM_SEARCH_THREADS = 1;
-    if (VERBOSE) {
-      System.out.println("TEST: " + NUM_INDEX_THREADS + " index threads; " + NUM_SEARCH_THREADS + " search threads");
-    }
-
-    final int RUN_TIME_SEC = LuceneTestCase.TEST_NIGHTLY ? 300 : RANDOM_MULTIPLIER;
-
-    final AtomicBoolean failed = new AtomicBoolean();
-    final AtomicInteger addCount = new AtomicInteger();
-    final AtomicInteger delCount = new AtomicInteger();
-    final AtomicInteger packCount = new AtomicInteger();
-    final List<Long> lastGens = new ArrayList<Long>();
-
-    final Set<String> delIDs = Collections.synchronizedSet(new HashSet<String>());
-    final List<SubDocs> allSubDocs = Collections.synchronizedList(new ArrayList<SubDocs>());
-
-    final long stopTime = System.currentTimeMillis() + RUN_TIME_SEC*1000;
-    Thread[] threads = new Thread[NUM_INDEX_THREADS];
-    for(int thread=0;thread<NUM_INDEX_THREADS;thread++) {
-      threads[thread] = new Thread() {
-          @Override
-          public void run() {
-            // TODO: would be better if this were cross thread, so that we make sure one thread deleting anothers added docs works:
-            final List<String> toDeleteIDs = new ArrayList<String>();
-            final List<SubDocs> toDeleteSubDocs = new ArrayList<SubDocs>();
-
-            long gen = 0;
-            while(System.currentTimeMillis() < stopTime && !failed.get()) {
-
-              //System.out.println(Thread.currentThread().getName() + ": cycle");
-              try {
-                // Occassional longish pause if running
-                // nightly
-                if (LuceneTestCase.TEST_NIGHTLY && random.nextInt(6) == 3) {
-                  if (VERBOSE) {
-                    System.out.println(Thread.currentThread().getName() + ": now long sleep");
-                  }
-                  Thread.sleep(_TestUtil.nextInt(random, 50, 500));
-                }
-
-                // Rate limit ingest rate:
-                Thread.sleep(_TestUtil.nextInt(random, 1, 10));
-                if (VERBOSE) {
-                  System.out.println(Thread.currentThread() + ": done sleep");
-                }
-
-                Document doc = docs.nextDoc();
-                if (doc == null) {
-                  break;
-                }
-                final String addedField;
-                if (random.nextBoolean()) {
-                  addedField = "extra" + random.nextInt(10);
-                  doc.add(new TextField(addedField, "a random field"));
-                } else {
-                  addedField = null;
-                }
-                if (random.nextBoolean()) {
-
-                  if (random.nextBoolean()) {
-                    // Add a pack of adjacent sub-docs
-                    final String packID;
-                    final SubDocs delSubDocs;
-                    if (toDeleteSubDocs.size() > 0 && random.nextBoolean()) {
-                      delSubDocs = toDeleteSubDocs.get(random.nextInt(toDeleteSubDocs.size()));
-                      assert !delSubDocs.deleted;
-                      toDeleteSubDocs.remove(delSubDocs);
-                      // reuse prior packID
-                      packID = delSubDocs.packID;
-                    } else {
-                      delSubDocs = null;
-                      // make new packID
-                      packID = packCount.getAndIncrement() + "";
-                    }
-
-                    final Field packIDField = newField("packID", packID, StringField.TYPE_STORED);
-                    final List<String> docIDs = new ArrayList<String>();
-                    final SubDocs subDocs = new SubDocs(packID, docIDs);
-                    final List<Document> docsList = new ArrayList<Document>();
-
-                    allSubDocs.add(subDocs);
-                    doc.add(packIDField);
-                    docsList.add(cloneDoc(doc));
-                    docIDs.add(doc.get("docid"));
-
-                    final int maxDocCount = _TestUtil.nextInt(random, 1, 10);
-                    while(docsList.size() < maxDocCount) {
-                      doc = docs.nextDoc();
-                      if (doc == null) {
-                        break;
-                      }
-                      docsList.add(cloneDoc(doc));
-                      docIDs.add(doc.get("docid"));
-                    }
-                    addCount.addAndGet(docsList.size());
-
-                    if (delSubDocs != null) {
-                      delSubDocs.deleted = true;
-                      delIDs.addAll(delSubDocs.subIDs);
-                      delCount.addAndGet(delSubDocs.subIDs.size());
-                      if (VERBOSE) {
-                        System.out.println("TEST: update pack packID=" + delSubDocs.packID + " count=" + docsList.size() + " docs=" + docIDs);
-                      }
-                      gen = nrt.updateDocuments(new Term("packID", delSubDocs.packID), docsList);
-                      /*
-                      // non-atomic:
-                      nrt.deleteDocuments(new Term("packID", delSubDocs.packID));
-                      for(Document subDoc : docsList) {
-                        nrt.addDocument(subDoc);
-                      }
-                      */
-                    } else {
-                      if (VERBOSE) {
-                        System.out.println("TEST: add pack packID=" + packID + " count=" + docsList.size() + " docs=" + docIDs);
-                      }
-                      gen = nrt.addDocuments(docsList);
-                      
-                      /*
-                      // non-atomic:
-                      for(Document subDoc : docsList) {
-                        nrt.addDocument(subDoc);
-                      }
-                      */
-                    }
-                    doc.removeField("packID");
-
-                    if (random.nextInt(5) == 2) {
-                      if (VERBOSE) {
-                        System.out.println(Thread.currentThread().getName() + ": buffer del id:" + packID);
-                      }
-                      toDeleteSubDocs.add(subDocs);
-                    }
-
-                    // randomly verify the add/update "took":
-                    if (random.nextInt(20) == 2) {
-                      final boolean applyDeletes = delSubDocs != null;
-                      final IndexSearcher s = nrt.get(gen, applyDeletes);
-                      try {
-                        assertEquals(docsList.size(), s.search(new TermQuery(new Term("packID", packID)), 10).totalHits);
-                      } finally {
-                        nrt.release(s);
-                      }
-                    }
-
-                  } else {
-                    if (VERBOSE) {
-                      System.out.println(Thread.currentThread().getName() + ": add doc docid:" + doc.get("docid"));
-                    }
-
-                    gen = nrt.addDocument(doc);
-                    addCount.getAndIncrement();
-
-                    // randomly verify the add "took":
-                    if (random.nextInt(20) == 2) {
-                      //System.out.println(Thread.currentThread().getName() + ": verify");
-                      final IndexSearcher s = nrt.get(gen, false);
-                      //System.out.println(Thread.currentThread().getName() + ": got s=" + s);
-                      try {
-                        assertEquals(1, s.search(new TermQuery(new Term("docid", doc.get("docid"))), 10).totalHits);
-                      } finally {
-                        nrt.release(s);
-                      }
-                      //System.out.println(Thread.currentThread().getName() + ": done verify");
-                    }
-
-                    if (random.nextInt(5) == 3) {
-                      if (VERBOSE) {
-                        System.out.println(Thread.currentThread().getName() + ": buffer del id:" + doc.get("docid"));
-                      }
-                      toDeleteIDs.add(doc.get("docid"));
-                    }
-                  }
-                } else {
-                  // we use update but it never replaces a
-                  // prior doc
-                  if (VERBOSE) {
-                    System.out.println(Thread.currentThread().getName() + ": update doc id:" + doc.get("docid"));
-                  }
-                  gen = nrt.updateDocument(new Term("docid", doc.get("docid")), doc);
-                  addCount.getAndIncrement();
-
-                  // randomly verify the add "took":
-                  if (random.nextInt(20) == 2) {
-                    final IndexSearcher s = nrt.get(gen, true);
-                    try {
-                      assertEquals(1, s.search(new TermQuery(new Term("docid", doc.get("docid"))), 10).totalHits);
-                    } finally {
-                      nrt.release(s);
-                    }
-                  }
-
-                  if (random.nextInt(5) == 3) {
-                    if (VERBOSE) {
-                      System.out.println(Thread.currentThread().getName() + ": buffer del id:" + doc.get("docid"));
-                    }
-                    toDeleteIDs.add(doc.get("docid"));
-                  }
-                }
-
-                if (random.nextInt(30) == 17) {
-                  if (VERBOSE) {
-                    System.out.println(Thread.currentThread().getName() + ": apply " + toDeleteIDs.size() + " deletes");
-                  }
-                  for(String id : toDeleteIDs) {
-                    if (VERBOSE) {
-                      System.out.println(Thread.currentThread().getName() + ": del term=id:" + id);
-                    }
-                    gen = nrt.deleteDocuments(new Term("docid", id));
-
-                    // randomly verify the delete "took":
-                    if (random.nextInt(20) == 7) {
-                      final IndexSearcher s = nrt.get(gen, true);
-                      try {
-                        assertEquals(0, s.search(new TermQuery(new Term("docid", id)), 10).totalHits);
-                      } finally {
-                        nrt.release(s);
-                      }
-                    }
-                  }
-
-                  final int count = delCount.addAndGet(toDeleteIDs.size());
-                  if (VERBOSE) {
-                    System.out.println(Thread.currentThread().getName() + ": tot " + count + " deletes");
-                  }
-                  delIDs.addAll(toDeleteIDs);
-                  toDeleteIDs.clear();
-
-                  for(SubDocs subDocs : toDeleteSubDocs) {
-                    assertTrue(!subDocs.deleted);
-                    gen = nrt.deleteDocuments(new Term("packID", subDocs.packID));
-                    subDocs.deleted = true;
-                    if (VERBOSE) {
-                      System.out.println("  del subs: " + subDocs.subIDs + " packID=" + subDocs.packID);
-                    }
-                    delIDs.addAll(subDocs.subIDs);
-                    delCount.addAndGet(subDocs.subIDs.size());
-
-                    // randomly verify the delete "took":
-                    if (random.nextInt(20) == 7) {
-                      final IndexSearcher s = nrt.get(gen, true);
-                      try {
-                        assertEquals(0, s.search(new TermQuery(new Term("packID", subDocs.packID)), 1).totalHits);
-                      } finally {
-                        nrt.release(s);
-                      }
-                    }
-                  }
-                  toDeleteSubDocs.clear();
-                }
-                if (addedField != null) {
-                  doc.removeField(addedField);
-                }
-              } catch (Throwable t) {
-                System.out.println(Thread.currentThread().getName() + ": FAILED: hit exc");
-                t.printStackTrace();
-                failed.set(true);
-                throw new RuntimeException(t);
-              }
-            }
-
-            lastGens.add(gen);
-            if (VERBOSE) {
-              System.out.println(Thread.currentThread().getName() + ": indexing done");
-            }
-          }
-        };
-      threads[thread].setDaemon(true);
-      threads[thread].start();
-    }
-
-    if (VERBOSE) {
-      System.out.println("TEST: DONE start indexing threads [" + (System.currentTimeMillis()-t0) + " ms]");
-    }
-
-    // let index build up a bit
-    Thread.sleep(100);
-
-    // silly starting guess:
-    final AtomicInteger totTermCount = new AtomicInteger(100);
-
-    // run search threads
-    final Thread[] searchThreads = new Thread[NUM_SEARCH_THREADS];
-    final AtomicInteger totHits = new AtomicInteger();
-
-    if (VERBOSE) {
-      System.out.println("TEST: start search threads");
-    }
-
-    for(int thread=0;thread<NUM_SEARCH_THREADS;thread++) {
-      searchThreads[thread] = new Thread() {
-          @Override
-          public void run() {
-            while(System.currentTimeMillis() < stopTime && !failed.get()) {
-              final IndexSearcher s = nrt.get(random.nextBoolean());
-              try {
-                try {
-                  smokeTestSearcher(s);
-                  if (s.getIndexReader().numDocs() > 0) {
-                    Fields fields = MultiFields.getFields(s.getIndexReader());
-                    if (fields == null) {
-                      continue;
-                    }
-                    Terms terms = fields.terms("body");
-                    if (terms == null) {
-                      continue;
-                    }
-
-                    TermsEnum termsEnum = terms.iterator();
-                    int seenTermCount = 0;
-                    int shift;
-                    int trigger;
-                    if (totTermCount.get() < 10) {
-                      shift = 0;
-                      trigger = 1;
-                    } else {
-                      trigger = totTermCount.get()/10;
-                      shift = random.nextInt(trigger);
-                    }
-
-                    while(System.currentTimeMillis() < stopTime) {
-                      BytesRef term = termsEnum.next();
-                      if (term == null) {
-                        if (seenTermCount == 0) {
-                          break;
-                        }
-                        totTermCount.set(seenTermCount);
-                        seenTermCount = 0;
-                        if (totTermCount.get() < 10) {
-                          shift = 0;
-                          trigger = 1;
-                        } else {
-                          trigger = totTermCount.get()/10;
-                          //System.out.println("trigger " + trigger);
-                          shift = random.nextInt(trigger);
-                        }
-                        termsEnum.seekCeil(new BytesRef(""));
-                        continue;
-                      }
-                      seenTermCount++;
-                      // search 10 terms
-                      if (trigger == 0) {
-                        trigger = 1;
-                      }
-                      if ((seenTermCount + shift) % trigger == 0) {
-                        //if (VERBOSE) {
-                        //System.out.println(Thread.currentThread().getName() + " now search body:" + term.utf8ToString());
-                        //}
-                        totHits.addAndGet(runQuery(s, new TermQuery(new Term("body", term))));
-                      }
-                    }
-                    if (VERBOSE) {
-                      System.out.println(Thread.currentThread().getName() + ": search done");
-                    }
-                  }
-                } finally {
-                  nrt.release(s);
-                }
-              } catch (Throwable t) {
-                System.out.println(Thread.currentThread().getName() + ": FAILED: hit exc");
-                failed.set(true);
-                t.printStackTrace(System.out);
-                throw new RuntimeException(t);
-              }
-            }
-          }
-        };
-      searchThreads[thread].setDaemon(true);
-      searchThreads[thread].start();
-    }
-
-    if (VERBOSE) {
-      System.out.println("TEST: now join");
-    }
-    for(int thread=0;thread<NUM_INDEX_THREADS;thread++) {
-      threads[thread].join();
-    }
-    for(int thread=0;thread<NUM_SEARCH_THREADS;thread++) {
-      searchThreads[thread].join();
-    }
-
-    if (VERBOSE) {
-      System.out.println("TEST: done join [" + (System.currentTimeMillis()-t0) + " ms]; addCount=" + addCount + " delCount=" + delCount);
-      System.out.println("TEST: search totHits=" + totHits);
+  @Override
+  protected void doAfterIndexingThreadDone() {
+    Long gen = lastGens.get();
+    if (gen != null) {
+      addMaxGen(gen);
     }
+  }
 
-    long maxGen = 0;
-    for(long gen : lastGens) {
-      maxGen = Math.max(maxGen, gen);
-    }
+  private long maxGen = -1;
 
-    final IndexSearcher s = nrt.get(maxGen, true);
+  private synchronized void addMaxGen(long gen) {
+    maxGen = Math.max(gen, maxGen);
+  }
 
-    boolean doFail = false;
-    for(String id : delIDs) {
-      final TopDocs hits = s.search(new TermQuery(new Term("docid", id)), 1);
-      if (hits.totalHits != 0) {
-        System.out.println("doc id=" + id + " is supposed to be deleted, but got docID=" + hits.scoreDocs[0].doc);
-        doFail = true;
-      }
-    }
+  @Override
+  protected void doSearching(ExecutorService es, long stopTime) throws Exception {
+    runSearchThreads(stopTime);
+  }
 
-    // Make sure each group of sub-docs are still in docID order:
-    for(SubDocs subDocs : allSubDocs) {
-      if (!subDocs.deleted) {
-        // We sort by relevance but the scores should be identical so sort falls back to by docID:
-        TopDocs hits = s.search(new TermQuery(new Term("packID", subDocs.packID)), 20);
-        assertEquals(subDocs.subIDs.size(), hits.totalHits);
-        int lastDocID = -1;
-        int startDocID = -1;
-        for(ScoreDoc scoreDoc : hits.scoreDocs) {
-          final int docID = scoreDoc.doc;
-          if (lastDocID != -1) {
-            assertEquals(1+lastDocID, docID);
-          } else {
-            startDocID = docID;
-          }
-          lastDocID = docID;
-          final Document doc = s.doc(docID);
-          assertEquals(subDocs.packID, doc.get("packID"));
-        }
-
-        lastDocID = startDocID - 1;
-        for(String subID : subDocs.subIDs) {
-          hits = s.search(new TermQuery(new Term("docid", subID)), 1);
-          assertEquals(1, hits.totalHits);
-          final int docID = hits.scoreDocs[0].doc;
-          if (lastDocID != -1) {
-            assertEquals(1+lastDocID, docID);
-          }
-          lastDocID = docID;
-        }          
-      } else {
-        for(String subID : subDocs.subIDs) {
-          assertEquals(0, s.search(new TermQuery(new Term("docid", subID)), 1).totalHits);
-        }
-      }
-    }
-    
-    final int endID = Integer.parseInt(docs.nextDoc().get("docid"));
-    for(int id=0;id<endID;id++) {
-      String stringID = ""+id;
-      if (!delIDs.contains(stringID)) {
-        final TopDocs hits = s.search(new TermQuery(new Term("docid", stringID)), 1);
-        if (hits.totalHits != 1) {
-          System.out.println("doc id=" + stringID + " is not supposed to be deleted, but got hitCount=" + hits.totalHits);
-          doFail = true;
-        }
-      }
-    }
-    assertFalse(doFail);
+  @Override
+  protected IndexSearcher getCurrentSearcher() throws Exception {
+    return nrt.get(random.nextBoolean());
+  }
 
-    assertEquals("index=" + writer.segString() + " addCount=" + addCount + " delCount=" + delCount, addCount.get() - delCount.get(), s.getIndexReader().numDocs());
+  @Override
+  protected void releaseSearcher(IndexSearcher s) throws Exception {
     nrt.release(s);
+  }
 
-    if (es != null) {
-      es.shutdown();
-      es.awaitTermination(1, TimeUnit.SECONDS);
-    }
-
-    writer.commit();
-    assertEquals("index=" + writer.segString() + " addCount=" + addCount + " delCount=" + delCount, addCount.get() - delCount.get(), writer.numDocs());
-
+  @Override
+  protected void doClose() throws Exception {
+    assertTrue(warmCalled);
     if (VERBOSE) {
       System.out.println("TEST: now close NRTManager");
     }
     nrtThread.close();
     nrt.close();
-    assertFalse(writer.anyNonBulkMerges);
-    writer.close(false);
-    _TestUtil.checkIndex(dir);
-    dir.close();
-    _TestUtil.rmDir(tempDir);
-    docs.close();
-
-    if (VERBOSE) {
-      System.out.println("TEST: done [" + (System.currentTimeMillis()-t0) + " ms]");
-    }
-  }
-
-  private int runQuery(IndexSearcher s, Query q) throws Exception {
-    s.search(q, 10);
-    return s.search(q, null, 10, new Sort(new SortField("title", SortField.Type.STRING))).totalHits;
-  }
-
-  private void smokeTestSearcher(IndexSearcher s) throws Exception {
-    runQuery(s, new TermQuery(new Term("body", "united")));
-    runQuery(s, new TermQuery(new Term("titleTokenized", "states")));
-    PhraseQuery pq = new PhraseQuery();
-    pq.add(new Term("body", "united"));
-    pq.add(new Term("body", "states"));
-    runQuery(s, pq);
   }
 }

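The refactor above moves the per-operation verification into small overrides: every NRTManager write returns a generation, and nrt.get(gen, applyDeletes) blocks until a searcher covering that generation is available. A minimal sketch of the acquire/verify/release pattern, using the same field name and 10-hit cutoff as the test (the surrounding writer/manager setup is assumed):

    // Each mutating call returns a generation token:
    long gen = nrt.updateDocument(new Term("docid", "42"), doc);

    // Wait for a searcher that covers that generation; pass true so
    // deletes are applied before searching (false skips that cost):
    IndexSearcher s = nrt.get(gen, true);
    try {
      // Exactly one document should now match the updated id:
      assertEquals(1, s.search(new TermQuery(new Term("docid", "42")), 10).totalHits);
    } finally {
      nrt.release(s);  // always release so the underlying reader can be closed
    }
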
Modified: lucene/dev/branches/lucene2621/lucene/contrib/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/contrib/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/contrib/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java (original)
+++ lucene/dev/branches/lucene2621/lucene/contrib/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java Sat Oct  1 03:04:53 2011
@@ -193,7 +193,7 @@ public class FuzzyLikeThisQuery extends 
     private void addTerms(IndexReader reader,FieldVals f) throws IOException
     {
         if(f.queryString==null) return;
-        TokenStream ts=analyzer.reusableTokenStream(f.fieldName,new StringReader(f.queryString));
+        TokenStream ts=analyzer.tokenStream(f.fieldName, new StringReader(f.queryString));
         CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
         
         int corpusNumDocs=reader.numDocs();

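This change is the mechanical part of retiring Analyzer.reusableTokenStream in favor of a single tokenStream entry point. For reference, the standard consumption loop for a stream obtained this way looks like the following sketch (the analyzer instance, field name, and text are assumed):

    TokenStream ts = analyzer.tokenStream("contents", new StringReader(text));
    CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
    ts.reset();                          // consumers must reset before the first incrementToken()
    while (ts.incrementToken()) {
      String term = termAtt.toString();  // current token text
      // ... inspect or accumulate terms ...
    }
    ts.end();
    ts.close();
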
Modified: lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/analysis/TokenStream.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/analysis/TokenStream.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/analysis/TokenStream.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/analysis/TokenStream.java Sat Oct  1 03:04:53 2011
@@ -167,11 +167,12 @@ public abstract class TokenStream extend
   }
 
   /**
-   * Resets this stream to the beginning. This is an optional operation, so
-   * subclasses may or may not implement this method. {@link #reset()} is not needed for
-   * the standard indexing process. However, if the tokens of a
-   * <code>TokenStream</code> are intended to be consumed more than once, it is
-   * necessary to implement {@link #reset()}. Note that if your TokenStream
+   * This method is called by a consumer before it begins consumption using
+   * {@link #incrementToken()}.
+   * <p/>
+   * Resets this stream to the beginning.  As all TokenStreams must be reusable,
+   * any implementations which have state that needs to be reset between usages
+   * of the TokenStream, must implement this method. Note that if your TokenStream
    * caches tokens and feeds them back again after a reset, it is imperative
    * that you clone the tokens when you store them away (on the first pass) as
    * well as when you return them (on future passes after {@link #reset()}).

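The reworded javadoc tightens the contract: reset() is no longer optional, since every TokenStream must be reusable, so any filter carrying per-use state has to clear it in reset() and chain to the wrapped stream. A hypothetical filter illustrating the contract (CountingFilter is an invented name, not part of Lucene):

    public final class CountingFilter extends TokenFilter {
      private int count;                 // per-use state that must not leak across reuses

      public CountingFilter(TokenStream in) {
        super(in);
      }

      @Override
      public boolean incrementToken() throws IOException {
        if (input.incrementToken()) {
          count++;                       // track how many tokens this pass produced
          return true;
        }
        return false;
      }

      @Override
      public void reset() throws IOException {
        super.reset();                   // always delegate to the wrapped stream first
        count = 0;                       // restore the initial state for the next use
      }
    }
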
Modified: lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/analysis/Tokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/analysis/Tokenizer.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/analysis/Tokenizer.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/analysis/Tokenizer.java Sat Oct  1 03:04:53 2011
@@ -86,7 +86,7 @@ public abstract class Tokenizer extends 
   }
 
   /** Expert: Reset the tokenizer to a new reader.  Typically, an
-   *  analyzer (in its reusableTokenStream method) will use
+   *  analyzer (in its tokenStream method) will use
    *  this to re-use a previously created tokenizer. */
   public void reset(Reader input) throws IOException {
     this.input = input;

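The javadoc fix keeps pace with the reusableTokenStream removal; reset(Reader) remains the hook an analyzer uses to point one Tokenizer at successive inputs. A sketch of that reuse pattern, assuming KeywordTokenizer's single-Reader constructor from this era:

    Tokenizer tok = new KeywordTokenizer(new StringReader("first input"));
    // ... consume the stream ...

    tok.reset(new StringReader("second input"));  // swap in new input, no new allocation
    tok.reset();                                  // then the usual stream-level reset
    // ... consume again ...
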
Modified: lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/document/BinaryField.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/document/BinaryField.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/document/BinaryField.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/document/BinaryField.java Sat Oct  1 03:04:53 2011
@@ -31,16 +31,16 @@ public final class BinaryField extends F
 
   /** Creates a new BinaryField */
   public BinaryField(String name, byte[] value) {
-    super(name, BinaryField.TYPE_STORED, value);
+    super(name, value, BinaryField.TYPE_STORED);
   }
   
   /** Creates a new BinaryField */
   public BinaryField(String name, byte[] value, int offset, int length) {
-    super(name, BinaryField.TYPE_STORED, value, offset, length);
+    super(name, value, offset, length, BinaryField.TYPE_STORED);
   }
 
   /** Creates a new BinaryField */
   public BinaryField(String name, BytesRef bytes) {
-    super(name, BinaryField.TYPE_STORED, bytes);
+    super(name, bytes, BinaryField.TYPE_STORED);
   }
 }

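These constructor calls just track the Field super-constructors, which now take the value before the FieldType. A usage sketch for the class itself (the field name and byte source are made up):

    Document doc = new Document();
    doc.add(new BinaryField("payload", new byte[] { 0x01, 0x02, 0x03 }));

    // Or store a slice of a larger buffer without copying it first:
    byte[] buffer = getBuffer();   // hypothetical source of bytes
    doc.add(new BinaryField("payload", buffer, 0, 16));
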
Modified: lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/document/Document.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/document/Document.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/document/Document.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/document/Document.java Sat Oct  1 03:04:53 2011
@@ -209,6 +209,32 @@ public final class Document implements I
     return fields;
   }
   
+  private static final String[] NO_STRINGS = new String[0];
+
+  /**
+   * Returns an array of values of the field specified as the method parameter.
+   * This method returns an empty array when there are no
+   * matching fields.  It never returns null.
+   * For {@link NumericField}s it returns the string value of the number. If you want
+   * the actual {@code NumericField} instances back, use {@link #getFields}.
+   * @param name the name of the field
+   * @return a <code>String[]</code> of field values
+   */
+  public final String[] getValues(String name) {
+    List<String> result = new ArrayList<String>();
+    for (IndexableField field : fields) {
+      if (field.name().equals(name) && field.stringValue() != null) {
+        result.add(field.stringValue());
+      }
+    }
+    
+    if (result.size() == 0) {
+      return NO_STRINGS;
+    }
+    
+    return result.toArray(new String[result.size()]);
+  }
+
   /** Returns the string value of the field with the given name if any exist in
    * this document, or null.  If multiple fields exist with this name, this
    * method returns the first value added. If only binary fields with this name

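The method added above collects stringValue() from every same-named field and never returns null, so callers can iterate without a guard. A usage sketch (the searcher and field name are assumed):

    Document doc = searcher.doc(scoreDoc.doc);
    String[] tags = doc.getValues("tag");   // empty array, never null, when no "tag" fields exist
    for (String tag : tags) {
      System.out.println(tag);
    }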

