lucene-commits mailing list archives

From busc...@apache.org
Subject svn commit: r966819 [13/20] - in /lucene/dev/branches/realtime_search: ./ lucene/ lucene/backwards/ lucene/contrib/ lucene/contrib/benchmark/conf/ lucene/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ lucene/contrib/benchmark/src/...
Date Thu, 22 Jul 2010 19:34:52 GMT
Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestSimilarity.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestSimilarity.java?rev=966819&r1=966818&r2=966819&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestSimilarity.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestSimilarity.java Thu Jul 22 19:34:35 2010
@@ -23,6 +23,7 @@ import java.util.Collection;
 
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.store.RAMDirectory;
@@ -64,8 +65,9 @@ public class TestSimilarity extends Luce
 
   public void testSimilarity() throws Exception {
     RAMDirectory store = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(store, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer()).setSimilarity(new SimpleSimilarity()));
+    RandomIndexWriter writer = new RandomIndexWriter(newRandom(), store, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
+        .setSimilarity(new SimpleSimilarity()));
     
     Document d1 = new Document();
     d1.add(new Field("field", "a c", Field.Store.YES, Field.Index.ANALYZED));
@@ -75,10 +77,10 @@ public class TestSimilarity extends Luce
     
     writer.addDocument(d1);
     writer.addDocument(d2);
-    writer.optimize();
+    IndexReader reader = writer.getReader();
     writer.close();
 
-    Searcher searcher = new IndexSearcher(store, true);
+    Searcher searcher = new IndexSearcher(reader);
     searcher.setSimilarity(new SimpleSimilarity());
 
     Term a = new Term("field", "a");
@@ -173,5 +175,9 @@ public class TestSimilarity extends Luce
         return true;
       }
     });
+
+    searcher.close();
+    reader.close();
+    store.close();
   }
 }
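
The change above is the pattern this commit applies throughout: index with the test framework's RandomIndexWriter (which randomizes flush and merge behavior), obtain a near-real-time reader from the writer via getReader() instead of optimizing and reopening the directory, and close searcher, reader, and directory explicitly. A minimal, self-contained sketch of that pattern, assuming the Lucene 4.0-era test API visible in these hunks (the class name is hypothetical):

    import org.apache.lucene.analysis.MockAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.RandomIndexWriter;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.TermQuery;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.LuceneTestCase;

    public class NrtReaderPatternSketch extends LuceneTestCase {
      public void testPattern() throws Exception {
        RAMDirectory store = new RAMDirectory();
        // RandomIndexWriter wraps IndexWriter and randomizes segment structure,
        // so the test no longer depends on a deterministic optimize() call.
        RandomIndexWriter writer = new RandomIndexWriter(newRandom(), store,
            new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
        Document doc = new Document();
        doc.add(new Field("field", "a c", Field.Store.YES, Field.Index.ANALYZED));
        writer.addDocument(doc);

        IndexReader reader = writer.getReader(); // NRT view; replaces optimize() + reopen
        writer.close();

        IndexSearcher searcher = new IndexSearcher(reader);
        assertEquals(1, searcher.search(new TermQuery(new Term("field", "a")), 10).totalHits);
        searcher.close();
        reader.close();
        store.close();
      }
    }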

Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java?rev=966819&r1=966818&r2=966819&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java Thu Jul 22 19:34:35 2010
@@ -17,13 +17,17 @@ package org.apache.lucene.search;
  * limitations under the License.
  */
 
+import java.util.Random;
+
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.PhraseQuery;
@@ -46,6 +50,13 @@ public class TestSloppyPhraseQuery exten
   private static final PhraseQuery QUERY_2 = makePhraseQuery( S_2 );
   private static final PhraseQuery QUERY_4 = makePhraseQuery( "X A A");
 
+  private Random random;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    random = newRandom();
+  }
 
   /**
    * Test DOC_4 and QUERY_4.
@@ -117,18 +128,21 @@ public class TestSloppyPhraseQuery exten
     query.setSlop(slop);
 
     RAMDirectory ramDir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
+    RandomIndexWriter writer = new RandomIndexWriter(random, ramDir, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
     writer.addDocument(doc);
-    writer.close();
 
-    IndexSearcher searcher = new IndexSearcher(ramDir, true);
+    IndexReader reader = writer.getReader();
+
+    IndexSearcher searcher = new IndexSearcher(reader);
     TopDocs td = searcher.search(query,null,10);
     //System.out.println("slop: "+slop+"  query: "+query+"  doc: "+doc+"  Expecting number of hits: "+expectedNumResults+" maxScore="+td.getMaxScore());
     assertEquals("slop: "+slop+"  query: "+query+"  doc: "+doc+"  Wrong number of hits", expectedNumResults, td.totalHits);
 
     //QueryUtils.check(query,searcher);
-
+    writer.close();
     searcher.close();
+    reader.close();
     ramDir.close();
 
     return td.getMaxScore();
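
Since this file exercises sloppy phrase matching, a standalone sketch of the kind of query being checked may help: slop is the total number of position moves allowed between the phrase terms, so slop 0 is an exact phrase and reversing two adjacent terms costs 2. This assumes the mutable PhraseQuery API of this era; the field name and class name are illustrative only:

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.PhraseQuery;

    public class SloppyPhraseSketch {
      public static void main(String[] args) {
        // "A B" with slop 2: up to two position moves are allowed, so
        // "A x B" (one move) and "B A" (two moves) both match, while an
        // exact phrase (slop 0) would match neither.
        PhraseQuery query = new PhraseQuery();
        query.add(new Term("field", "A"));
        query.add(new Term("field", "B"));
        query.setSlop(2);
        System.out.println(query); // field:"A B"~2
      }
    }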

Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestSort.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestSort.java?rev=966819&r1=966818&r2=966819&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestSort.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestSort.java Thu Jul 22 19:34:35 2010
@@ -36,6 +36,7 @@ import org.apache.lucene.index.IndexRead
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.LogMergePolicy;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.queryParser.ParseException;
@@ -71,6 +72,7 @@ public class TestSort extends LuceneTest
   private Query queryG;
   private Sort sort;
 
+  private Random random = newRandom();
 
   public TestSort (String name) {
     super (name);
@@ -110,9 +112,9 @@ public class TestSort extends LuceneTest
   private Searcher getIndex (boolean even, boolean odd)
   throws IOException {
     RAMDirectory indexStore = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
-    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(1000);
+    RandomIndexWriter writer = new RandomIndexWriter(random, indexStore, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+
     for (int i=0; i<data.length; ++i) {
       if (((i%2)==0 && even) || ((i%2)==1 && odd)) {
         Document doc = new Document();
@@ -132,9 +134,9 @@ public class TestSort extends LuceneTest
         writer.addDocument (doc);
       }
     }
-    //writer.optimize ();
+    IndexReader reader = writer.getReader();
     writer.close ();
-    IndexSearcher s = new IndexSearcher (indexStore, true);
+    IndexSearcher s = new IndexSearcher (reader);
     s.setDefaultFieldSortScoring(true, true);
     return s;
   }
@@ -1039,4 +1041,23 @@ public class TestSort extends LuceneTest
     dir.close();
   }
 
+  public void testLUCENE2142() throws IOException {
+    RAMDirectory indexStore = new RAMDirectory ();
+    IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer()));
+    for (int i=0; i<5; i++) {
+        Document doc = new Document();
+        doc.add (new Field ("string", "a"+i, Field.Store.NO, Field.Index.NOT_ANALYZED));
+        doc.add (new Field ("string", "b"+i, Field.Store.NO, Field.Index.NOT_ANALYZED));
+        writer.addDocument (doc);
+    }
+    writer.optimize(); // enforce one segment to have a higher unique term count in all cases
+    writer.close();
+    sort.setSort(
+        new SortField("string", SortField.STRING),
+        SortField.FIELD_DOC );
+    // this should not throw AIOOBE or RuntimeEx
+    new IndexSearcher (indexStore, true).search(new MatchAllDocsQuery(), null, 500, sort);
+  }
+
 }
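
Note that the new testLUCENE2142 deliberately keeps a plain IndexWriter plus optimize(): per its comments, the point is to build one segment whose "string" field holds more unique terms than the index has documents, the case that used to throw ArrayIndexOutOfBoundsException during sorting. The arithmetic behind that setup, spelled out (numbers taken from the test; the class name is hypothetical):

    public class Lucene2142ConditionSketch {
      public static void main(String[] args) {
        // Each of the 5 documents adds two unique NOT_ANALYZED values
        // ("a"+i and "b"+i), so the optimized single segment carries
        // 10 unique terms for "string" against only 5 documents.
        int numDocs = 5;
        int uniqueTerms = numDocs * 2;
        System.out.println(uniqueTerms > numDocs); // true: the regression case
      }
    }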

Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestSpanQueryFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestSpanQueryFilter.java?rev=966819&r1=966818&r2=966819&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestSpanQueryFilter.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestSpanQueryFilter.java Thu Jul 22 19:34:35 2010
@@ -22,8 +22,8 @@ import org.apache.lucene.analysis.MockAn
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.spans.SpanTermQuery;
 import org.apache.lucene.store.Directory;
@@ -40,17 +40,17 @@ public class TestSpanQueryFilter extends
 
   public void testFilterWorks() throws Exception {
     Directory dir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+    RandomIndexWriter writer = new RandomIndexWriter(newRandom(), dir, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
     for (int i = 0; i < 500; i++) {
       Document document = new Document();
       document.add(new Field("field", English.intToEnglish(i) + " equals " + English.intToEnglish(i),
               Field.Store.NO, Field.Index.ANALYZED));
       writer.addDocument(document);
     }
+    IndexReader reader = writer.getReader();
     writer.close();
 
-    IndexReader reader = IndexReader.open(dir, true);
-
     SpanTermQuery query = new SpanTermQuery(new Term("field", English.intToEnglish(10).trim()));
     SpanQueryFilter filter = new SpanQueryFilter(query);
     SpanFilterResult result = filter.bitSpans(reader);
@@ -69,6 +69,7 @@ public class TestSpanQueryFilter extends
       assertTrue("info.getPositions() Size: " + info.getPositions().size() + " is not: " + 2, info.getPositions().size() == 2);
     }
     reader.close();
+    dir.close();
   }
   
   int getDocIdSetSize(DocIdSet docIdSet) throws Exception {

Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestTermRangeFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestTermRangeFilter.java?rev=966819&r1=966818&r2=966819&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestTermRangeFilter.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestTermRangeFilter.java Thu Jul 22 19:34:35 2010
@@ -23,8 +23,8 @@ import java.util.Locale;
 
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -32,385 +32,448 @@ import org.apache.lucene.store.RAMDirect
 
 /**
  * A basic 'positive' Unit test class for the TermRangeFilter class.
- *
+ * 
  * <p>
- * NOTE: at the moment, this class only tests for 'positive' results,
- * it does not verify the results to ensure there are no 'false positives',
- * nor does it adequately test 'negative' results.  It also does not test
- * that garbage in results in an Exception.
+ * NOTE: at the moment, this class only tests for 'positive' results, it does
+ * not verify the results to ensure there are no 'false positives', nor does it
+ * adequately test 'negative' results. It also does not test that garbage in
+ * results in an Exception.
  */
 public class TestTermRangeFilter extends BaseTestRangeFilter {
-
-    public TestTermRangeFilter(String name) {
-	super(name);
-    }
-    public TestTermRangeFilter() {
-        super();
-    }
-
-    public void testRangeFilterId() throws IOException {
-
-        IndexReader reader = IndexReader.open(signedIndex.index, true);
-	IndexSearcher search = new IndexSearcher(reader);
-
-        int medId = ((maxId - minId) / 2);
-        
-        String minIP = pad(minId);
-        String maxIP = pad(maxId);
-        String medIP = pad(medId);
-    
-        int numDocs = reader.numDocs();
-        
-        assertEquals("num of docs", numDocs, 1+ maxId - minId);
-        
-  ScoreDoc[] result;
-        Query q = new TermQuery(new Term("body","body"));
-
-        // test id, bounded on both ends
-        
-  result = search.search(q,new TermRangeFilter("id",minIP,maxIP,T,T), numDocs).scoreDocs;
-  assertEquals("find all", numDocs, result.length);
-
-  result = search.search(q,new TermRangeFilter("id",minIP,maxIP,T,F), numDocs).scoreDocs;
-  assertEquals("all but last", numDocs-1, result.length);
-
-  result = search.search(q,new TermRangeFilter("id",minIP,maxIP,F,T), numDocs).scoreDocs;
-  assertEquals("all but first", numDocs-1, result.length);
-        
-  result = search.search(q,new TermRangeFilter("id",minIP,maxIP,F,F), numDocs).scoreDocs;
-        assertEquals("all but ends", numDocs-2, result.length);
-    
-        result = search.search(q,new TermRangeFilter("id",medIP,maxIP,T,T), numDocs).scoreDocs;
-        assertEquals("med and up", 1+ maxId-medId, result.length);
-        
-        result = search.search(q,new TermRangeFilter("id",minIP,medIP,T,T), numDocs).scoreDocs;
-        assertEquals("up to med", 1+ medId-minId, result.length);
-
-        // unbounded id
-
-  result = search.search(q,new TermRangeFilter("id",minIP,null,T,F), numDocs).scoreDocs;
-  assertEquals("min and up", numDocs, result.length);
-
-  result = search.search(q,new TermRangeFilter("id",null,maxIP,F,T), numDocs).scoreDocs;
-  assertEquals("max and down", numDocs, result.length);
-
-  result = search.search(q,new TermRangeFilter("id",minIP,null,F,F), numDocs).scoreDocs;
-  assertEquals("not min, but up", numDocs-1, result.length);
-        
-  result = search.search(q,new TermRangeFilter("id",null,maxIP,F,F), numDocs).scoreDocs;
-  assertEquals("not max, but down", numDocs-1, result.length);
-        
-        result = search.search(q,new TermRangeFilter("id",medIP,maxIP,T,F), numDocs).scoreDocs;
-        assertEquals("med and up, not max", maxId-medId, result.length);
-        
-        result = search.search(q,new TermRangeFilter("id",minIP,medIP,F,T), numDocs).scoreDocs;
-        assertEquals("not min, up to med", medId-minId, result.length);
-
-        // very small sets
-
-  result = search.search(q,new TermRangeFilter("id",minIP,minIP,F,F), numDocs).scoreDocs;
-  assertEquals("min,min,F,F", 0, result.length);
-  result = search.search(q,new TermRangeFilter("id",medIP,medIP,F,F), numDocs).scoreDocs;
-  assertEquals("med,med,F,F", 0, result.length);
-  result = search.search(q,new TermRangeFilter("id",maxIP,maxIP,F,F), numDocs).scoreDocs;
-  assertEquals("max,max,F,F", 0, result.length);
-                     
-  result = search.search(q,new TermRangeFilter("id",minIP,minIP,T,T), numDocs).scoreDocs;
-  assertEquals("min,min,T,T", 1, result.length);
-  result = search.search(q,new TermRangeFilter("id",null,minIP,F,T), numDocs).scoreDocs;
-  assertEquals("nul,min,F,T", 1, result.length);
-
-  result = search.search(q,new TermRangeFilter("id",maxIP,maxIP,T,T), numDocs).scoreDocs;
-  assertEquals("max,max,T,T", 1, result.length);
-  result = search.search(q,new TermRangeFilter("id",maxIP,null,T,F), numDocs).scoreDocs;
-  assertEquals("max,nul,T,T", 1, result.length);
-
-  result = search.search(q,new TermRangeFilter("id",medIP,medIP,T,T), numDocs).scoreDocs;
-  assertEquals("med,med,T,T", 1, result.length);
-        
-    }
-
-    public void testRangeFilterIdCollating() throws IOException {
-
-        IndexReader reader = IndexReader.open(signedIndex.index, true);
-        IndexSearcher search = new IndexSearcher(reader);
-
-        Collator c = Collator.getInstance(Locale.ENGLISH);
-
-        int medId = ((maxId - minId) / 2);
-
-        String minIP = pad(minId);
-        String maxIP = pad(maxId);
-        String medIP = pad(medId);
-
-        int numDocs = reader.numDocs();
-
-        assertEquals("num of docs", numDocs, 1+ maxId - minId);
-
-        Query q = new TermQuery(new Term("body","body"));
-
-        // test id, bounded on both ends
-        int numHits = search.search(q,new TermRangeFilter("id",minIP,maxIP,T,T,c), 1000).totalHits;
-        assertEquals("find all", numDocs, numHits);
-
-        numHits = search.search(q,new TermRangeFilter("id",minIP,maxIP,T,F,c), 1000).totalHits;
-        assertEquals("all but last", numDocs-1, numHits);
-
-        numHits = search.search(q,new TermRangeFilter("id",minIP,maxIP,F,T,c), 1000).totalHits;
-        assertEquals("all but first", numDocs-1, numHits);
-
-        numHits = search.search(q,new TermRangeFilter("id",minIP,maxIP,F,F,c), 1000).totalHits;
-        assertEquals("all but ends", numDocs-2, numHits);
-
-        numHits = search.search(q,new TermRangeFilter("id",medIP,maxIP,T,T,c), 1000).totalHits;
-        assertEquals("med and up", 1+ maxId-medId, numHits);
-
-        numHits = search.search(q,new TermRangeFilter("id",minIP,medIP,T,T,c), 1000).totalHits;
-        assertEquals("up to med", 1+ medId-minId, numHits);
-
-        // unbounded id
-
-        numHits = search.search(q,new TermRangeFilter("id",minIP,null,T,F,c), 1000).totalHits;
-        assertEquals("min and up", numDocs, numHits);
-
-        numHits = search.search(q,new TermRangeFilter("id",null,maxIP,F,T,c), 1000).totalHits;
-        assertEquals("max and down", numDocs, numHits);
-
-        numHits = search.search(q,new TermRangeFilter("id",minIP,null,F,F,c), 1000).totalHits;
-        assertEquals("not min, but up", numDocs-1, numHits);
-
-        numHits = search.search(q,new TermRangeFilter("id",null,maxIP,F,F,c), 1000).totalHits;
-        assertEquals("not max, but down", numDocs-1, numHits);
-
-        numHits = search.search(q,new TermRangeFilter("id",medIP,maxIP,T,F,c), 1000).totalHits;
-        assertEquals("med and up, not max", maxId-medId, numHits);
-
-        numHits = search.search(q,new TermRangeFilter("id",minIP,medIP,F,T,c), 1000).totalHits;
-        assertEquals("not min, up to med", medId-minId, numHits);
-
-        // very small sets
-
-        numHits = search.search(q,new TermRangeFilter("id",minIP,minIP,F,F,c), 1000).totalHits;
-        assertEquals("min,min,F,F", 0, numHits);
-        numHits = search.search(q,new TermRangeFilter("id",medIP,medIP,F,F,c), 1000).totalHits;
-        assertEquals("med,med,F,F", 0, numHits);
-        numHits = search.search(q,new TermRangeFilter("id",maxIP,maxIP,F,F,c), 1000).totalHits;
-        assertEquals("max,max,F,F", 0, numHits);
-
-        numHits = search.search(q,new TermRangeFilter("id",minIP,minIP,T,T,c), 1000).totalHits;
-        assertEquals("min,min,T,T", 1, numHits);
-        numHits = search.search(q,new TermRangeFilter("id",null,minIP,F,T,c), 1000).totalHits;
-        assertEquals("nul,min,F,T", 1, numHits);
-
-        numHits = search.search(q,new TermRangeFilter("id",maxIP,maxIP,T,T,c), 1000).totalHits;
-        assertEquals("max,max,T,T", 1, numHits);
-        numHits = search.search(q,new TermRangeFilter("id",maxIP,null,T,F,c), 1000).totalHits;
-        assertEquals("max,nul,T,T", 1, numHits);
-
-        numHits = search.search(q,new TermRangeFilter("id",medIP,medIP,T,T,c), 1000).totalHits;
-        assertEquals("med,med,T,T", 1, numHits);
-    }
-
-    public void testRangeFilterRand() throws IOException {
-
-  IndexReader reader = IndexReader.open(signedIndex.index, true);
-	IndexSearcher search = new IndexSearcher(reader);
-
-        String minRP = pad(signedIndex.minR);
-        String maxRP = pad(signedIndex.maxR);
+  
+  public void testRangeFilterId() throws IOException {
     
-        int numDocs = reader.numDocs();
-        
-        assertEquals("num of docs", numDocs, 1+ maxId - minId);
-        
-  ScoreDoc[] result;
-        Query q = new TermQuery(new Term("body","body"));
-
-        // test extremes, bounded on both ends
-        
-  result = search.search(q,new TermRangeFilter("rand",minRP,maxRP,T,T), numDocs).scoreDocs;
-  assertEquals("find all", numDocs, result.length);
-
-  result = search.search(q,new TermRangeFilter("rand",minRP,maxRP,T,F), numDocs).scoreDocs;
-  assertEquals("all but biggest", numDocs-1, result.length);
-
-  result = search.search(q,new TermRangeFilter("rand",minRP,maxRP,F,T), numDocs).scoreDocs;
-  assertEquals("all but smallest", numDocs-1, result.length);
-        
-  result = search.search(q,new TermRangeFilter("rand",minRP,maxRP,F,F), numDocs).scoreDocs;
-        assertEquals("all but extremes", numDocs-2, result.length);
+    IndexReader reader = signedIndexReader;
+    IndexSearcher search = new IndexSearcher(reader);
     
-        // unbounded
-
-  result = search.search(q,new TermRangeFilter("rand",minRP,null,T,F), numDocs).scoreDocs;
-  assertEquals("smallest and up", numDocs, result.length);
-
-  result = search.search(q,new TermRangeFilter("rand",null,maxRP,F,T), numDocs).scoreDocs;
-  assertEquals("biggest and down", numDocs, result.length);
-
-  result = search.search(q,new TermRangeFilter("rand",minRP,null,F,F), numDocs).scoreDocs;
-  assertEquals("not smallest, but up", numDocs-1, result.length);
-        
-  result = search.search(q,new TermRangeFilter("rand",null,maxRP,F,F), numDocs).scoreDocs;
-  assertEquals("not biggest, but down", numDocs-1, result.length);
-        
-        // very small sets
-
-  result = search.search(q,new TermRangeFilter("rand",minRP,minRP,F,F), numDocs).scoreDocs;
-  assertEquals("min,min,F,F", 0, result.length);
-  result = search.search(q,new TermRangeFilter("rand",maxRP,maxRP,F,F), numDocs).scoreDocs;
-  assertEquals("max,max,F,F", 0, result.length);
-                     
-  result = search.search(q,new TermRangeFilter("rand",minRP,minRP,T,T), numDocs).scoreDocs;
-  assertEquals("min,min,T,T", 1, result.length);
-  result = search.search(q,new TermRangeFilter("rand",null,minRP,F,T), numDocs).scoreDocs;
-  assertEquals("nul,min,F,T", 1, result.length);
-
-  result = search.search(q,new TermRangeFilter("rand",maxRP,maxRP,T,T), numDocs).scoreDocs;
-  assertEquals("max,max,T,T", 1, result.length);
-  result = search.search(q,new TermRangeFilter("rand",maxRP,null,T,F), numDocs).scoreDocs;
-  assertEquals("max,nul,T,T", 1, result.length);
-        
-    }
-
-    public void testRangeFilterRandCollating() throws IOException {
-
-        // using the unsigned index because collation seems to ignore hyphens
-        IndexReader reader = IndexReader.open(unsignedIndex.index, true);
-        IndexSearcher search = new IndexSearcher(reader);
-
-        Collator c = Collator.getInstance(Locale.ENGLISH);
-
-        String minRP = pad(unsignedIndex.minR);
-        String maxRP = pad(unsignedIndex.maxR);
-
-        int numDocs = reader.numDocs();
-
-        assertEquals("num of docs", numDocs, 1+ maxId - minId);
-
-        Query q = new TermQuery(new Term("body","body"));
-
-        // test extremes, bounded on both ends
-
-        int numHits = search.search(q,new TermRangeFilter("rand",minRP,maxRP,T,T,c), 1000).totalHits;
-        assertEquals("find all", numDocs, numHits);
-
-        numHits = search.search(q,new TermRangeFilter("rand",minRP,maxRP,T,F,c), 1000).totalHits;
-        assertEquals("all but biggest", numDocs-1, numHits);
-
-        numHits = search.search(q,new TermRangeFilter("rand",minRP,maxRP,F,T,c), 1000).totalHits;
-        assertEquals("all but smallest", numDocs-1, numHits);
-
-        numHits = search.search(q,new TermRangeFilter("rand",minRP,maxRP,F,F,c), 1000).totalHits;
-        assertEquals("all but extremes", numDocs-2, numHits);
-
-        // unbounded
-
-        numHits = search.search(q,new TermRangeFilter("rand",minRP,null,T,F,c), 1000).totalHits;
-        assertEquals("smallest and up", numDocs, numHits);
-
-        numHits = search.search(q,new TermRangeFilter("rand",null,maxRP,F,T,c), 1000).totalHits;
-        assertEquals("biggest and down", numDocs, numHits);
-
-        numHits = search.search(q,new TermRangeFilter("rand",minRP,null,F,F,c), 1000).totalHits;
-        assertEquals("not smallest, but up", numDocs-1, numHits);
-
-        numHits = search.search(q,new TermRangeFilter("rand",null,maxRP,F,F,c), 1000).totalHits;
-        assertEquals("not biggest, but down", numDocs-1, numHits);
-
-        // very small sets
-
-        numHits = search.search(q,new TermRangeFilter("rand",minRP,minRP,F,F,c), 1000).totalHits;
-        assertEquals("min,min,F,F", 0, numHits);
-        numHits = search.search(q,new TermRangeFilter("rand",maxRP,maxRP,F,F,c), 1000).totalHits;
-        assertEquals("max,max,F,F", 0, numHits);
-
-        numHits = search.search(q,new TermRangeFilter("rand",minRP,minRP,T,T,c), 1000).totalHits;
-        assertEquals("min,min,T,T", 1, numHits);
-        numHits = search.search(q,new TermRangeFilter("rand",null,minRP,F,T,c), 1000).totalHits;
-        assertEquals("nul,min,F,T", 1, numHits);
-
-        numHits = search.search(q,new TermRangeFilter("rand",maxRP,maxRP,T,T,c), 1000).totalHits;
-        assertEquals("max,max,T,T", 1, numHits);
-        numHits = search.search(q,new TermRangeFilter("rand",maxRP,null,T,F,c), 1000).totalHits;
-        assertEquals("max,nul,T,T", 1, numHits);
-    }
+    int medId = ((maxId - minId) / 2);
     
-    public void testFarsi() throws Exception {
-            
-        /* build an index */
-      RAMDirectory farsiIndex = new RAMDirectory();
-      IndexWriter writer = new IndexWriter(farsiIndex, new IndexWriterConfig(
-          TEST_VERSION_CURRENT, new MockAnalyzer()));
-        Document doc = new Document();
-        doc.add(new Field("content","\u0633\u0627\u0628", 
-                          Field.Store.YES, Field.Index.NOT_ANALYZED));
-        doc.add(new Field("body", "body",
-                          Field.Store.YES, Field.Index.NOT_ANALYZED));
-        writer.addDocument(doc);
-            
-        writer.optimize();
-        writer.close();
-
-        IndexReader reader = IndexReader.open(farsiIndex, true);
-        IndexSearcher search = new IndexSearcher(reader);
-        Query q = new TermQuery(new Term("body","body"));
-
-        // Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
-        // RuleBasedCollator.  However, the Arabic Locale seems to order the Farsi
-        // characters properly.
-        Collator collator = Collator.getInstance(new Locale("ar"));
-        
-        // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
-        // orders the U+0698 character before the U+0633 character, so the single
-        // index Term below should NOT be returned by a TermRangeFilter with a Farsi
-        // Collator (or an Arabic one for the case when Farsi is not supported).
-        int numHits = search.search
-            (q, new TermRangeFilter("content", "\u062F", "\u0698", T, T, collator), 1000).totalHits;
-        assertEquals("The index Term should not be included.", 0, numHits);
-
-        numHits = search.search
-            (q, new TermRangeFilter("content", "\u0633", "\u0638", T, T, collator), 1000).totalHits;
-        assertEquals("The index Term should be included.", 1, numHits);
-        search.close();
-    }
-
-    public void testDanish() throws Exception {
-            
-        /* build an index */
-        RAMDirectory danishIndex = new RAMDirectory();
-        IndexWriter writer = new IndexWriter(danishIndex, new IndexWriterConfig(
-            TEST_VERSION_CURRENT, new MockAnalyzer()));
-        // Danish collation orders the words below in the given order
-        // (example taken from TestSort.testInternationalSort() ).
-        String[] words = { "H\u00D8T", "H\u00C5T", "MAND" };
-        for (int docnum = 0 ; docnum < words.length ; ++docnum) {   
-            Document doc = new Document();
-            doc.add(new Field("content", words[docnum], 
-                              Field.Store.YES, Field.Index.NOT_ANALYZED));
-            doc.add(new Field("body", "body",
-                              Field.Store.YES, Field.Index.NOT_ANALYZED));
-            writer.addDocument(doc);
-        }
-        writer.optimize();
-        writer.close();
-
-        IndexReader reader = IndexReader.open(danishIndex, true);
-        IndexSearcher search = new IndexSearcher(reader);
-        Query q = new TermQuery(new Term("body","body"));
-
-        Collator collator = Collator.getInstance(new Locale("da", "dk"));
-
-        // Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
-        // but Danish collation does.
-        int numHits = search.search
-            (q, new TermRangeFilter("content", "H\u00D8T", "MAND", F, F, collator), 1000).totalHits;
-        assertEquals("The index Term should be included.", 1, numHits);
-
-        numHits = search.search
-            (q, new TermRangeFilter("content", "H\u00C5T", "MAND", F, F, collator), 1000).totalHits;
-        assertEquals
-            ("The index Term should not be included.", 0, numHits);
-        search.close();
+    String minIP = pad(minId);
+    String maxIP = pad(maxId);
+    String medIP = pad(medId);
+    
+    int numDocs = reader.numDocs();
+    
+    assertEquals("num of docs", numDocs, 1 + maxId - minId);
+    
+    ScoreDoc[] result;
+    Query q = new TermQuery(new Term("body", "body"));
+    
+    // test id, bounded on both ends
+    
+    result = search.search(q, new TermRangeFilter("id", minIP, maxIP, T, T),
+        numDocs).scoreDocs;
+    assertEquals("find all", numDocs, result.length);
+    
+    result = search.search(q, new TermRangeFilter("id", minIP, maxIP, T, F),
+        numDocs).scoreDocs;
+    assertEquals("all but last", numDocs - 1, result.length);
+    
+    result = search.search(q, new TermRangeFilter("id", minIP, maxIP, F, T),
+        numDocs).scoreDocs;
+    assertEquals("all but first", numDocs - 1, result.length);
+    
+    result = search.search(q, new TermRangeFilter("id", minIP, maxIP, F, F),
+        numDocs).scoreDocs;
+    assertEquals("all but ends", numDocs - 2, result.length);
+    
+    result = search.search(q, new TermRangeFilter("id", medIP, maxIP, T, T),
+        numDocs).scoreDocs;
+    assertEquals("med and up", 1 + maxId - medId, result.length);
+    
+    result = search.search(q, new TermRangeFilter("id", minIP, medIP, T, T),
+        numDocs).scoreDocs;
+    assertEquals("up to med", 1 + medId - minId, result.length);
+    
+    // unbounded id
+    
+    result = search.search(q, new TermRangeFilter("id", minIP, null, T, F),
+        numDocs).scoreDocs;
+    assertEquals("min and up", numDocs, result.length);
+    
+    result = search.search(q, new TermRangeFilter("id", null, maxIP, F, T),
+        numDocs).scoreDocs;
+    assertEquals("max and down", numDocs, result.length);
+    
+    result = search.search(q, new TermRangeFilter("id", minIP, null, F, F),
+        numDocs).scoreDocs;
+    assertEquals("not min, but up", numDocs - 1, result.length);
+    
+    result = search.search(q, new TermRangeFilter("id", null, maxIP, F, F),
+        numDocs).scoreDocs;
+    assertEquals("not max, but down", numDocs - 1, result.length);
+    
+    result = search.search(q, new TermRangeFilter("id", medIP, maxIP, T, F),
+        numDocs).scoreDocs;
+    assertEquals("med and up, not max", maxId - medId, result.length);
+    
+    result = search.search(q, new TermRangeFilter("id", minIP, medIP, F, T),
+        numDocs).scoreDocs;
+    assertEquals("not min, up to med", medId - minId, result.length);
+    
+    // very small sets
+    
+    result = search.search(q, new TermRangeFilter("id", minIP, minIP, F, F),
+        numDocs).scoreDocs;
+    assertEquals("min,min,F,F", 0, result.length);
+    result = search.search(q, new TermRangeFilter("id", medIP, medIP, F, F),
+        numDocs).scoreDocs;
+    assertEquals("med,med,F,F", 0, result.length);
+    result = search.search(q, new TermRangeFilter("id", maxIP, maxIP, F, F),
+        numDocs).scoreDocs;
+    assertEquals("max,max,F,F", 0, result.length);
+    
+    result = search.search(q, new TermRangeFilter("id", minIP, minIP, T, T),
+        numDocs).scoreDocs;
+    assertEquals("min,min,T,T", 1, result.length);
+    result = search.search(q, new TermRangeFilter("id", null, minIP, F, T),
+        numDocs).scoreDocs;
+    assertEquals("nul,min,F,T", 1, result.length);
+    
+    result = search.search(q, new TermRangeFilter("id", maxIP, maxIP, T, T),
+        numDocs).scoreDocs;
+    assertEquals("max,max,T,T", 1, result.length);
+    result = search.search(q, new TermRangeFilter("id", maxIP, null, T, F),
+        numDocs).scoreDocs;
+    assertEquals("max,nul,T,T", 1, result.length);
+    
+    result = search.search(q, new TermRangeFilter("id", medIP, medIP, T, T),
+        numDocs).scoreDocs;
+    assertEquals("med,med,T,T", 1, result.length);
+    
+  }
+  
+  public void testRangeFilterIdCollating() throws IOException {
+    
+    IndexReader reader = signedIndexReader;
+    IndexSearcher search = new IndexSearcher(reader);
+    
+    Collator c = Collator.getInstance(Locale.ENGLISH);
+    
+    int medId = ((maxId - minId) / 2);
+    
+    String minIP = pad(minId);
+    String maxIP = pad(maxId);
+    String medIP = pad(medId);
+    
+    int numDocs = reader.numDocs();
+    
+    assertEquals("num of docs", numDocs, 1 + maxId - minId);
+    
+    Query q = new TermQuery(new Term("body", "body"));
+    
+    // test id, bounded on both ends
+    int numHits = search.search(q, new TermRangeFilter("id", minIP, maxIP, T,
+        T, c), 1000).totalHits;
+    assertEquals("find all", numDocs, numHits);
+    
+    numHits = search.search(q,
+        new TermRangeFilter("id", minIP, maxIP, T, F, c), 1000).totalHits;
+    assertEquals("all but last", numDocs - 1, numHits);
+    
+    numHits = search.search(q,
+        new TermRangeFilter("id", minIP, maxIP, F, T, c), 1000).totalHits;
+    assertEquals("all but first", numDocs - 1, numHits);
+    
+    numHits = search.search(q,
+        new TermRangeFilter("id", minIP, maxIP, F, F, c), 1000).totalHits;
+    assertEquals("all but ends", numDocs - 2, numHits);
+    
+    numHits = search.search(q,
+        new TermRangeFilter("id", medIP, maxIP, T, T, c), 1000).totalHits;
+    assertEquals("med and up", 1 + maxId - medId, numHits);
+    
+    numHits = search.search(q,
+        new TermRangeFilter("id", minIP, medIP, T, T, c), 1000).totalHits;
+    assertEquals("up to med", 1 + medId - minId, numHits);
+    
+    // unbounded id
+    
+    numHits = search.search(q, new TermRangeFilter("id", minIP, null, T, F, c),
+        1000).totalHits;
+    assertEquals("min and up", numDocs, numHits);
+    
+    numHits = search.search(q, new TermRangeFilter("id", null, maxIP, F, T, c),
+        1000).totalHits;
+    assertEquals("max and down", numDocs, numHits);
+    
+    numHits = search.search(q, new TermRangeFilter("id", minIP, null, F, F, c),
+        1000).totalHits;
+    assertEquals("not min, but up", numDocs - 1, numHits);
+    
+    numHits = search.search(q, new TermRangeFilter("id", null, maxIP, F, F, c),
+        1000).totalHits;
+    assertEquals("not max, but down", numDocs - 1, numHits);
+    
+    numHits = search.search(q,
+        new TermRangeFilter("id", medIP, maxIP, T, F, c), 1000).totalHits;
+    assertEquals("med and up, not max", maxId - medId, numHits);
+    
+    numHits = search.search(q,
+        new TermRangeFilter("id", minIP, medIP, F, T, c), 1000).totalHits;
+    assertEquals("not min, up to med", medId - minId, numHits);
+    
+    // very small sets
+    
+    numHits = search.search(q,
+        new TermRangeFilter("id", minIP, minIP, F, F, c), 1000).totalHits;
+    assertEquals("min,min,F,F", 0, numHits);
+    numHits = search.search(q,
+        new TermRangeFilter("id", medIP, medIP, F, F, c), 1000).totalHits;
+    assertEquals("med,med,F,F", 0, numHits);
+    numHits = search.search(q,
+        new TermRangeFilter("id", maxIP, maxIP, F, F, c), 1000).totalHits;
+    assertEquals("max,max,F,F", 0, numHits);
+    
+    numHits = search.search(q,
+        new TermRangeFilter("id", minIP, minIP, T, T, c), 1000).totalHits;
+    assertEquals("min,min,T,T", 1, numHits);
+    numHits = search.search(q, new TermRangeFilter("id", null, minIP, F, T, c),
+        1000).totalHits;
+    assertEquals("nul,min,F,T", 1, numHits);
+    
+    numHits = search.search(q,
+        new TermRangeFilter("id", maxIP, maxIP, T, T, c), 1000).totalHits;
+    assertEquals("max,max,T,T", 1, numHits);
+    numHits = search.search(q, new TermRangeFilter("id", maxIP, null, T, F, c),
+        1000).totalHits;
+    assertEquals("max,nul,T,T", 1, numHits);
+    
+    numHits = search.search(q,
+        new TermRangeFilter("id", medIP, medIP, T, T, c), 1000).totalHits;
+    assertEquals("med,med,T,T", 1, numHits);
+  }
+  
+  public void testRangeFilterRand() throws IOException {
+    
+    IndexReader reader = signedIndexReader;
+    IndexSearcher search = new IndexSearcher(reader);
+    
+    String minRP = pad(signedIndexDir.minR);
+    String maxRP = pad(signedIndexDir.maxR);
+    
+    int numDocs = reader.numDocs();
+    
+    assertEquals("num of docs", numDocs, 1 + maxId - minId);
+    
+    ScoreDoc[] result;
+    Query q = new TermQuery(new Term("body", "body"));
+    
+    // test extremes, bounded on both ends
+    
+    result = search.search(q, new TermRangeFilter("rand", minRP, maxRP, T, T),
+        numDocs).scoreDocs;
+    assertEquals("find all", numDocs, result.length);
+    
+    result = search.search(q, new TermRangeFilter("rand", minRP, maxRP, T, F),
+        numDocs).scoreDocs;
+    assertEquals("all but biggest", numDocs - 1, result.length);
+    
+    result = search.search(q, new TermRangeFilter("rand", minRP, maxRP, F, T),
+        numDocs).scoreDocs;
+    assertEquals("all but smallest", numDocs - 1, result.length);
+    
+    result = search.search(q, new TermRangeFilter("rand", minRP, maxRP, F, F),
+        numDocs).scoreDocs;
+    assertEquals("all but extremes", numDocs - 2, result.length);
+    
+    // unbounded
+    
+    result = search.search(q, new TermRangeFilter("rand", minRP, null, T, F),
+        numDocs).scoreDocs;
+    assertEquals("smallest and up", numDocs, result.length);
+    
+    result = search.search(q, new TermRangeFilter("rand", null, maxRP, F, T),
+        numDocs).scoreDocs;
+    assertEquals("biggest and down", numDocs, result.length);
+    
+    result = search.search(q, new TermRangeFilter("rand", minRP, null, F, F),
+        numDocs).scoreDocs;
+    assertEquals("not smallest, but up", numDocs - 1, result.length);
+    
+    result = search.search(q, new TermRangeFilter("rand", null, maxRP, F, F),
+        numDocs).scoreDocs;
+    assertEquals("not biggest, but down", numDocs - 1, result.length);
+    
+    // very small sets
+    
+    result = search.search(q, new TermRangeFilter("rand", minRP, minRP, F, F),
+        numDocs).scoreDocs;
+    assertEquals("min,min,F,F", 0, result.length);
+    result = search.search(q, new TermRangeFilter("rand", maxRP, maxRP, F, F),
+        numDocs).scoreDocs;
+    assertEquals("max,max,F,F", 0, result.length);
+    
+    result = search.search(q, new TermRangeFilter("rand", minRP, minRP, T, T),
+        numDocs).scoreDocs;
+    assertEquals("min,min,T,T", 1, result.length);
+    result = search.search(q, new TermRangeFilter("rand", null, minRP, F, T),
+        numDocs).scoreDocs;
+    assertEquals("nul,min,F,T", 1, result.length);
+    
+    result = search.search(q, new TermRangeFilter("rand", maxRP, maxRP, T, T),
+        numDocs).scoreDocs;
+    assertEquals("max,max,T,T", 1, result.length);
+    result = search.search(q, new TermRangeFilter("rand", maxRP, null, T, F),
+        numDocs).scoreDocs;
+    assertEquals("max,nul,T,T", 1, result.length);
+    
+  }
+  
+  public void testRangeFilterRandCollating() throws IOException {
+    
+    // using the unsigned index because collation seems to ignore hyphens
+    IndexReader reader = unsignedIndexReader;
+    IndexSearcher search = new IndexSearcher(reader);
+    
+    Collator c = Collator.getInstance(Locale.ENGLISH);
+    
+    String minRP = pad(unsignedIndexDir.minR);
+    String maxRP = pad(unsignedIndexDir.maxR);
+    
+    int numDocs = reader.numDocs();
+    
+    assertEquals("num of docs", numDocs, 1 + maxId - minId);
+    
+    Query q = new TermQuery(new Term("body", "body"));
+    
+    // test extremes, bounded on both ends
+    
+    int numHits = search.search(q, new TermRangeFilter("rand", minRP, maxRP, T,
+        T, c), 1000).totalHits;
+    assertEquals("find all", numDocs, numHits);
+    
+    numHits = search.search(q, new TermRangeFilter("rand", minRP, maxRP, T, F,
+        c), 1000).totalHits;
+    assertEquals("all but biggest", numDocs - 1, numHits);
+    
+    numHits = search.search(q, new TermRangeFilter("rand", minRP, maxRP, F, T,
+        c), 1000).totalHits;
+    assertEquals("all but smallest", numDocs - 1, numHits);
+    
+    numHits = search.search(q, new TermRangeFilter("rand", minRP, maxRP, F, F,
+        c), 1000).totalHits;
+    assertEquals("all but extremes", numDocs - 2, numHits);
+    
+    // unbounded
+    
+    numHits = search.search(q,
+        new TermRangeFilter("rand", minRP, null, T, F, c), 1000).totalHits;
+    assertEquals("smallest and up", numDocs, numHits);
+    
+    numHits = search.search(q,
+        new TermRangeFilter("rand", null, maxRP, F, T, c), 1000).totalHits;
+    assertEquals("biggest and down", numDocs, numHits);
+    
+    numHits = search.search(q,
+        new TermRangeFilter("rand", minRP, null, F, F, c), 1000).totalHits;
+    assertEquals("not smallest, but up", numDocs - 1, numHits);
+    
+    numHits = search.search(q,
+        new TermRangeFilter("rand", null, maxRP, F, F, c), 1000).totalHits;
+    assertEquals("not biggest, but down", numDocs - 1, numHits);
+    
+    // very small sets
+    
+    numHits = search.search(q, new TermRangeFilter("rand", minRP, minRP, F, F,
+        c), 1000).totalHits;
+    assertEquals("min,min,F,F", 0, numHits);
+    numHits = search.search(q, new TermRangeFilter("rand", maxRP, maxRP, F, F,
+        c), 1000).totalHits;
+    assertEquals("max,max,F,F", 0, numHits);
+    
+    numHits = search.search(q, new TermRangeFilter("rand", minRP, minRP, T, T,
+        c), 1000).totalHits;
+    assertEquals("min,min,T,T", 1, numHits);
+    numHits = search.search(q,
+        new TermRangeFilter("rand", null, minRP, F, T, c), 1000).totalHits;
+    assertEquals("nul,min,F,T", 1, numHits);
+    
+    numHits = search.search(q, new TermRangeFilter("rand", maxRP, maxRP, T, T,
+        c), 1000).totalHits;
+    assertEquals("max,max,T,T", 1, numHits);
+    numHits = search.search(q,
+        new TermRangeFilter("rand", maxRP, null, T, F, c), 1000).totalHits;
+    assertEquals("max,nul,T,T", 1, numHits);
+  }
+  
+  public void testFarsi() throws Exception {
+    
+    /* build an index */
+    RAMDirectory farsiIndex = new RAMDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(rand, farsiIndex, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+    Document doc = new Document();
+    doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES,
+        Field.Index.NOT_ANALYZED));
+    doc
+        .add(new Field("body", "body", Field.Store.YES,
+            Field.Index.NOT_ANALYZED));
+    writer.addDocument(doc);
+    
+    IndexReader reader = writer.getReader();
+    writer.close();
+    
+    IndexSearcher search = new IndexSearcher(reader);
+    Query q = new TermQuery(new Term("body", "body"));
+    
+    // Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
+    // RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
+    // characters properly.
+    Collator collator = Collator.getInstance(new Locale("ar"));
+    
+    // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
+    // orders the U+0698 character before the U+0633 character, so the single
+    // index Term below should NOT be returned by a TermRangeFilter with a Farsi
+    // Collator (or an Arabic one for the case when Farsi is not supported).
+    int numHits = search.search(q, new TermRangeFilter("content", "\u062F",
+        "\u0698", T, T, collator), 1000).totalHits;
+    assertEquals("The index Term should not be included.", 0, numHits);
+    
+    numHits = search.search(q, new TermRangeFilter("content", "\u0633",
+        "\u0638", T, T, collator), 1000).totalHits;
+    assertEquals("The index Term should be included.", 1, numHits);
+    search.close();
+    reader.close();
+    farsiIndex.close();
+  }
+  
+  public void testDanish() throws Exception {
+    
+    /* build an index */
+    RAMDirectory danishIndex = new RAMDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(rand, danishIndex, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+    // Danish collation orders the words below in the given order
+    // (example taken from TestSort.testInternationalSort() ).
+    String[] words = {"H\u00D8T", "H\u00C5T", "MAND"};
+    for (int docnum = 0; docnum < words.length; ++docnum) {
+      Document doc = new Document();
+      doc.add(new Field("content", words[docnum], Field.Store.YES,
+          Field.Index.NOT_ANALYZED));
+      doc.add(new Field("body", "body", Field.Store.YES,
+          Field.Index.NOT_ANALYZED));
+      writer.addDocument(doc);
     }
+    IndexReader reader = writer.getReader();
+    writer.close();
+    
+    IndexSearcher search = new IndexSearcher(reader);
+    Query q = new TermQuery(new Term("body", "body"));
+    
+    Collator collator = Collator.getInstance(new Locale("da", "dk"));
+    
+    // Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
+    // but Danish collation does.
+    int numHits = search.search(q, new TermRangeFilter("content", "H\u00D8T",
+        "MAND", F, F, collator), 1000).totalHits;
+    assertEquals("The index Term should be included.", 1, numHits);
+    
+    numHits = search.search(q, new TermRangeFilter("content", "H\u00C5T",
+        "MAND", F, F, collator), 1000).totalHits;
+    assertEquals("The index Term should not be included.", 0, numHits);
+    search.close();
+    reader.close();
+    danishIndex.close();
+  }
 }
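
The Farsi and Danish tests above hinge on the difference between raw Unicode codepoint order and locale-aware collation. A standalone sketch of the Danish case using java.text.Collator, assuming the JDK's Danish rules sort the trailing letters of the alphabet as ... Æ, Ø, Å (i.e. Å after Ø), as the test comments state; the class name is hypothetical:

    import java.text.Collator;
    import java.util.Locale;

    public class DanishCollationSketch {
      public static void main(String[] args) {
        // Codepoint order: 'Å' (U+00C5) < 'Ø' (U+00D8), so "HÅT" falls
        // outside [ "HØT", "MAND" ] when terms are compared as raw strings.
        System.out.println("H\u00C5T".compareTo("H\u00D8T") < 0); // true

        // Danish collation sorts Å after Ø, which is why the range
        // filter in testDanish matches the "H\u00C5T" document.
        Collator collator = Collator.getInstance(new Locale("da", "DK"));
        System.out.println(collator.compare("H\u00C5T", "H\u00D8T") > 0); // true
        System.out.println(collator.compare("H\u00C5T", "MAND") < 0);     // true
      }
    }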

Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestTermRangeQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestTermRangeQuery.java?rev=966819&r1=966818&r2=966819&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestTermRangeQuery.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestTermRangeQuery.java Thu Jul 22 19:34:35 2010
@@ -420,9 +420,4 @@ public class TestTermRangeQuery extends 
     //assertEquals("C added => A,B,<empty string>,C in range", 3, hits.length());
      searcher.close();
   }
-  
-  @Deprecated
-  public void testBackwardsLayer() {
-    assertTrue(new TermRangeQuery("dummy", null, null, true, true).hasNewAPI);
-  }
 }

Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestTermScorer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestTermScorer.java?rev=966819&r1=966818&r2=966819&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestTermScorer.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestTermScorer.java Thu Jul 22 19:34:35 2010
@@ -26,152 +26,155 @@ import org.apache.lucene.analysis.MockAn
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.RAMDirectory;
 
-public class TestTermScorer extends LuceneTestCase
-{
-    protected RAMDirectory directory;
-    private static final String FIELD = "field";
-
-    protected String[] values = new String[]{"all", "dogs dogs", "like", "playing", "fetch", "all"};
-    protected IndexSearcher indexSearcher;
-    protected IndexReader indexReader;
-
-
-    public TestTermScorer(String s)
-    {
-        super(s);
+public class TestTermScorer extends LuceneTestCase {
+  protected RAMDirectory directory;
+  private static final String FIELD = "field";
+  
+  protected String[] values = new String[] {"all", "dogs dogs", "like",
+      "playing", "fetch", "all"};
+  protected IndexSearcher indexSearcher;
+  protected IndexReader indexReader;
+  
+  public TestTermScorer(String s) {
+    super(s);
+  }
+  
+  @Override
+  protected void setUp() throws Exception {
+    super.setUp();
+    directory = new RAMDirectory();
+    
+    RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+    for (int i = 0; i < values.length; i++) {
+      Document doc = new Document();
+      doc
+          .add(new Field(FIELD, values[i], Field.Store.YES,
+              Field.Index.ANALYZED));
+      writer.addDocument(doc);
+    }
+    indexReader = writer.getReader();
+    writer.close();
+    indexSearcher = new IndexSearcher(indexReader);
+  }
+  
+  @Override
+  protected void tearDown() throws Exception {
+    indexSearcher.close();
+    indexReader.close();
+    directory.close();
+  }
+
+  public void test() throws IOException {
+    
+    Term allTerm = new Term(FIELD, "all");
+    TermQuery termQuery = new TermQuery(allTerm);
+    
+    Weight weight = termQuery.weight(indexSearcher);
+    
+    Scorer ts = weight.scorer(indexSearcher.getIndexReader(), true, true);
+    // we have 2 documents with the term all in them, one document for all the
+    // other values
+    final List<TestHit> docs = new ArrayList<TestHit>();
+    // must call next first
+    
+    ts.score(new Collector() {
+      private int base = 0;
+      private Scorer scorer;
+      
+      @Override
+      public void setScorer(Scorer scorer) throws IOException {
+        this.scorer = scorer;
+      }
+      
+      @Override
+      public void collect(int doc) throws IOException {
+        float score = scorer.score();
+        doc = doc + base;
+        docs.add(new TestHit(doc, score));
+        assertTrue("score " + score + " is not greater than 0", score > 0);
+        assertTrue("Doc: " + doc + " does not equal 0 or doc does not equal 5",
+            doc == 0 || doc == 5);
+      }
+      
+      @Override
+      public void setNextReader(IndexReader reader, int docBase) {
+        base = docBase;
+      }
+      
+      @Override
+      public boolean acceptsDocsOutOfOrder() {
+        return true;
+      }
+    });
+    assertTrue("docs Size: " + docs.size() + " is not: " + 2, docs.size() == 2);
+    TestHit doc0 = docs.get(0);
+    TestHit doc5 = docs.get(1);
+    // The scores should be the same
+    assertTrue(doc0.score + " does not equal: " + doc5.score,
+        doc0.score == doc5.score);
+    /*
+     * Score should be (based on Default Sim.: All floats are approximate tf = 1
+     * numDocs = 6 docFreq(all) = 2 idf = ln(6/3) + 1 = 1.693147 idf ^ 2 =
+     * 2.8667 boost = 1 lengthNorm = 1 //there is 1 term in every document coord
+     * = 1 sumOfSquaredWeights = (idf * boost) ^ 2 = 1.693147 ^ 2 = 2.8667
+     * queryNorm = 1 / (sumOfSquaredWeights)^0.5 = 1 /(1.693147) = 0.590
+     * 
+     * score = 1 * 2.8667 * 1 * 1 * 0.590 = 1.69
+     */
+    assertTrue(doc0.score + " does not equal: " + 1.6931472f,
+        doc0.score == 1.6931472f);
+  }
+  
+  public void testNext() throws Exception {
+    
+    Term allTerm = new Term(FIELD, "all");
+    TermQuery termQuery = new TermQuery(allTerm);
+    
+    Weight weight = termQuery.weight(indexSearcher);
+    
+    Scorer ts = weight.scorer(indexSearcher.getIndexReader(), true, true);
+    assertTrue("next did not return a doc",
+        ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+    assertTrue("score is not correct", ts.score() == 1.6931472f);
+    assertTrue("next did not return a doc",
+        ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+    assertTrue("score is not correct", ts.score() == 1.6931472f);
+    assertTrue("next returned a doc and it should not have",
+        ts.nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
+  }
+  
+  public void testAdvance() throws Exception {
+    
+    Term allTerm = new Term(FIELD, "all");
+    TermQuery termQuery = new TermQuery(allTerm);
+    
+    Weight weight = termQuery.weight(indexSearcher);
+    
+    Scorer ts = weight.scorer(indexSearcher.getIndexReader(), true, true);
+    assertTrue("Didn't skip", ts.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
+    // The next doc should be doc 5
+    assertTrue("doc should be number 5", ts.docID() == 5);
+  }
+  
+  private class TestHit {
+    public int doc;
+    public float score;
+    
+    public TestHit(int doc, float score) {
+      this.doc = doc;
+      this.score = score;
     }
-
+    
     @Override
-    protected void setUp() throws Exception {
-        super.setUp();
-        directory = new RAMDirectory();
-
-        IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
-        for (int i = 0; i < values.length; i++) {
-            Document doc = new Document();
-            doc.add(new Field(FIELD, values[i], Field.Store.YES, Field.Index.ANALYZED));
-            writer.addDocument(doc);
-        }
-        writer.close();
-        indexSearcher = new IndexSearcher(directory, false);
-        indexReader = indexSearcher.getIndexReader();
-
-
-    }
-
-    public void test() throws IOException {
-
-        Term allTerm = new Term(FIELD, "all");
-        TermQuery termQuery = new TermQuery(allTerm);
-
-        Weight weight = termQuery.weight(indexSearcher);
-
-        Scorer ts = weight.scorer(indexSearcher.getIndexReader(),
-                                  true, true);
-        //we have 2 documents with the term all in them, one document for all the other values
-        final List<TestHit> docs = new ArrayList<TestHit>();
-        //must call next first
-
-
-        ts.score(new Collector() {
-            private int base = 0;
-            private Scorer scorer;
-            @Override
-            public void setScorer(Scorer scorer) throws IOException {
-              this.scorer = scorer; 
-            }
-
-            @Override
-            public void collect(int doc) throws IOException {
-              float score = scorer.score();
-              doc = doc + base;
-              docs.add(new TestHit(doc, score));
-              assertTrue("score " + score + " is not greater than 0", score > 0);
-              assertTrue("Doc: " + doc + " does not equal 0 or doc does not equal 5",
-                            doc == 0 || doc == 5);
-            }
-            @Override
-            public void setNextReader(IndexReader reader, int docBase) {
-              base = docBase;
-            }
-            @Override
-            public boolean acceptsDocsOutOfOrder() {
-              return true;
-            }
-        });
-        assertTrue("docs Size: " + docs.size() + " is not: " + 2, docs.size() == 2);
-        TestHit doc0 =  docs.get(0);
-        TestHit doc5 =  docs.get(1);
-        //The scores should be the same
-        assertTrue(doc0.score + " does not equal: " + doc5.score, doc0.score == doc5.score);
-        /*
-        Score should be (based on Default Sim.:
-        All floats are approximate
-        tf = 1
-        numDocs = 6
-        docFreq(all) = 2
-        idf = ln(6/3) + 1 = 1.693147
-        idf ^ 2 = 2.8667
-        boost = 1
-        lengthNorm = 1 //there is 1 term in every document
-        coord = 1
-        sumOfSquaredWeights = (idf * boost) ^ 2 = 1.693147 ^ 2 = 2.8667
-        queryNorm = 1 / (sumOfSquaredWeights)^0.5 = 1 /(1.693147) = 0.590
-
-         score = 1 * 2.8667 * 1 * 1 * 0.590 = 1.69
-
-        */
-        assertTrue(doc0.score + " does not equal: " + 1.6931472f, doc0.score == 1.6931472f);
-    }
-
-    public void testNext() throws Exception {
-
-        Term allTerm = new Term(FIELD, "all");
-        TermQuery termQuery = new TermQuery(allTerm);
-
-        Weight weight = termQuery.weight(indexSearcher);
-
-        Scorer ts = weight.scorer(indexSearcher.getIndexReader(),
-                                  true, true);
-        assertTrue("next did not return a doc", ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
-        assertTrue("score is not correct", ts.score() == 1.6931472f);
-        assertTrue("next did not return a doc", ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
-        assertTrue("score is not correct", ts.score() == 1.6931472f);
-        assertTrue("next returned a doc and it should not have", ts.nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
-    }
-
-    public void testAdvance() throws Exception {
-
-        Term allTerm = new Term(FIELD, "all");
-        TermQuery termQuery = new TermQuery(allTerm);
-
-        Weight weight = termQuery.weight(indexSearcher);
-
-        Scorer ts = weight.scorer(indexSearcher.getIndexReader(),
-                                  true, true);
-        assertTrue("Didn't skip", ts.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
-        //The next doc should be doc 5
-        assertTrue("doc should be number 5", ts.docID() == 5);
-    }
-
-    private class TestHit {
-        public int doc;
-        public float score;
-
-        public TestHit(int doc, float score) {
-            this.doc = doc;
-            this.score = score;
-        }
-
-        @Override
-        public String toString() {
-            return "TestHit{" + "doc=" + doc + ", score=" + score + "}";
-        }
+    public String toString() {
+      return "TestHit{" + "doc=" + doc + ", score=" + score + "}";
     }
-
+  }
+  
 }
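A quick check of the expected score asserted throughout these tests: the removed comment above derives 1.6931472 from DefaultSimilarity. A compact re-derivation (not part of the commit; assumes DefaultSimilarity's idf = ln(numDocs/(docFreq+1)) + 1 and all other factors equal to 1):

    int numDocs = 6, docFreq = 2;
    double idf = Math.log(numDocs / (double) (docFreq + 1)) + 1.0; // ln(6/3) + 1 = 1.6931472
    double sumOfSquaredWeights = idf * idf;                        // boost = 1
    double queryNorm = 1.0 / Math.sqrt(sumOfSquaredWeights);       // 1 / idf ~= 0.5906
    double score = 1.0 * idf * idf * queryNorm;                    // tf * idf^2 * lengthNorm * queryNorm
    // score == idf == 1.6931472f, the value asserted in test(), testNext() and testAdvance()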

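Before moving on to the next file: every test touched by this commit follows the same migration, from IndexWriter plus optimize() plus IndexSearcher(Directory) to RandomIndexWriter plus getReader(). A minimal sketch of the idiom as it appears in these hunks (variable names assumed):

    Random random = newRandom();                        // per-test randomness from LuceneTestCase
    RandomIndexWriter writer = new RandomIndexWriter(random, directory,
        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
    writer.addDocument(doc);
    IndexReader reader = writer.getReader();            // near-real-time reader; no optimize() needed
    writer.close();
    IndexSearcher searcher = new IndexSearcher(reader);
    // ... run the queries under test ...
    searcher.close();                                   // release resources in reverse order
    reader.close();
    directory.close();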
Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestTermVectors.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestTermVectors.java?rev=966819&r1=966818&r2=966819&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestTermVectors.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestTermVectors.java Thu Jul 22 19:34:35 2010
@@ -17,6 +17,7 @@ package org.apache.lucene.search;
  * limitations under the License.
  */
 
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
@@ -31,11 +32,16 @@ import org.apache.lucene.util.English;
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Random;
 import java.util.SortedSet;
 
 public class TestTermVectors extends LuceneTestCase {
   private IndexSearcher searcher;
+  private IndexReader reader;
   private Directory directory = new MockRAMDirectory();
+
+  private Random random;
+
   public TestTermVectors(String s) {
     super(s);
   }
@@ -43,8 +49,9 @@ public class TestTermVectors extends Luc
   @Override
   protected void setUp() throws Exception {                  
     super.setUp();
-    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true)));
+    random = newRandom();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true)));
     //writer.setUseCompoundFile(true);
     //writer.infoStream = System.out;
     for (int i = 0; i < 1000; i++) {
@@ -66,45 +73,60 @@ public class TestTermVectors extends Luc
       }
       doc.add(new Field("field", English.intToEnglish(i),
           Field.Store.YES, Field.Index.ANALYZED, termVector));
+      //test no term vectors too
+      doc.add(new Field("noTV", English.intToEnglish(i),
+          Field.Store.YES, Field.Index.ANALYZED));
       writer.addDocument(doc);
     }
+    reader = writer.getReader();
     writer.close();
-    searcher = new IndexSearcher(directory, true);
+    searcher = new IndexSearcher(reader);
+  }
+  
+  @Override
+  protected void tearDown() throws Exception {
+    searcher.close();
+    reader.close();
+    directory.close();
+    super.tearDown();
   }
 
   public void test() {
     assertTrue(searcher != null);
   }
 
-  public void testTermVectors() {
+  public void testTermVectors() throws IOException {
     Query query = new TermQuery(new Term("field", "seventy"));
-    try {
-      ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
-      assertEquals(100, hits.length);
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(100, hits.length);
       
-      for (int i = 0; i < hits.length; i++)
-      {
-        TermFreqVector [] vector = searcher.reader.getTermFreqVectors(hits[i].doc);
-        assertTrue(vector != null);
-        assertTrue(vector.length == 1);
-      }
-    } catch (IOException e) {
-      assertTrue(false);
+    for (int i = 0; i < hits.length; i++)
+    {
+      TermFreqVector [] vector = searcher.reader.getTermFreqVectors(hits[i].doc);
+      assertTrue(vector != null);
+      assertTrue(vector.length == 1);
     }
+    TermFreqVector vector;
+    vector = searcher.reader.getTermFreqVector(hits[0].doc, "noTV");
+    assertNull(vector);
+
+    TestTermVectorMapper mapper = new TestTermVectorMapper();
+    searcher.reader.getTermFreqVector(hits[0].doc, "noTV", mapper);
+    assertNull(mapper.field);
   }
   
   public void testTermVectorsFieldOrder() throws IOException {
     Directory dir = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true)));
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true)));
     Document doc = new Document();
     doc.add(new Field("c", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     doc.add(new Field("a", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     doc.add(new Field("b", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     doc.add(new Field("x", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     writer.addDocument(doc);
+    IndexReader reader = writer.getReader();
     writer.close();
-    IndexReader reader = IndexReader.open(dir, true);
     TermFreqVector[] v = reader.getTermFreqVectors(0);
     assertEquals(4, v.length);
     String[] expectedFields = new String[]{"a", "b", "c", "x"};
@@ -112,76 +134,68 @@ public class TestTermVectors extends Luc
     for(int i=0;i<v.length;i++) {
       TermPositionVector posVec = (TermPositionVector) v[i];
       assertEquals(expectedFields[i], posVec.getField());
-      String[] terms = posVec.getTerms();
+      BytesRef[] terms = posVec.getTerms();
       assertEquals(3, terms.length);
-      assertEquals("content", terms[0]);
-      assertEquals("here", terms[1]);
-      assertEquals("some", terms[2]);
+      assertEquals("content", terms[0].utf8ToString());
+      assertEquals("here", terms[1].utf8ToString());
+      assertEquals("some", terms[2].utf8ToString());
       for(int j=0;j<3;j++) {
         int[] positions = posVec.getTermPositions(j);
         assertEquals(1, positions.length);
         assertEquals(expectedPositions[j], positions[0]);
       }
     }
+    reader.close();
+    dir.close();
   }
 
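The hunk above also shows the flex-API change that recurs through this file: TermFreqVector.getTerms() now returns BytesRef[] instead of String[]. A small illustration of the two conversion directions (illustration only, not from the commit):

    BytesRef[] terms = posVec.getTerms();
    String text = terms[0].utf8ToString();        // decode the stored UTF-8 bytes to a String
    BytesRef expected = new BytesRef("content");  // encode a String for a byte-level comparison
    assertEquals(expected, terms[0]);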
-  public void testTermPositionVectors() {
+  public void testTermPositionVectors() throws IOException {
     Query query = new TermQuery(new Term("field", "zero"));
-    try {
-      ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
-      assertEquals(1, hits.length);
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    
+    for (int i = 0; i < hits.length; i++) {
+      TermFreqVector[] vector = searcher.reader.getTermFreqVectors(hits[i].doc);
+      assertTrue(vector != null);
+      assertTrue(vector.length == 1);
       
-      for (int i = 0; i < hits.length; i++)
-      {
-        TermFreqVector [] vector = searcher.reader.getTermFreqVectors(hits[i].doc);
-        assertTrue(vector != null);
-        assertTrue(vector.length == 1);
-        
-        boolean shouldBePosVector = (hits[i].doc % 2 == 0) ? true : false;
-        assertTrue((shouldBePosVector == false) || (shouldBePosVector == true && (vector[0] instanceof TermPositionVector == true)));
-       
-        boolean shouldBeOffVector = (hits[i].doc % 3 == 0) ? true : false;
-        assertTrue((shouldBeOffVector == false) || (shouldBeOffVector == true && (vector[0] instanceof TermPositionVector == true)));
+      boolean shouldBePosVector = (hits[i].doc % 2 == 0);
+      assertTrue(!shouldBePosVector
+          || vector[0] instanceof TermPositionVector);
+      
+      boolean shouldBeOffVector = (hits[i].doc % 3 == 0);
+      assertTrue(!shouldBeOffVector
+          || vector[0] instanceof TermPositionVector);
+      
+      if (shouldBePosVector || shouldBeOffVector) {
+        TermPositionVector posVec = (TermPositionVector) vector[0];
+        BytesRef[] terms = posVec.getTerms();
+        assertTrue(terms != null && terms.length > 0);
         
-        if(shouldBePosVector || shouldBeOffVector){
-          TermPositionVector posVec = (TermPositionVector)vector[0];
-          String [] terms = posVec.getTerms();
-          assertTrue(terms != null && terms.length > 0);
+        for (int j = 0; j < terms.length; j++) {
+          int[] positions = posVec.getTermPositions(j);
+          TermVectorOffsetInfo[] offsets = posVec.getOffsets(j);
           
-          for (int j = 0; j < terms.length; j++) {
-            int [] positions = posVec.getTermPositions(j);
-            TermVectorOffsetInfo [] offsets = posVec.getOffsets(j);
-            
-            if(shouldBePosVector){
-              assertTrue(positions != null);
-              assertTrue(positions.length > 0);
-            }
-            else
-              assertTrue(positions == null);
-            
-            if(shouldBeOffVector){
-              assertTrue(offsets != null);
-              assertTrue(offsets.length > 0);
-            }
-            else
-              assertTrue(offsets == null);
-          }
-        }
-        else{
-          try{
-            assertTrue(false);
-          }
-          catch(ClassCastException ignore){
-            TermFreqVector freqVec = vector[0];
-            String [] terms = freqVec.getTerms();
-            assertTrue(terms != null && terms.length > 0);
-          }
+          if (shouldBePosVector) {
+            assertTrue(positions != null);
+            assertTrue(positions.length > 0);
+          } else assertNull(positions);
           
+          if (shouldBeOffVector) {
+            assertTrue(offsets != null);
+            assertTrue(offsets.length > 0);
+          } else assertNull(offsets);
+        }
+      } else {
+        // a vector without positions/offsets must not be a TermPositionVector;
+        // the old try { assertTrue(false); } catch (ClassCastException) could never
+        // catch anything, since assertTrue throws AssertionFailedError, not CCE
+        assertFalse("expected a plain TermFreqVector",
+            vector[0] instanceof TermPositionVector);
+        TermFreqVector freqVec = vector[0];
+        BytesRef[] terms = freqVec.getTerms();
+        assertTrue(terms != null && terms.length > 0);
+      }
-       
+        
       }
-    } catch (IOException e) {
-      assertTrue(false);
     }
   }
   
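The rewrite of testTermPositionVectors() above also drops the catch (IOException e) { assertTrue(false); } wrapper, a cleanup applied to several methods in this file. Declaring throws IOException lets JUnit report the original exception instead of a bare assertion failure. Schematically (hypothetical method name):

    // before: the failure cause is swallowed
    public void testSomething() {
      try {
        searcher.search(query, null, 1000);
      } catch (IOException e) {
        assertTrue(false);
      }
    }

    // after: an IOException fails the test with its full stack trace
    public void testSomething() throws IOException {
      searcher.search(query, null, 1000);
    }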
@@ -204,7 +218,7 @@ public class TestTermVectors extends Luc
     }
   }
 
-  public void testKnownSetOfDocuments() {
+  public void testKnownSetOfDocuments() throws IOException {
     String test1 = "eating chocolate in a computer lab"; //6 terms
     String test2 = "computer in a computer lab"; //5 terms
     String test3 = "a chocolate lab grows old"; //5 terms
@@ -230,34 +244,31 @@ public class TestTermVectors extends Luc
     setupDoc(testDoc3, test3);
     Document testDoc4 = new Document();
     setupDoc(testDoc4, test4);
-        
+    
     Directory dir = new MockRAMDirectory();
     
-    try {
-      IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
-          TEST_VERSION_CURRENT, 
-          new MockAnalyzer(MockTokenizer.SIMPLE, true))
-          .setOpenMode(OpenMode.CREATE));
-      writer.addDocument(testDoc1);
-      writer.addDocument(testDoc2);
-      writer.addDocument(testDoc3);
-      writer.addDocument(testDoc4);
-      writer.close();
-      IndexSearcher knownSearcher = new IndexSearcher(dir, true);
-      TermEnum termEnum = knownSearcher.reader.terms();
-      TermDocs termDocs = knownSearcher.reader.termDocs();
-      //System.out.println("Terms: " + termEnum.size() + " Orig Len: " + termArray.length);
-      
-      //Similarity sim = knownSearcher.getSimilarity();
-      while (termEnum.next() == true)
-      {
-        Term term = termEnum.term();
-        //System.out.println("Term: " + term);
-        termDocs.seek(term);
-        while (termDocs.next())
-        {
-          int docId = termDocs.doc();
-          int freq = termDocs.freq();
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true))
+        .setOpenMode(OpenMode.CREATE));
+    writer.addDocument(testDoc1);
+    writer.addDocument(testDoc2);
+    writer.addDocument(testDoc3);
+    writer.addDocument(testDoc4);
+    IndexReader reader = writer.getReader();
+    writer.close();
+    IndexSearcher knownSearcher = new IndexSearcher(reader);
+    FieldsEnum fields = MultiFields.getFields(knownSearcher.reader).iterator();
+    
+    DocsEnum docs = null;
+    while(fields.next() != null) {
+      TermsEnum terms = fields.terms();
+      while(terms.next() != null) {
+        String text = terms.term().utf8ToString();
+        docs = terms.docs(MultiFields.getDeletedDocs(knownSearcher.reader), docs);
+        
+        while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+          int docId = docs.docID();
+          int freq = docs.freq();
           //System.out.println("Doc Id: " + docId + " freq " + freq);
           TermFreqVector vector = knownSearcher.reader.getTermFreqVector(docId, "field");
           //float tf = sim.tf(freq);
@@ -268,76 +279,74 @@ public class TestTermVectors extends Luc
           //float coord = sim.coord()
           //System.out.println("TF: " + tf + " IDF: " + idf + " LenNorm: " + lNorm);
           assertTrue(vector != null);
-          String[] vTerms = vector.getTerms();
+          BytesRef[] vTerms = vector.getTerms();
           int [] freqs = vector.getTermFrequencies();
           for (int i = 0; i < vTerms.length; i++)
           {
-            if (term.text().equals(vTerms[i]))
+            if (text.equals(vTerms[i].utf8ToString()))
             {
               assertTrue(freqs[i] == freq);
             }
           }
-          
         }
-        //System.out.println("--------");
       }
-      Query query = new TermQuery(new Term("field", "chocolate"));
-      ScoreDoc[] hits = knownSearcher.search(query, null, 1000).scoreDocs;
-      //doc 3 should be the first hit b/c it is the shortest match
-      assertTrue(hits.length == 3);
-      /*System.out.println("Hit 0: " + hits.id(0) + " Score: " + hits.score(0) + " String: " + hits.doc(0).toString());
+      //System.out.println("--------");
+    }
+    Query query = new TermQuery(new Term("field", "chocolate"));
+    ScoreDoc[] hits = knownSearcher.search(query, null, 1000).scoreDocs;
+    //testDoc3 (doc id 2) should be the first hit because it is the shortest match
+    assertTrue(hits.length == 3);
+    /*System.out.println("Hit 0: " + hits.id(0) + " Score: " + hits.score(0) + " String: " + hits.doc(0).toString());
       System.out.println("Explain: " + knownSearcher.explain(query, hits.id(0)));
       System.out.println("Hit 1: " + hits.id(1) + " Score: " + hits.score(1) + " String: " + hits.doc(1).toString());
       System.out.println("Explain: " + knownSearcher.explain(query, hits.id(1)));
       System.out.println("Hit 2: " + hits.id(2) + " Score: " + hits.score(2) + " String: " +  hits.doc(2).toString());
       System.out.println("Explain: " + knownSearcher.explain(query, hits.id(2)));*/
-      assertTrue(hits[0].doc == 2);
-      assertTrue(hits[1].doc == 3);
-      assertTrue(hits[2].doc == 0);
-      TermFreqVector vector = knownSearcher.reader.getTermFreqVector(hits[1].doc, "field");
-      assertTrue(vector != null);
-      //System.out.println("Vector: " + vector);
-      String[] terms = vector.getTerms();
-      int [] freqs = vector.getTermFrequencies();
-      assertTrue(terms != null && terms.length == 10);
-      for (int i = 0; i < terms.length; i++) {
-        String term = terms[i];
-        //System.out.println("Term: " + term);
-        int freq = freqs[i];
-        assertTrue(test4.indexOf(term) != -1);
-        Integer freqInt = test4Map.get(term);
-        assertTrue(freqInt != null);
-        assertTrue(freqInt.intValue() == freq);        
-      }
-      SortedTermVectorMapper mapper = new SortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
-      knownSearcher.reader.getTermFreqVector(hits[1].doc, mapper);
-      SortedSet<TermVectorEntry> vectorEntrySet = mapper.getTermVectorEntrySet();
-      assertTrue("mapper.getTermVectorEntrySet() Size: " + vectorEntrySet.size() + " is not: " + 10, vectorEntrySet.size() == 10);
-      TermVectorEntry last = null;
-      for (final TermVectorEntry tve : vectorEntrySet) {
-        if (tve != null && last != null)
-        {
-          assertTrue("terms are not properly sorted", last.getFrequency() >= tve.getFrequency());
-          Integer expectedFreq =  test4Map.get(tve.getTerm());
-          //we expect double the expectedFreq, since there are two fields with the exact same text and we are collapsing all fields
-          assertTrue("Frequency is not correct:", tve.getFrequency() == 2*expectedFreq.intValue());
-        }
-        last = tve;
-
+    assertTrue(hits[0].doc == 2);
+    assertTrue(hits[1].doc == 3);
+    assertTrue(hits[2].doc == 0);
+    TermFreqVector vector = knownSearcher.reader.getTermFreqVector(hits[1].doc, "field");
+    assertTrue(vector != null);
+    //System.out.println("Vector: " + vector);
+    BytesRef[] terms = vector.getTerms();
+    int [] freqs = vector.getTermFrequencies();
+    assertTrue(terms != null && terms.length == 10);
+    for (int i = 0; i < terms.length; i++) {
+      String term = terms[i].utf8ToString();
+      //System.out.println("Term: " + term);
+      int freq = freqs[i];
+      assertTrue(test4.indexOf(term) != -1);
+      Integer freqInt = test4Map.get(term);
+      assertTrue(freqInt != null);
+      assertTrue(freqInt.intValue() == freq);        
+    }
+    SortedTermVectorMapper mapper = new SortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
+    knownSearcher.reader.getTermFreqVector(hits[1].doc, mapper);
+    SortedSet<TermVectorEntry> vectorEntrySet = mapper.getTermVectorEntrySet();
+    assertTrue("mapper.getTermVectorEntrySet() Size: " + vectorEntrySet.size() + " is not: " + 10, vectorEntrySet.size() == 10);
+    TermVectorEntry last = null;
+    for (final TermVectorEntry tve : vectorEntrySet) {
+      if (tve != null && last != null)
+      {
+        assertTrue("terms are not properly sorted", last.getFrequency() >= tve.getFrequency());
+        Integer expectedFreq =  test4Map.get(tve.getTerm().utf8ToString());
+        //we expect double the expectedFreq, since there are two fields with the exact same text and we are collapsing all fields
+        assertTrue("Frequency is not correct:", tve.getFrequency() == 2*expectedFreq.intValue());
       }
-
-      FieldSortedTermVectorMapper fieldMapper = new FieldSortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
-      knownSearcher.reader.getTermFreqVector(hits[1].doc, fieldMapper);
-      Map<String,SortedSet<TermVectorEntry>> map = fieldMapper.getFieldToTerms();
-      assertTrue("map Size: " + map.size() + " is not: " + 2, map.size() == 2);
-      vectorEntrySet = map.get("field");
-      assertTrue("vectorEntrySet is null and it shouldn't be", vectorEntrySet != null);
-      assertTrue("vectorEntrySet Size: " + vectorEntrySet.size() + " is not: " + 10, vectorEntrySet.size() == 10);
-      knownSearcher.close();
-    } catch (IOException e) {
-      e.printStackTrace();
-      assertTrue(false);
+      last = tve;
+      
     }
+    
+    FieldSortedTermVectorMapper fieldMapper = new FieldSortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
+    knownSearcher.reader.getTermFreqVector(hits[1].doc, fieldMapper);
+    Map<String,SortedSet<TermVectorEntry>> map = fieldMapper.getFieldToTerms();
+    assertTrue("map Size: " + map.size() + " is not: " + 2, map.size() == 2);
+    vectorEntrySet = map.get("field");
+    assertTrue("vectorEntrySet is null and it shouldn't be", vectorEntrySet != null);
+    assertTrue("vectorEntrySet Size: " + vectorEntrySet.size() + " is not: " + 10, vectorEntrySet.size() == 10);
+    knownSearcher.close();
+    reader.close();
+    dir.close();
   } 
   
   private void setupDoc(Document doc, String text)
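The loop added to testKnownSetOfDocuments() above replaces TermEnum/TermDocs with the flex enums. The bare iteration idiom, extracted for clarity (assumes a non-empty index and a reader in scope):

    FieldsEnum fields = MultiFields.getFields(reader).iterator();
    DocsEnum docs = null;                      // passed back in so the enum can be reused
    while (fields.next() != null) {
      TermsEnum terms = fields.terms();
      BytesRef term;
      while ((term = terms.next()) != null) {
        String text = term.utf8ToString();     // current term, decoded
        docs = terms.docs(MultiFields.getDeletedDocs(reader), docs); // skips deleted docs
        while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
          int docID = docs.docID();
          int freq = docs.freq();
          // ... per-posting work ...
        }
      }
    }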
@@ -351,8 +360,8 @@ public class TestTermVectors extends Luc
 
   // Test only a few docs having vectors
   public void testRareVectors() throws IOException {
-    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true))
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true))
         .setOpenMode(OpenMode.CREATE));
     for (int i = 0; i < 100; i++) {
       Document doc = new Document();
@@ -367,8 +376,9 @@ public class TestTermVectors extends Luc
       writer.addDocument(doc);
     }
 
+    IndexReader reader = writer.getReader();
     writer.close();
-    searcher = new IndexSearcher(directory, true);
+    searcher = new IndexSearcher(reader);
 
     Query query = new TermQuery(new Term("field", "hundred"));
     ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
@@ -378,14 +388,15 @@ public class TestTermVectors extends Luc
       assertTrue(vector != null);
       assertTrue(vector.length == 1);
     }
+    reader.close();
   }
 
 
   // In a single doc, for the same field, mix the term
   // vectors up
   public void testMixedVectrosVectors() throws IOException {
-    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, 
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, 
         new MockAnalyzer(MockTokenizer.SIMPLE, true)).setOpenMode(OpenMode.CREATE));
     Document doc = new Document();
     doc.add(new Field("field", "one",
@@ -399,9 +410,10 @@ public class TestTermVectors extends Luc
     doc.add(new Field("field", "one",
                       Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     writer.addDocument(doc);
+    IndexReader reader = writer.getReader();
     writer.close();
 
-    searcher = new IndexSearcher(directory, true);
+    searcher = new IndexSearcher(reader);
 
     Query query = new TermQuery(new Term("field", "one"));
     ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
@@ -412,9 +424,9 @@ public class TestTermVectors extends Luc
     assertTrue(vector.length == 1);
     TermPositionVector tfv = (TermPositionVector) vector[0];
     assertTrue(tfv.getField().equals("field"));
-    String[] terms = tfv.getTerms();
+    BytesRef[] terms = tfv.getTerms();
     assertEquals(1, terms.length);
-    assertEquals(terms[0], "one");
+    assertEquals(terms[0].utf8ToString(), "one");
     assertEquals(5, tfv.getTermFrequencies()[0]);
 
     int[] positions = tfv.getTermPositions(0);
@@ -427,5 +439,20 @@ public class TestTermVectors extends Luc
       assertEquals(4*i, offsets[i].getStartOffset());
       assertEquals(4*i+3, offsets[i].getEndOffset());
     }
+    reader.close();
+  }
+
+  private static class TestTermVectorMapper extends TermVectorMapper {
+    public String field = null;
+
+    @Override
+    public void setExpectations(String field, int numTerms, boolean storeOffsets, boolean storePositions) {
+      this.field = field;
+    }
+
+    @Override
+    public void map(BytesRef term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions) {
+
+    }
   }
 }
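TestTermVectorMapper above only records the field name handed to setExpectations(); it stays null when no vector is stored, which is exactly what the noTV assertions check. A mapper that collects real data uses the same two callbacks; a hypothetical sketch:

    private static class FreqCollectingMapper extends TermVectorMapper {
      final Map<String,Integer> freqs = new HashMap<String,Integer>();

      @Override
      public void setExpectations(String field, int numTerms, boolean storeOffsets, boolean storePositions) {
        // invoked once per stored vector, before any map() callbacks
      }

      @Override
      public void map(BytesRef term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions) {
        freqs.put(term.utf8ToString(), frequency); // offsets/positions are null unless stored
      }
    }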

Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java?rev=966819&r1=966818&r2=966819&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java Thu Jul 22 19:34:35 2010
@@ -24,8 +24,8 @@ import org.apache.lucene.analysis.MockAn
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.search.TimeLimitingCollector.TimeExceededException;
 import org.apache.lucene.store.Directory;
@@ -51,6 +51,9 @@ public class TestTimeLimitingCollector e
   private static final int N_THREADS = 50;
 
   private Searcher searcher;
+  private Directory directory;
+  private IndexReader reader;
+
   private final String FIELD_NAME = "body";
   private Query query;
 
@@ -74,14 +77,16 @@ public class TestTimeLimitingCollector e
         "blueberry strudel",
         "blueberry pizza",
     };
-    Directory directory = new RAMDirectory();
-    IndexWriter iw = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+    directory = new RAMDirectory();
+    RandomIndexWriter iw = new RandomIndexWriter(newRandom(), directory, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
     
     for (int i=0; i<N_DOCS; i++) {
       add(docText[i%docText.length], iw);
     }
+    reader = iw.getReader();
     iw.close();
-    searcher = new IndexSearcher(directory, true);
+    searcher = new IndexSearcher(reader);
 
     String qtxt = "one";
     // start from 1, so that the 0th doc never matches
@@ -99,10 +104,12 @@ public class TestTimeLimitingCollector e
   @Override
   protected void tearDown() throws Exception {
     searcher.close();
+    reader.close();
+    directory.close();
     super.tearDown();
   }
 
-  private void add(String value, IndexWriter iw) throws IOException {
+  private void add(String value, RandomIndexWriter iw) throws IOException {
     Document d = new Document();
     d.add(new Field(FIELD_NAME, value, Field.Store.NO, Field.Index.ANALYZED));
     iw.addDocument(d);

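For context on what this test exercises: TimeLimitingCollector wraps another Collector and aborts collection once a time budget is spent. A minimal hedged sketch using the types imported above (the budget value is arbitrary):

    TopScoreDocCollector tdc = TopScoreDocCollector.create(10, true);
    Collector collector = new TimeLimitingCollector(tdc, 1000);  // allow 1000 ms
    try {
      searcher.search(query, collector);
    } catch (TimeExceededException x) {
      // budget exhausted; hits gathered so far remain available
    }
    TopDocs partial = tdc.topDocs();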
Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestTopDocsCollector.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestTopDocsCollector.java?rev=966819&r1=966818&r2=966819&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestTopDocsCollector.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/search/TestTopDocsCollector.java Thu Jul 22 19:34:35 2010
@@ -22,8 +22,8 @@ import java.io.IOException;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
@@ -93,10 +93,11 @@ public class TestTopDocsCollector extend
   private static final float MAX_SCORE = 9.17561f;
   
   private Directory dir = new RAMDirectory();
+  private IndexReader reader;
 
   private TopDocsCollector<ScoreDoc> doSearch(int numResults) throws IOException {
     Query q = new MatchAllDocsQuery();
-    IndexSearcher searcher = new IndexSearcher(dir, true);
+    IndexSearcher searcher = new IndexSearcher(reader);
     TopDocsCollector<ScoreDoc> tdc = new MyTopsDocCollector(numResults);
     searcher.search(q, tdc);
     searcher.close();
@@ -109,15 +110,17 @@ public class TestTopDocsCollector extend
     
     // populate an index with 30 documents, this should be enough for the test.
     // The documents have no content - the test uses MatchAllDocsQuery().
-    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+    RandomIndexWriter writer = new RandomIndexWriter(newRandom(), dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
     for (int i = 0; i < 30; i++) {
       writer.addDocument(new Document());
     }
+    reader = writer.getReader();
     writer.close();
   }
   
   @Override
   protected void tearDown() throws Exception {
+    reader.close();
     dir.close();
     dir = null;
     super.tearDown();

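The doSearch() change above relies on a detail worth noting: an IndexSearcher constructed over an IndexReader is cheap to create, and closing it does not close that reader. The reader can therefore be opened once and shared (lifecycle assumed from the hunks above):

    reader = writer.getReader();                   // setUp: open once
    IndexSearcher s1 = new IndexSearcher(reader);
    IndexSearcher s2 = new IndexSearcher(reader);  // many searchers may share one reader
    s1.close();                                    // the shared reader stays open
    s2.close();
    reader.close();                                // tearDown: close last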

