Subject: svn commit: r966819 [9/20] - in /lucene/dev/branches/realtime_search: ./ lucene/ lucene/backwards/ lucene/contrib/ lucene/contrib/benchmark/conf/ lucene/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ lucene/contrib/benchmark/src/j... Date: Thu, 22 Jul 2010 19:34:52 -0000 To: commits@lucene.apache.org From: buschmi@apache.org Message-Id: <20100722193500.90A922388C14@eris.apache.org> Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/document/TestDocument.java URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/document/TestDocument.java?rev=966819&r1=966818&r2=966819&view=diff ============================================================================== --- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/document/TestDocument.java (original) +++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/document/TestDocument.java Thu Jul 22 19:34:35 2010 @@ -1,8 +1,9 @@ package org.apache.lucene.document; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -32,17 +33,15 @@ import org.apache.lucene.util.LuceneTest /** * Tests {@link Document} class. 
*/ -public class TestDocument extends LuceneTestCase -{ - +public class TestDocument extends LuceneTestCase { + String binaryVal = "this text will be stored as a byte array in the index"; String binaryVal2 = "this text will be also stored as a byte array in the index"; - public void testBinaryField() - throws Exception - { + public void testBinaryField() throws Exception { Document doc = new Document(); - Fieldable stringFld = new Field("string", binaryVal, Field.Store.YES, Field.Index.NO); + Fieldable stringFld = new Field("string", binaryVal, Field.Store.YES, + Field.Index.NO); Fieldable binaryFld = new Field("binary", binaryVal.getBytes()); Fieldable binaryFld2 = new Field("binary", binaryVal2.getBytes()); @@ -67,7 +66,7 @@ public class TestDocument extends Lucene assertEquals(3, doc.fields.size()); byte[][] binaryTests = doc.getBinaryValues("binary"); - + assertEquals(2, binaryTests.length); binaryTest = new String(binaryTests[0]); @@ -88,17 +87,17 @@ public class TestDocument extends Lucene /** * Tests {@link Document#removeField(String)} method for a brand new Document * that has not been indexed yet. - * + * * @throws Exception on error */ - public void testRemoveForNewDocument() throws Exception - { + public void testRemoveForNewDocument() throws Exception { Document doc = makeDocumentWithFields(); assertEquals(8, doc.fields.size()); doc.removeFields("keyword"); assertEquals(6, doc.fields.size()); - doc.removeFields("doesnotexists"); // removing non-existing fields is siltenlty ignored - doc.removeFields("keyword"); // removing a field more than once + doc.removeFields("doesnotexists"); // removing non-existing fields is + // siltenlty ignored + doc.removeFields("keyword"); // removing a field more than once assertEquals(6, doc.fields.size()); doc.removeField("text"); assertEquals(5, doc.fields.size()); @@ -106,164 +105,171 @@ public class TestDocument extends Lucene assertEquals(4, doc.fields.size()); doc.removeField("text"); assertEquals(4, doc.fields.size()); - doc.removeField("doesnotexists"); // removing non-existing fields is siltenlty ignored + doc.removeField("doesnotexists"); // removing non-existing fields is + // siltenlty ignored assertEquals(4, doc.fields.size()); doc.removeFields("unindexed"); assertEquals(2, doc.fields.size()); doc.removeFields("unstored"); assertEquals(0, doc.fields.size()); - doc.removeFields("doesnotexists"); // removing non-existing fields is siltenlty ignored + doc.removeFields("doesnotexists"); // removing non-existing fields is + // siltenlty ignored assertEquals(0, doc.fields.size()); } - - public void testConstructorExceptions() - { - new Field("name", "value", Field.Store.YES, Field.Index.NO); // okay - new Field("name", "value", Field.Store.NO, Field.Index.NOT_ANALYZED); // okay + + public void testConstructorExceptions() { + new Field("name", "value", Field.Store.YES, Field.Index.NO); // okay + new Field("name", "value", Field.Store.NO, Field.Index.NOT_ANALYZED); // okay try { new Field("name", "value", Field.Store.NO, Field.Index.NO); fail(); - } catch(IllegalArgumentException e) { + } catch (IllegalArgumentException e) { // expected exception } - new Field("name", "value", Field.Store.YES, Field.Index.NO, Field.TermVector.NO); // okay + new Field("name", "value", Field.Store.YES, Field.Index.NO, + Field.TermVector.NO); // okay try { - new Field("name", "value", Field.Store.YES, Field.Index.NO, Field.TermVector.YES); + new Field("name", "value", Field.Store.YES, Field.Index.NO, + Field.TermVector.YES); fail(); - } 
catch(IllegalArgumentException e) { + } catch (IllegalArgumentException e) { // expected exception } } - /** - * Tests {@link Document#getValues(String)} method for a brand new Document - * that has not been indexed yet. - * - * @throws Exception on error - */ - public void testGetValuesForNewDocument() throws Exception - { - doAssert(makeDocumentWithFields(), false); - } - - /** - * Tests {@link Document#getValues(String)} method for a Document retrieved from - * an index. - * - * @throws Exception on error - */ - public void testGetValuesForIndexedDocument() throws Exception { - RAMDirectory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer())); - writer.addDocument(makeDocumentWithFields()); - writer.close(); - - Searcher searcher = new IndexSearcher(dir, true); - - // search for something that does exists - Query query = new TermQuery(new Term("keyword", "test1")); - - // ensure that queries return expected results without DateFilter first - ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; - assertEquals(1, hits.length); - - doAssert(searcher.doc(hits[0].doc), true); - searcher.close(); - } - - private Document makeDocumentWithFields() - { - Document doc = new Document(); - doc.add(new Field( "keyword", "test1", Field.Store.YES, Field.Index.NOT_ANALYZED)); - doc.add(new Field( "keyword", "test2", Field.Store.YES, Field.Index.NOT_ANALYZED)); - doc.add(new Field( "text", "test1", Field.Store.YES, Field.Index.ANALYZED)); - doc.add(new Field( "text", "test2", Field.Store.YES, Field.Index.ANALYZED)); - doc.add(new Field("unindexed", "test1", Field.Store.YES, Field.Index.NO)); - doc.add(new Field("unindexed", "test2", Field.Store.YES, Field.Index.NO)); - doc.add(new Field( "unstored", "test1", Field.Store.NO, Field.Index.ANALYZED)); - doc.add(new Field( "unstored", "test2", Field.Store.NO, Field.Index.ANALYZED)); - return doc; + /** + * Tests {@link Document#getValues(String)} method for a brand new Document + * that has not been indexed yet. + * + * @throws Exception on error + */ + public void testGetValuesForNewDocument() throws Exception { + doAssert(makeDocumentWithFields(), false); + } + + /** + * Tests {@link Document#getValues(String)} method for a Document retrieved + * from an index. 
+ * + * @throws Exception on error + */ + public void testGetValuesForIndexedDocument() throws Exception { + RAMDirectory dir = new RAMDirectory(); + RandomIndexWriter writer = new RandomIndexWriter(newRandom(), dir, + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); + writer.addDocument(makeDocumentWithFields()); + IndexReader reader = writer.getReader(); + + Searcher searcher = new IndexSearcher(reader); + + // search for something that does exists + Query query = new TermQuery(new Term("keyword", "test1")); + + // ensure that queries return expected results without DateFilter first + ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; + assertEquals(1, hits.length); + + doAssert(searcher.doc(hits[0].doc), true); + writer.close(); + searcher.close(); + reader.close(); + dir.close(); + } + + private Document makeDocumentWithFields() { + Document doc = new Document(); + doc.add(new Field("keyword", "test1", Field.Store.YES, + Field.Index.NOT_ANALYZED)); + doc.add(new Field("keyword", "test2", Field.Store.YES, + Field.Index.NOT_ANALYZED)); + doc.add(new Field("text", "test1", Field.Store.YES, Field.Index.ANALYZED)); + doc.add(new Field("text", "test2", Field.Store.YES, Field.Index.ANALYZED)); + doc.add(new Field("unindexed", "test1", Field.Store.YES, Field.Index.NO)); + doc.add(new Field("unindexed", "test2", Field.Store.YES, Field.Index.NO)); + doc + .add(new Field("unstored", "test1", Field.Store.NO, + Field.Index.ANALYZED)); + doc + .add(new Field("unstored", "test2", Field.Store.NO, + Field.Index.ANALYZED)); + return doc; + } + + private void doAssert(Document doc, boolean fromIndex) { + String[] keywordFieldValues = doc.getValues("keyword"); + String[] textFieldValues = doc.getValues("text"); + String[] unindexedFieldValues = doc.getValues("unindexed"); + String[] unstoredFieldValues = doc.getValues("unstored"); + + assertTrue(keywordFieldValues.length == 2); + assertTrue(textFieldValues.length == 2); + assertTrue(unindexedFieldValues.length == 2); + // this test cannot work for documents retrieved from the index + // since unstored fields will obviously not be returned + if (!fromIndex) { + assertTrue(unstoredFieldValues.length == 2); } - - private void doAssert(Document doc, boolean fromIndex) - { - String[] keywordFieldValues = doc.getValues("keyword"); - String[] textFieldValues = doc.getValues("text"); - String[] unindexedFieldValues = doc.getValues("unindexed"); - String[] unstoredFieldValues = doc.getValues("unstored"); - - assertTrue(keywordFieldValues.length == 2); - assertTrue(textFieldValues.length == 2); - assertTrue(unindexedFieldValues.length == 2); - // this test cannot work for documents retrieved from the index - // since unstored fields will obviously not be returned - if (! fromIndex) - { - assertTrue(unstoredFieldValues.length == 2); - } - - assertTrue(keywordFieldValues[0].equals("test1")); - assertTrue(keywordFieldValues[1].equals("test2")); - assertTrue(textFieldValues[0].equals("test1")); - assertTrue(textFieldValues[1].equals("test2")); - assertTrue(unindexedFieldValues[0].equals("test1")); - assertTrue(unindexedFieldValues[1].equals("test2")); - // this test cannot work for documents retrieved from the index - // since unstored fields will obviously not be returned - if (! 
fromIndex) - { - assertTrue(unstoredFieldValues[0].equals("test1")); - assertTrue(unstoredFieldValues[1].equals("test2")); - } + + assertTrue(keywordFieldValues[0].equals("test1")); + assertTrue(keywordFieldValues[1].equals("test2")); + assertTrue(textFieldValues[0].equals("test1")); + assertTrue(textFieldValues[1].equals("test2")); + assertTrue(unindexedFieldValues[0].equals("test1")); + assertTrue(unindexedFieldValues[1].equals("test2")); + // this test cannot work for documents retrieved from the index + // since unstored fields will obviously not be returned + if (!fromIndex) { + assertTrue(unstoredFieldValues[0].equals("test1")); + assertTrue(unstoredFieldValues[1].equals("test2")); } - - public void testFieldSetValue() throws Exception { - - Field field = new Field("id", "id1", Field.Store.YES, Field.Index.NOT_ANALYZED); - Document doc = new Document(); - doc.add(field); - doc.add(new Field("keyword", "test", Field.Store.YES, Field.Index.NOT_ANALYZED)); - - RAMDirectory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer())); - writer.addDocument(doc); - field.setValue("id2"); - writer.addDocument(doc); - field.setValue("id3"); - writer.addDocument(doc); - writer.close(); - - Searcher searcher = new IndexSearcher(dir, true); - - Query query = new TermQuery(new Term("keyword", "test")); - - // ensure that queries return expected results without DateFilter first - ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; - assertEquals(3, hits.length); - int result = 0; - for(int i=0;i<3;i++) { - Document doc2 = searcher.doc(hits[i].doc); - Field f = doc2.getField("id"); - if (f.stringValue().equals("id1")) - result |= 1; - else if (f.stringValue().equals("id2")) - result |= 2; - else if (f.stringValue().equals("id3")) - result |= 4; - else - fail("unexpected id field"); - } - searcher.close(); - dir.close(); - assertEquals("did not see all IDs", 7, result); + } + + public void testFieldSetValue() throws Exception { + + Field field = new Field("id", "id1", Field.Store.YES, + Field.Index.NOT_ANALYZED); + Document doc = new Document(); + doc.add(field); + doc.add(new Field("keyword", "test", Field.Store.YES, + Field.Index.NOT_ANALYZED)); + + RAMDirectory dir = new RAMDirectory(); + RandomIndexWriter writer = new RandomIndexWriter(newRandom(), dir, + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); + writer.addDocument(doc); + field.setValue("id2"); + writer.addDocument(doc); + field.setValue("id3"); + writer.addDocument(doc); + + IndexReader reader = writer.getReader(); + Searcher searcher = new IndexSearcher(reader); + + Query query = new TermQuery(new Term("keyword", "test")); + + // ensure that queries return expected results without DateFilter first + ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; + assertEquals(3, hits.length); + int result = 0; + for (int i = 0; i < 3; i++) { + Document doc2 = searcher.doc(hits[i].doc); + Field f = doc2.getField("id"); + if (f.stringValue().equals("id1")) result |= 1; + else if (f.stringValue().equals("id2")) result |= 2; + else if (f.stringValue().equals("id3")) result |= 4; + else fail("unexpected id field"); } - + writer.close(); + searcher.close(); + reader.close(); + dir.close(); + assertEquals("did not see all IDs", 7, result); + } + public void testFieldSetValueChangeBinary() { Field field1 = new Field("field1", new byte[0]); - Field field2 = new Field("field2", "", - Field.Store.YES, Field.Index.ANALYZED); + Field field2 
= new Field("field2", "", Field.Store.YES, + Field.Index.ANALYZED); try { field1.setValue("abc"); fail("did not hit expected exception"); Propchange: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/document/TestNumberTools.java ------------------------------------------------------------------------------ --- svn:mergeinfo (original) +++ svn:mergeinfo Thu Jul 22 19:34:35 2010 @@ -1,4 +1,5 @@ -/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/document/TestNumberTools.java:943137,949730 +/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/document/TestNumberTools.java:943137,949730,957490,960490,961612 +/lucene/dev/trunk/lucene/src/test/org/apache/lucene/document/TestNumberTools.java:953476-966816 /lucene/java/branches/flex_1458/src/test/org/apache/lucene/document/TestNumberTools.java:824912-931101 /lucene/java/branches/lucene_2_4/src/test/org/apache/lucene/document/TestNumberTools.java:748824 /lucene/java/branches/lucene_2_9/src/test/org/apache/lucene/document/TestNumberTools.java:829134,829881,831036,896850,909334,948516 Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java?rev=966819&r1=966818&r2=966819&view=diff ============================================================================== --- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java (original) +++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java Thu Jul 22 19:34:35 2010 @@ -464,7 +464,7 @@ public class TestAddIndexes extends Luce private void verifyTermDocs(Directory dir, Term term, int numDocs) throws IOException { IndexReader reader = IndexReader.open(dir, true); - DocsEnum docsEnum = MultiFields.getTermDocsEnum(reader, null, term.field, new BytesRef(term.text)); + DocsEnum docsEnum = MultiFields.getTermDocsEnum(reader, null, term.field, term.bytes); int count = 0; while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) count++; Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java?rev=966819&r1=966818&r2=966819&view=diff ============================================================================== --- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (original) +++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java Thu Jul 22 19:34:35 2010 @@ -23,10 +23,12 @@ import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; import java.util.Arrays; -import java.util.Random; import java.util.Enumeration; import java.util.List; +import java.util.Random; import java.util.zip.ZipEntry; import java.util.zip.ZipFile; @@ -128,21 +130,86 @@ public class TestBackwardsCompatibility "31.nocfs", }; + final String[] unsupportedNames = {"19.cfs", + "19.nocfs", + "20.cfs", + "20.nocfs", + "21.cfs", + "21.nocfs", + "22.cfs", + "22.nocfs", + "23.cfs", + "23.nocfs", + "24.cfs", + "24.nocfs", + "29.cfs", + "29.nocfs", + }; + + /** This test checks that *only* IndexFormatTooOldExceptions 
are throws when you open and operate on too old indexes! */ + public void testUnsupportedOldIndexes() throws Exception { + final Random rnd = newRandom(); + for(int i=0;i fields = d.getFields(); - if (!oldName.startsWith("19.") && - !oldName.startsWith("20.") && - !oldName.startsWith("21.") && - !oldName.startsWith("22.")) { - - if (d.getField("content3") == null) { - final int numFields = 5; - assertEquals(numFields, fields.size()); - Field f = d.getField("id"); - assertEquals(""+i, f.stringValue()); - - f = d.getField("utf8"); - assertEquals("Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", f.stringValue()); + if (d.getField("content3") == null) { + final int numFields = 5; + assertEquals(numFields, fields.size()); + Field f = d.getField("id"); + assertEquals(""+i, f.stringValue()); + + f = d.getField("utf8"); + assertEquals("Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", f.stringValue()); - f = d.getField("autf8"); - assertEquals("Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", f.stringValue()); - - f = d.getField("content2"); - assertEquals("here is more content with aaa aaa aaa", f.stringValue()); + f = d.getField("autf8"); + assertEquals("Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", f.stringValue()); + + f = d.getField("content2"); + assertEquals("here is more content with aaa aaa aaa", f.stringValue()); - f = d.getField("fie\u2C77ld"); - assertEquals("field with non-ascii name", f.stringValue()); - } - } + f = d.getField("fie\u2C77ld"); + assertEquals("field with non-ascii name", f.stringValue()); + } } else // Only ID 7 is deleted assertEquals(7, i); @@ -279,18 +340,12 @@ public class TestBackwardsCompatibility doTestHits(hits, 34, searcher.getIndexReader()); - if (!oldName.startsWith("19.") && - !oldName.startsWith("20.") && - !oldName.startsWith("21.") && - !oldName.startsWith("22.")) { - // Test on indices >= 2.3 - hits = searcher.search(new TermQuery(new Term("utf8", "\u0000")), null, 1000).scoreDocs; - assertEquals(34, hits.length); - hits = searcher.search(new TermQuery(new Term("utf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne")), null, 1000).scoreDocs; - assertEquals(34, hits.length); - hits = searcher.search(new TermQuery(new Term("utf8", "ab\ud917\udc17cd")), null, 1000).scoreDocs; - assertEquals(34, hits.length); - } + hits = searcher.search(new TermQuery(new Term("utf8", "\u0000")), null, 1000).scoreDocs; + assertEquals(34, hits.length); + hits = searcher.search(new TermQuery(new Term("utf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne")), null, 1000).scoreDocs; + assertEquals(34, hits.length); + hits = searcher.search(new TermQuery(new Term("utf8", "ab\ud917\udc17cd")), null, 1000).scoreDocs; + assertEquals(34, hits.length); searcher.close(); dir.close(); @@ -570,12 +625,6 @@ public class TestBackwardsCompatibility return new File(TEMP_DIR, dirName).getCanonicalPath(); } - static final String TEXT_TO_COMPRESS = "this is a compressed field and should appear in 3.0 as an uncompressed field after merge"; - - // FieldSelectorResult.SIZE returns compressed size for compressed fields, which are internally handled as binary; - // do it in the same way like FieldsWriter, do not use CompressionTools.compressString() for compressed fields: - static final byte[] BINARY_TO_COMPRESS = new byte[]{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20}; - private int countDocs(DocsEnum docs) throws IOException { int count = 0; while((docs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { Propchange: 
lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java ------------------------------------------------------------------------------ --- svn:mergeinfo (original) +++ svn:mergeinfo Thu Jul 22 19:34:35 2010 @@ -1,4 +1,5 @@ -/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java:943137,949730 +/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java:943137,949730,957490,960490,961612 +/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java:953476-966816 /lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java:824912-931101 /lucene/java/branches/lucene_2_4/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java:748824 /lucene/java/branches/lucene_2_9/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java:829134,829881,831036,896850,909334,948516 Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestByteSlices.java URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestByteSlices.java?rev=966819&r1=966818&r2=966819&view=diff ============================================================================== --- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestByteSlices.java (original) +++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestByteSlices.java Thu Jul 22 19:34:35 2010 @@ -55,7 +55,7 @@ public class TestByteSlices extends Luce public void testBasic() throws Throwable { ByteBlockPool pool = new ByteBlockPool(new ByteBlockAllocator()); - final int NUM_STREAM = 25; + final int NUM_STREAM = 100*_TestUtil.getRandomMultiplier(); ByteSliceWriter writer = new ByteSliceWriter(pool); @@ -91,8 +91,9 @@ public class TestByteSlices extends Luce for(int j=0;j 0); } - public void testAllTermDocs() throws IOException { - IndexReader reader = openReader(); - int NUM_DOCS = 2; - TermDocs td = reader.termDocs(null); - for(int i=0;i loadFieldNames = new HashSet(); + loadFieldNames.add(DocHelper.TEXT_FIELD_1_KEY); + loadFieldNames.add(DocHelper.TEXT_FIELD_UTF1_KEY); + Set lazyFieldNames = new HashSet(); + //new String[]{DocHelper.LARGE_LAZY_FIELD_KEY, DocHelper.LAZY_FIELD_KEY, DocHelper.LAZY_FIELD_BINARY_KEY}; + lazyFieldNames.add(DocHelper.LARGE_LAZY_FIELD_KEY); + lazyFieldNames.add(DocHelper.LAZY_FIELD_KEY); + lazyFieldNames.add(DocHelper.LAZY_FIELD_BINARY_KEY); + lazyFieldNames.add(DocHelper.TEXT_FIELD_UTF2_KEY); + + // Use LATENT instead of LAZY + SetBasedFieldSelector fieldSelector = new SetBasedFieldSelector(loadFieldNames, lazyFieldNames) { + public FieldSelectorResult accept(String fieldName) { + final FieldSelectorResult result = super.accept(fieldName); + if (result == FieldSelectorResult.LAZY_LOAD) { + return FieldSelectorResult.LATENT; + } else { + return result; + } + } + }; + + Document doc = reader.doc(0, fieldSelector); + assertTrue("doc is null and it shouldn't be", doc != null); + Fieldable field = doc.getFieldable(DocHelper.LAZY_FIELD_KEY); + assertTrue("field is null and it shouldn't be", field != null); + assertTrue("field is not lazy and it should be", field.isLazy()); + String value = field.stringValue(); + assertTrue("value is null and it shouldn't be", value != null); + assertTrue(value + " is not equal to " + DocHelper.LAZY_FIELD_TEXT, value.equals(DocHelper.LAZY_FIELD_TEXT) == true); + 
assertTrue("calling stringValue() twice should give different references", field.stringValue() != field.stringValue()); + + field = doc.getFieldable(DocHelper.TEXT_FIELD_1_KEY); + assertTrue("field is null and it shouldn't be", field != null); + assertTrue("Field is lazy and it should not be", field.isLazy() == false); + assertTrue("calling stringValue() twice should give same reference", field.stringValue() == field.stringValue()); + + field = doc.getFieldable(DocHelper.TEXT_FIELD_UTF1_KEY); + assertTrue("field is null and it shouldn't be", field != null); + assertTrue("Field is lazy and it should not be", field.isLazy() == false); + assertTrue(field.stringValue() + " is not equal to " + DocHelper.FIELD_UTF1_TEXT, field.stringValue().equals(DocHelper.FIELD_UTF1_TEXT) == true); + assertTrue("calling stringValue() twice should give same reference", field.stringValue() == field.stringValue()); + + field = doc.getFieldable(DocHelper.TEXT_FIELD_UTF2_KEY); + assertTrue("field is null and it shouldn't be", field != null); + assertTrue("Field is lazy and it should not be", field.isLazy() == true); + assertTrue(field.stringValue() + " is not equal to " + DocHelper.FIELD_UTF2_TEXT, field.stringValue().equals(DocHelper.FIELD_UTF2_TEXT) == true); + assertTrue("calling stringValue() twice should give different references", field.stringValue() != field.stringValue()); + + field = doc.getFieldable(DocHelper.LAZY_FIELD_BINARY_KEY); + assertTrue("field is null and it shouldn't be", field != null); + assertTrue("stringValue isn't null for lazy binary field", field.stringValue() == null); + assertTrue("calling binaryValue() twice should give different references", field.getBinaryValue() != field.getBinaryValue()); + + byte [] bytes = field.getBinaryValue(); + assertTrue("bytes is null and it shouldn't be", bytes != null); + assertTrue("", DocHelper.LAZY_FIELD_BINARY_BYTES.length == bytes.length); + for (int i = 0; i < bytes.length; i++) { + assertTrue("byte[" + i + "] is mismatched", bytes[i] == DocHelper.LAZY_FIELD_BINARY_BYTES[i]); + + } + } + + + + public void testLazyFieldsAfterClose() throws Exception { assertTrue(dir != null); assertTrue(fieldInfos != null); Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestFilterIndexReader.java URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestFilterIndexReader.java?rev=966819&r1=966818&r2=966819&view=diff ============================================================================== --- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestFilterIndexReader.java (original) +++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestFilterIndexReader.java Thu Jul 22 19:34:35 2010 @@ -159,26 +159,19 @@ public class TestFilterIndexReader exten assertTrue(reader.isOptimized()); - TermEnum terms = reader.terms(); - while (terms.next()) { - assertTrue(terms.term().text().indexOf('e') != -1); + TermsEnum terms = MultiFields.getTerms(reader, "default").iterator(); + while (terms.next() != null) { + assertTrue(terms.term().utf8ToString().indexOf('e') != -1); } - terms.close(); - TermPositions positions = reader.termPositions(new Term("default", "one")); - while (positions.next()) { - assertTrue((positions.doc() % 2) == 1); + assertEquals(TermsEnum.SeekStatus.FOUND, terms.seek(new BytesRef("one"))); + + DocsAndPositionsEnum positions = terms.docsAndPositions(MultiFields.getDeletedDocs(reader), + null); + while 
(positions.nextDoc() != DocsEnum.NO_MORE_DOCS) { + assertTrue((positions.docID() % 2) == 1); } - int NUM_DOCS = 3; - - TermDocs td = reader.termDocs(null); - for(int i=0;i 0); @@ -3454,8 +3465,13 @@ public class TestIndexWriter extends Luc Query q = new SpanTermQuery(new Term("field", "a")); hits = s.search(q, null, 1000).scoreDocs; assertEquals(1, hits.length); - TermPositions tps = s.getIndexReader().termPositions(new Term("field", "a")); - assertTrue(tps.next()); + + DocsAndPositionsEnum tps = MultiFields.getTermPositionsEnum(s.getIndexReader(), + MultiFields.getDeletedDocs(s.getIndexReader()), + "field", + new BytesRef("a")); + + assertTrue(tps.nextDoc() != DocsEnum.NO_MORE_DOCS); assertEquals(1, tps.freq()); assertEquals(0, tps.nextPosition()); w.close(); @@ -4465,12 +4481,12 @@ public class TestIndexWriter extends Luc // test that the terms were indexed. - assertTrue(ir.termDocs(new Term("binary","doc1field1")).next()); - assertTrue(ir.termDocs(new Term("binary","doc2field1")).next()); - assertTrue(ir.termDocs(new Term("binary","doc3field1")).next()); - assertTrue(ir.termDocs(new Term("string","doc1field2")).next()); - assertTrue(ir.termDocs(new Term("string","doc2field2")).next()); - assertTrue(ir.termDocs(new Term("string","doc3field2")).next()); + assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc1field1")).nextDoc() != DocsEnum.NO_MORE_DOCS); + assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc2field1")).nextDoc() != DocsEnum.NO_MORE_DOCS); + assertTrue(MultiFields.getTermDocsEnum(ir, null, "binary", new BytesRef("doc3field1")).nextDoc() != DocsEnum.NO_MORE_DOCS); + assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc1field2")).nextDoc() != DocsEnum.NO_MORE_DOCS); + assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc2field2")).nextDoc() != DocsEnum.NO_MORE_DOCS); + assertTrue(MultiFields.getTermDocsEnum(ir, null, "string", new BytesRef("doc3field2")).nextDoc() != DocsEnum.NO_MORE_DOCS); ir.close(); dir.close(); @@ -4606,38 +4622,22 @@ public class TestIndexWriter extends Luc private void checkTermsOrder(IndexReader r, Set allTerms, boolean isTop) throws IOException { TermsEnum terms = MultiFields.getFields(r).terms("f").iterator(); - char[] last = new char[2]; - int lastLength = 0; + BytesRef last = new BytesRef(); Set seenTerms = new HashSet(); - UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result(); while(true) { final BytesRef term = terms.next(); if (term == null) { break; } - UnicodeUtil.UTF8toUTF16(term.bytes, term.offset, term.length, utf16); - assertTrue(utf16.length <= 2); - // Make sure last term comes before current one, in - // UTF16 sort order - int i = 0; - for(i=0;i commitData = new HashMap(); + commitData.put("tag", "first"); + w.commit(commitData); + + // commit to "second" + w.addDocument(doc); + commitData.put("tag", "second"); + w.commit(commitData); + w.close(); + + // open "first" with IndexWriter + IndexCommit commit = null; + for(IndexCommit c : IndexReader.listCommits(dir)) { + if (c.getUserData().get("tag").equals("first")) { + commit = c; + break; + } + } + + assertNotNull(commit); + + w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE).setIndexCommit(commit)); + + assertEquals(1, w.numDocs()); + + // commit IndexWriter to "third" + w.addDocument(doc); + commitData.put("tag", "third"); + w.commit(commitData); + w.close(); + + // make sure "second" commit is 
still there + commit = null; + for(IndexCommit c : IndexReader.listCommits(dir)) { + if (c.getUserData().get("tag").equals("second")) { + commit = c; + break; + } + } + + assertNotNull(commit); + + IndexReader r = IndexReader.open(commit, true); + assertEquals(2, r.numDocs()); + r.close(); + + // open "second", w/ writeable IndexReader & commit + r = IndexReader.open(commit, NoDeletionPolicy.INSTANCE, false); + assertEquals(2, r.numDocs()); + r.deleteDocument(0); + r.deleteDocument(1); + commitData.put("tag", "fourth"); + r.commit(commitData); + r.close(); + + // make sure "third" commit is still there + commit = null; + for(IndexCommit c : IndexReader.listCommits(dir)) { + if (c.getUserData().get("tag").equals("third")) { + commit = c; + break; + } + } + assertNotNull(commit); + + dir.close(); + } } Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java?rev=966819&r1=966818&r2=966819&view=diff ============================================================================== --- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java (original) +++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java Thu Jul 22 19:34:35 2010 @@ -38,6 +38,7 @@ import org.apache.lucene.store.MockRAMDi import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.ThreadInterruptedException; public class TestIndexWriterReader extends LuceneTestCase { @@ -63,12 +64,16 @@ public class TestIndexWriterReader exten public static int count(Term t, IndexReader r) throws IOException { int count = 0; - TermDocs td = r.termDocs(t); - while (td.next()) { - td.doc(); - count++; + DocsEnum td = MultiFields.getTermDocsEnum(r, + MultiFields.getDeletedDocs(r), + t.field(), new BytesRef(t.text())); + + if (td != null) { + while (td.nextDoc() != DocsEnum.NO_MORE_DOCS) { + td.docID(); + count++; + } } - td.close(); return count; } Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java?rev=966819&r1=966818&r2=966819&view=diff ============================================================================== --- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java (original) +++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java Thu Jul 22 19:34:35 2010 @@ -30,6 +30,7 @@ import org.apache.lucene.store.Directory import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.BytesRef; /** * Tests lazy skipping on the proximity file. 
@@ -127,17 +128,26 @@ public class TestLazyProxSkipping extend writer.close(); IndexReader reader = IndexReader.open(directory, true); - TermPositions tp = reader.termPositions(); - tp.seek(new Term(this.field, "b")); + + DocsAndPositionsEnum tp = MultiFields.getTermPositionsEnum(reader, + MultiFields.getDeletedDocs(reader), + this.field, + new BytesRef("b")); + for (int i = 0; i < 10; i++) { - tp.next(); - assertEquals(tp.doc(), i); + tp.nextDoc(); + assertEquals(tp.docID(), i); assertEquals(tp.nextPosition(), 1); } - tp.seek(new Term(this.field, "a")); + + tp = MultiFields.getTermPositionsEnum(reader, + MultiFields.getDeletedDocs(reader), + this.field, + new BytesRef("a")); + for (int i = 0; i < 10; i++) { - tp.next(); - assertEquals(tp.doc(), i); + tp.nextDoc(); + assertEquals(tp.docID(), i); assertEquals(tp.nextPosition(), 0); } Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestMultiFields.java URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestMultiFields.java?rev=966819&r1=966818&r2=966819&view=diff ============================================================================== --- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestMultiFields.java (original) +++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestMultiFields.java Thu Jul 22 19:34:35 2010 @@ -37,7 +37,7 @@ public class TestMultiFields extends Luc Set deleted = new HashSet(); List terms = new ArrayList(); - int numDocs = r.nextInt(100*_TestUtil.getRandomMultiplier()); + int numDocs = _TestUtil.nextInt(r, 1, 100*_TestUtil.getRandomMultiplier()); Document doc = new Document(); Field f = new Field("field", "", Field.Store.NO, Field.Index.NOT_ANALYZED); doc.add(f); @@ -88,14 +88,13 @@ public class TestMultiFields extends Luc BytesRef term = terms.get(r.nextInt(terms.size())); DocsEnum docsEnum = terms2.docs(delDocs, term, null); - int count = 0; + assertNotNull(docsEnum); + for(int docID : docs.get(term)) { if (!deleted.contains(docID)) { assertEquals(docID, docsEnum.nextDoc()); - count++; } } - //System.out.println("c=" + count + " t=" + term); assertEquals(docsEnum.NO_MORE_DOCS, docsEnum.nextDoc()); } @@ -104,6 +103,7 @@ public class TestMultiFields extends Luc } } + /* private void verify(IndexReader r, String term, List expected) throws Exception { DocsEnum docs = MultiFields.getTermDocsEnum(r, MultiFields.getDeletedDocs(r), @@ -115,4 +115,23 @@ public class TestMultiFields extends Luc } assertEquals(docs.NO_MORE_DOCS, docs.nextDoc()); } + */ + + public void testSeparateEnums() throws Exception { + Directory dir = new MockRAMDirectory(); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); + Document d = new Document(); + d.add(new Field("f", "j", Field.Store.NO, Field.Index.NOT_ANALYZED)); + w.addDocument(d); + w.commit(); + w.addDocument(d); + IndexReader r = w.getReader(); + w.close(); + DocsEnum d1 = MultiFields.getTermDocsEnum(r, null, "f", new BytesRef("j")); + DocsEnum d2 = MultiFields.getTermDocsEnum(r, null, "f", new BytesRef("j")); + assertEquals(0, d1.nextDoc()); + assertEquals(0, d2.nextDoc()); + r.close(); + dir.close(); + } } Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java URL: 
http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java?rev=966819&r1=966818&r2=966819&view=diff ============================================================================== --- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java (original) +++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java Thu Jul 22 19:34:35 2010 @@ -21,7 +21,6 @@ import java.io.IOException; import java.io.Reader; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenStream; @@ -34,6 +33,7 @@ import org.apache.lucene.store.Directory import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.MockRAMDirectory; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.BytesRef; /** * This testcase tests whether multi-level skipping is being used @@ -68,11 +68,12 @@ public class TestMultiLevelSkipList exte writer.close(); IndexReader reader = SegmentReader.getOnlySegmentReader(dir); - TermPositions tp = reader.termPositions(); for (int i = 0; i < 2; i++) { counter = 0; - tp.seek(term); + DocsAndPositionsEnum tp = reader.termPositionsEnum(reader.getDeletedDocs(), + term.field(), + new BytesRef(term.text())); checkSkipTo(tp, 14, 185); // no skips checkSkipTo(tp, 17, 190); // one skip on level 0 @@ -84,18 +85,18 @@ public class TestMultiLevelSkipList exte } } - public void checkSkipTo(TermPositions tp, int target, int maxCounter) throws IOException { - tp.skipTo(target); + public void checkSkipTo(DocsAndPositionsEnum tp, int target, int maxCounter) throws IOException { + tp.advance(target); if (maxCounter < counter) { fail("Too many bytes read: " + counter); } - assertEquals("Wrong document " + tp.doc() + " after skipTo target " + target, target, tp.doc()); + assertEquals("Wrong document " + tp.docID() + " after skipTo target " + target, target, tp.docID()); assertEquals("Frequency is not 1: " + tp.freq(), 1,tp.freq()); tp.nextPosition(); - byte[] b = new byte[1]; - tp.getPayload(b, 0); - assertEquals("Wrong payload for the target " + target + ": " + b[0], (byte) target, b[0]); + BytesRef b = tp.getPayload(); + assertEquals(1, b.length); + assertEquals("Wrong payload for the target " + target + ": " + b.bytes[b.offset], (byte) target, b.bytes[b.offset]); } private static class PayloadAnalyzer extends Analyzer { Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestOmitTf.java URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestOmitTf.java?rev=966819&r1=966818&r2=966819&view=diff ============================================================================== --- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestOmitTf.java (original) +++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestOmitTf.java Thu Jul 22 19:34:35 2010 @@ -19,7 +19,6 @@ package org.apache.lucene.index; import java.io.IOException; import java.util.Collection; -import java.util.Random; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util._TestUtil; @@ -87,10 +86,7 @@ public class TestOmitTf extends LuceneTe f2.setOmitTermFreqAndPositions(false); d.add(f2); - Random rnd = newRandom(); - 
writer.addDocument(d); - FlexTestUtil.verifyFlexVsPreFlex(rnd, writer); // force merge writer.optimize(); @@ -98,8 +94,6 @@ public class TestOmitTf extends LuceneTe writer.close(); _TestUtil.checkIndex(ram); - FlexTestUtil.verifyFlexVsPreFlex(rnd, ram); - SegmentReader reader = SegmentReader.getOnlySegmentReader(ram); FieldInfos fi = reader.fieldInfos(); assertTrue("OmitTermFreqAndPositions field bit should be set.", fi.fieldInfo("f1").omitTermFreqAndPositions); @@ -145,12 +139,8 @@ public class TestOmitTf extends LuceneTe for(int i=0;i<30;i++) writer.addDocument(d); - Random rnd = newRandom(); - FlexTestUtil.verifyFlexVsPreFlex(rnd, writer); - // force merge writer.optimize(); - FlexTestUtil.verifyFlexVsPreFlex(rnd, writer); // flush writer.close(); Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestParallelReader.java URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestParallelReader.java?rev=966819&r1=966818&r2=966819&view=diff ============================================================================== --- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestParallelReader.java (original) +++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestParallelReader.java Thu Jul 22 19:34:35 2010 @@ -195,26 +195,6 @@ public class TestParallelReader extends } - public void testAllTermDocs() throws IOException { - Directory dir1 = getDir1(); - Directory dir2 = getDir2(); - ParallelReader pr = new ParallelReader(); - pr.add(IndexReader.open(dir1, false)); - pr.add(IndexReader.open(dir2, false)); - int NUM_DOCS = 2; - TermDocs td = pr.termDocs(null); - for(int i=0;i