lucene-java-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From mikemcc...@apache.org
Subject svn commit: r921608 [2/2] - in /lucene/java/branches/flex_1458/src: java/org/apache/lucene/index/ java/org/apache/lucene/index/codecs/ java/org/apache/lucene/index/codecs/intblock/ java/org/apache/lucene/index/codecs/preflex/ java/org/apache/lucene/ind...
Date Wed, 10 Mar 2010 22:53:38 GMT
Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/TermScorer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/TermScorer.java?rev=921608&r1=921607&r2=921608&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/TermScorer.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/TermScorer.java Wed Mar
10 22:53:37 2010
@@ -29,14 +29,15 @@ final class TermScorer extends Scorer {
   private byte[] norms;
   private float weightValue;
   private int doc = -1;
+  private int freq;
 
-  private final int[] docs = new int[32];         // buffered doc numbers
-  private final int[] freqs = new int[32];        // buffered term freqs
   private int pointer;
   private int pointerMax;
 
   private static final int SCORE_CACHE_SIZE = 32;
   private float[] scoreCache = new float[SCORE_CACHE_SIZE];
+  private int[] docs;
+  private int[] freqs;
 
   /**
    * Construct a <code>TermScorer</code>.
@@ -68,6 +69,13 @@ final class TermScorer extends Scorer {
     score(c, Integer.MAX_VALUE, nextDoc());
   }
 
+  private final void refillBuffer() throws IOException {
+    final DocsEnum.BulkReadResult result = docsEnum.read();  // refill
+    pointerMax = result.count;
+    docs = result.docs.ints;
+    freqs = result.freqs.ints;
+  }
+
   // firstDocID is ignored since nextDoc() sets 'doc'
   @Override
   protected boolean score(Collector c, int end, int firstDocID) throws IOException {
@@ -75,7 +83,7 @@ final class TermScorer extends Scorer {
     while (doc < end) {                           // for docs in window
       c.collect(doc);                      // collect score
       if (++pointer >= pointerMax) {
-        pointerMax = docsEnum.read(docs, freqs);  // refill buffers
+        refillBuffer();
         if (pointerMax != 0) {
           pointer = 0;
         } else {
@@ -84,12 +92,15 @@ final class TermScorer extends Scorer {
         }
       } 
       doc = docs[pointer];
+      freq = freqs[pointer];
     }
     return true;
   }
 
   @Override
-  public int docID() { return doc; }
+  public int docID() {
+    return doc;
+  }
 
   /**
    * Advances to the next document matching the query. <br>
@@ -102,7 +113,7 @@ final class TermScorer extends Scorer {
   public int nextDoc() throws IOException {
     pointer++;
     if (pointer >= pointerMax) {
-      pointerMax = docsEnum.read(docs, freqs);    // refill buffer
+      refillBuffer();
       if (pointerMax != 0) {
         pointer = 0;
       } else {
@@ -110,6 +121,7 @@ final class TermScorer extends Scorer {
       }
     } 
     doc = docs[pointer];
+    freq = freqs[pointer];
     assert doc != NO_MORE_DOCS;
     return doc;
   }
@@ -117,11 +129,10 @@ final class TermScorer extends Scorer {
   @Override
   public float score() {
     assert doc != NO_MORE_DOCS;
-    int f = freqs[pointer];
     float raw =                                   // compute tf(f)*weight
-      f < SCORE_CACHE_SIZE                        // check cache
-      ? scoreCache[f]                             // cache hit
-      : getSimilarity().tf(f)*weightValue;        // cache miss
+      freq < SCORE_CACHE_SIZE                        // check cache
+      ? scoreCache[freq]                             // cache hit
+      : getSimilarity().tf(freq)*weightValue;        // cache miss
 
     return norms == null ? raw : raw * getSimilarity().decodeNormValue(norms[doc]); // normalize
for field
   }
@@ -140,6 +151,7 @@ final class TermScorer extends Scorer {
     // first scan in cache
     for (pointer++; pointer < pointerMax; pointer++) {
       if (docs[pointer] >= target) {
+        freq = freqs[pointer];
         return doc = docs[pointer];
       }
     }
@@ -148,10 +160,8 @@ final class TermScorer extends Scorer {
     int newDoc = docsEnum.advance(target);
     //System.out.println("ts.advance docsEnum=" + docsEnum);
     if (newDoc != DocsEnum.NO_MORE_DOCS) {
-      pointerMax = 1;
-      pointer = 0;
-      docs[pointer] = doc = newDoc;
-      freqs[pointer] = docsEnum.freq();
+      doc = newDoc;
+      freq = docsEnum.freq();
     } else {
       doc = NO_MORE_DOCS;
     }

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/util/BytesRef.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/util/BytesRef.java?rev=921608&r1=921607&r2=921608&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/util/BytesRef.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/util/BytesRef.java Wed Mar 10
22:53:37 2010
@@ -23,7 +23,7 @@ import java.io.UnsupportedEncodingExcept
 /** Represents byte[], as a slice (offset + length) into an
  *  existing byte[].
  *
- *  @lucene.internal */
+ *  @lucene.experimental */
 public final class BytesRef {
 
   public byte[] bytes;

Modified: lucene/java/branches/flex_1458/src/test/org/apache/lucene/TestExternalCodecs.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/test/org/apache/lucene/TestExternalCodecs.java?rev=921608&r1=921607&r2=921608&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/test/org/apache/lucene/TestExternalCodecs.java (original)
+++ lucene/java/branches/flex_1458/src/test/org/apache/lucene/TestExternalCodecs.java Wed
Mar 10 22:53:37 2010
@@ -460,9 +460,9 @@ public class TestExternalCodecs extends 
     }
 
     @Override
-    public FieldsProducer fieldsProducer(Directory dir, FieldInfos fieldInfos, SegmentInfo
si, int readBufferSize, int indexDivisor)
+    public FieldsProducer fieldsProducer(SegmentReadState readState)
       throws IOException {
-      return state.get(si.name);
+      return state.get(readState.segmentInfo.name);
     }
 
     @Override
@@ -565,7 +565,7 @@ public class TestExternalCodecs extends 
             fields.add(fi.name);
             Codec codec = getCodec(fi.name);
             if (!codecs.containsKey(codec)) {
-              codecs.put(codec, codec.fieldsProducer(dir, fieldInfos, si, readBufferSize,
indexDivisor));
+              codecs.put(codec, codec.fieldsProducer(new SegmentReadState(dir, si, fieldInfos,
readBufferSize, indexDivisor)));
             }
           }
         }
@@ -644,11 +644,9 @@ public class TestExternalCodecs extends 
       }
     }
 
-    public FieldsProducer fieldsProducer(Directory dir, FieldInfos fieldInfos,
-                                         SegmentInfo si, int readBufferSize,
-                                         int indexDivisor)
-      throws IOException {
-      return new FieldsReader(dir, fieldInfos, si, readBufferSize, indexDivisor);
+    public FieldsProducer fieldsProducer(SegmentReadState state)
+        throws IOException {
+      return new FieldsReader(state.dir, state.fieldInfos, state.segmentInfo, state.readBufferSize,
state.termsIndexDivisor);
     }
 
     @Override
@@ -674,7 +672,7 @@ public class TestExternalCodecs extends 
     }
   }
 
-  public static class MyCodecs extends Codecs {
+  public static class MyCodecs extends CodecProvider {
     PerFieldCodecWrapper perField;
 
     MyCodecs() {
@@ -739,9 +737,9 @@ public class TestExternalCodecs extends 
     }
 
     @Override
-    public FieldsProducer fieldsProducer(Directory dir, FieldInfos fieldInfos, SegmentInfo
si, int readBufferSize, int indexDivisor) throws IOException {
+    public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
 
-      StandardPostingsReader docsReader = new StandardPostingsReaderImpl(dir, si, readBufferSize);
+      StandardPostingsReader docsReader = new StandardPostingsReaderImpl(state.dir, state.segmentInfo,
state.readBufferSize);
       StandardPostingsReader pulsingReader = new PulsingPostingsReaderImpl(docsReader);
 
       // Terms dict index reader
@@ -749,10 +747,10 @@ public class TestExternalCodecs extends 
 
       boolean success = false;
       try {
-        indexReader = new SimpleStandardTermsIndexReader(dir,
-                                                         fieldInfos,
-                                                         si.name,
-                                                         indexDivisor,
+        indexReader = new SimpleStandardTermsIndexReader(state.dir,
+                                                         state.fieldInfos,
+                                                         state.segmentInfo.name,
+                                                         state.termsIndexDivisor,
                                                          reverseUnicodeComparator);
         success = true;
       } finally {
@@ -765,9 +763,11 @@ public class TestExternalCodecs extends 
       success = false;
       try {
         FieldsProducer ret = new StandardTermsDictReader(indexReader,
-                                                         dir, fieldInfos, si.name,
+                                                         state.dir,
+                                                         state.fieldInfos,
+                                                         state.segmentInfo.name,
                                                          pulsingReader,
-                                                         readBufferSize,
+                                                         state.readBufferSize,
                                                          reverseUnicodeComparator,
                                                          StandardCodec.TERMS_CACHE_SIZE);
         success = true;

Modified: lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/FlexTestUtil.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/FlexTestUtil.java?rev=921608&r1=921607&r2=921608&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/FlexTestUtil.java (original)
+++ lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/FlexTestUtil.java Wed
Mar 10 22:53:37 2010
@@ -202,8 +202,6 @@ public class FlexTestUtil {
     }
     FieldsEnum fieldsEnum = fields.iterator();
     boolean skipNext = false;
-    int[] docs1 = new int[16];
-    int[] freqs1 = new int[16];
     int[] docs2 = new int[16];
     int[] freqs2 = new int[16];
     while(true) {
@@ -253,32 +251,35 @@ public class FlexTestUtil {
               if (rand.nextBoolean()) {
                 // use bulk read API
                 termDocs.seek(t);
-                int count1 = 0;
+                DocsEnum.BulkReadResult result1 = null;
                 int count2 = 0;
                 while(true) {
-                  if (count1 == 0) {
-                    count1 = docs.read(docs1, freqs1);
+                  if (result1 == null || result1.count == 0) {
+                    result1 = docs.read();
                   }
                   if (count2 == 0) {
                     count2 = termDocs.read(docs2, freqs2);
                   }
 
-                  if (count1 == 0 || count2 == 0) {
+                  if (result1.count == 0 || count2 == 0) {
                     assertEquals(0, count2);
-                    assertEquals(0, count1);
+                    assertEquals(0, result1.count);
                     break;
                   }
-                  final int limit = Math.min(count1, count2);
+                  final int limit = Math.min(result1.count, count2);
                   for(int i=0;i<limit;i++) {
-                    assertEquals(docs1[i], docs2[i]);
-                    assertEquals(freqs1[i], freqs2[i]);
+                    assertEquals(result1.docs.ints[i], docs2[i]);
+                    assertEquals(result1.freqs.ints[i], freqs2[i]);
                   }
-                  if (count1 > limit) {
+                  if (result1.count > limit) {
                     // copy down
-                    System.arraycopy(docs1, limit, docs1, 0, count1-limit);
-                    System.arraycopy(freqs1, limit, freqs1, 0, count1-limit);
+                    // nocommit -- hmm in general I should
+                    // not muck w/ the int[]'s returned to
+                    // me like this...?
+                    System.arraycopy(result1.docs.ints, limit, result1.docs.ints, 0, result1.count-limit);
+                    System.arraycopy(result1.freqs.ints, limit, result1.freqs.ints, 0, result1.count-limit);
                   }
-                  count1 -= limit;
+                  result1.count -= limit;
 
                   if (count2 > limit) {
                     // copy down

Modified: lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/TestCodecs.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/TestCodecs.java?rev=921608&r1=921607&r2=921608&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/TestCodecs.java (original)
+++ lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/TestCodecs.java Wed Mar
10 22:53:37 2010
@@ -262,10 +262,10 @@ public class TestCodecs extends LuceneTe
 
     Directory dir = new MockRAMDirectory();
     write(fieldInfos, dir, fields);
-    SegmentInfo si = new SegmentInfo(SEGMENT, 10000, dir, Codecs.getDefault().getWriter(null));
+    SegmentInfo si = new SegmentInfo(SEGMENT, 10000, dir, CodecProvider.getDefault().getWriter(null));
     si.setHasProx(false);
 
-    FieldsProducer reader = si.getCodec().fieldsProducer(dir, fieldInfos, si, 64, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+    FieldsProducer reader = si.getCodec().fieldsProducer(new SegmentReadState(dir, si, fieldInfos,
64, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR));
     
     FieldsEnum fieldsEnum = reader.iterator();
     assertNotNull(fieldsEnum.next());
@@ -300,13 +300,13 @@ public class TestCodecs extends LuceneTe
     Directory dir = new MockRAMDirectory();
 
     write(fieldInfos, dir, fields);
-    SegmentInfo si = new SegmentInfo(SEGMENT, 10000, dir, Codecs.getDefault().getWriter(null));
+    SegmentInfo si = new SegmentInfo(SEGMENT, 10000, dir, CodecProvider.getDefault().getWriter(null));
 
     if (Codec.DEBUG) {
       System.out.println("\nTEST: now read");
     }
 
-    FieldsProducer terms = si.getCodec().fieldsProducer(dir, fieldInfos, si, 1024, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+    FieldsProducer terms = si.getCodec().fieldsProducer(new SegmentReadState(dir, si, fieldInfos,
1024, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR));
 
     Verify[] threads = new Verify[NUM_TEST_THREADS-1];
     for(int i=0;i<NUM_TEST_THREADS-1;i++) {
@@ -587,7 +587,7 @@ public class TestCodecs extends LuceneTe
     final int termIndexInterval = nextInt(13, 27);
 
     SegmentWriteState state = new SegmentWriteState(null, dir, SEGMENT, fieldInfos, null,
10000, 10000, termIndexInterval,
-                                                    Codecs.getDefault());
+                                                    CodecProvider.getDefault());
 
     final FieldsConsumer consumer = state.codec.fieldsConsumer(state);
     Arrays.sort(fields);

Modified: lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/TestDoc.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/TestDoc.java?rev=921608&r1=921607&r2=921608&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/TestDoc.java (original)
+++ lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/TestDoc.java Wed Mar 10
22:53:37 2010
@@ -35,7 +35,7 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.index.codecs.Codecs;
+import org.apache.lucene.index.codecs.CodecProvider;
 
 
 /** JUnit adaptation of an older test case DocTest. */
@@ -181,7 +181,7 @@ public class TestDoc extends LuceneTestC
       SegmentReader r1 = SegmentReader.get(true, si1, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
       SegmentReader r2 = SegmentReader.get(true, si2, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
 
-      SegmentMerger merger = new SegmentMerger(si1.dir, IndexWriter.DEFAULT_TERM_INDEX_INTERVAL,
merged, null, Codecs.getDefault());
+      SegmentMerger merger = new SegmentMerger(si1.dir, IndexWriter.DEFAULT_TERM_INDEX_INTERVAL,
merged, null, CodecProvider.getDefault());
 
       merger.add(r1);
       merger.add(r2);

Modified: lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/TestIndexReader.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/TestIndexReader.java?rev=921608&r1=921607&r2=921608&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/TestIndexReader.java (original)
+++ lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/TestIndexReader.java Wed
Mar 10 22:53:37 2010
@@ -21,7 +21,6 @@ package org.apache.lucene.index;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -42,7 +41,7 @@ import org.apache.lucene.document.FieldS
 import org.apache.lucene.document.Fieldable;
 import org.apache.lucene.document.SetBasedFieldSelector;
 import org.apache.lucene.index.IndexReader.FieldOption;
-import org.apache.lucene.index.codecs.Codecs;
+import org.apache.lucene.index.codecs.CodecProvider;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.ScoreDoc;
@@ -56,7 +55,6 @@ import org.apache.lucene.store.NoSuchDir
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;
-import org.apache.lucene.index.codecs.Codec;
 
 public class TestIndexReader extends LuceneTestCase
 {
@@ -1133,17 +1131,6 @@ public class TestIndexReader extends Luc
       dir.close();
     }
 
-    private String arrayToString(String[] l) {
-      String s = "";
-      for(int i=0;i<l.length;i++) {
-        if (i > 0) {
-          s += "\n    ";
-        }
-        s += l[i];
-      }
-      return s;
-    }
-
     public void testOpenReaderAfterDelete() throws IOException {
       File dirFile = new File(System.getProperty("tempDir"),
                           "deletetest");
@@ -1410,7 +1397,7 @@ public class TestIndexReader extends Luc
       writer.close();
 
       SegmentInfos sis = new SegmentInfos();
-      sis.read(d, Codecs.getDefault());
+      sis.read(d, CodecProvider.getDefault());
       IndexReader r = IndexReader.open(d, false);
       IndexCommit c = r.getIndexCommit();
 

Modified: lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/TestIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/TestIndexWriter.java?rev=921608&r1=921607&r2=921608&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/TestIndexWriter.java (original)
+++ lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/TestIndexWriter.java Wed
Mar 10 22:53:37 2010
@@ -51,7 +51,7 @@ import org.apache.lucene.analysis.tokena
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.Fieldable;
-import org.apache.lucene.index.codecs.Codecs;
+import org.apache.lucene.index.codecs.CodecProvider;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.Query;
@@ -532,7 +532,7 @@ public class TestIndexWriter extends Luc
       String[] startFiles = dir.listAll();
       SegmentInfos infos = new SegmentInfos();
       infos.read(dir);
-      new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null,
Codecs.getDefault());
+      new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null,
CodecProvider.getDefault());
       String[] endFiles = dir.listAll();
 
       Arrays.sort(startFiles);

Modified: lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/TestSegmentMerger.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/TestSegmentMerger.java?rev=921608&r1=921607&r2=921608&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/TestSegmentMerger.java
(original)
+++ lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/TestSegmentMerger.java
Wed Mar 10 22:53:37 2010
@@ -22,7 +22,7 @@ import org.apache.lucene.store.BufferedI
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.index.codecs.Codecs;
+import org.apache.lucene.index.codecs.CodecProvider;
 
 import java.io.IOException;
 import java.util.Collection;
@@ -65,7 +65,7 @@ public class TestSegmentMerger extends L
   }
   
   public void testMerge() throws IOException {                             
-    SegmentMerger merger = new SegmentMerger(mergedDir, IndexWriter.DEFAULT_TERM_INDEX_INTERVAL,
mergedSegment, null, Codecs.getDefault());
+    SegmentMerger merger = new SegmentMerger(mergedDir, IndexWriter.DEFAULT_TERM_INDEX_INTERVAL,
mergedSegment, null, CodecProvider.getDefault());
     merger.add(reader1);
     merger.add(reader2);
     int docsMerged = merger.merge();

Modified: lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/TestStressIndexing2.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/TestStressIndexing2.java?rev=921608&r1=921607&r2=921608&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/TestStressIndexing2.java
(original)
+++ lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/TestStressIndexing2.java
Wed Mar 10 22:53:37 2010
@@ -17,10 +17,8 @@ package org.apache.lucene.index;
 import org.apache.lucene.store.*;
 import org.apache.lucene.document.*;
 import org.apache.lucene.analysis.*;
+import org.apache.lucene.util.*;
 
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
-import org.apache.lucene.util.StringHelper;
 import org.apache.lucene.search.TermQuery;
 
 import java.util.*;
@@ -28,8 +26,6 @@ import java.io.IOException;
 
 import junit.framework.Assert;
 
-// nocommit -- cut test over to flex API, but not too soon
-// (it catches bugs in emulation)
 public class TestStressIndexing2 extends LuceneTestCase {
   static int maxFields=4;
   static int bigFieldSize=10;
@@ -278,32 +274,53 @@ public class TestStressIndexing2 extends
 
     int[] r2r1 = new int[r2.maxDoc()];   // r2 id to r1 id mapping
 
-    TermDocs termDocs1 = r1.termDocs();
-    TermDocs termDocs2 = r2.termDocs();
-
     // create mapping from id2 space to id2 based on idField
     idField = StringHelper.intern(idField);
-    TermEnum termEnum = r1.terms (new Term (idField, ""));
-    do {
-      Term term = termEnum.term();
+    final TermsEnum termsEnum = MultiFields.getFields(r1).terms(idField).iterator();
+
+    final Bits delDocs1 = MultiFields.getDeletedDocs(r1);
+    final Bits delDocs2 = MultiFields.getDeletedDocs(r2);
+    
+    Fields fields = MultiFields.getFields(r2);
+    if (fields == null) {
+      // make sure r1 is in fact empty (e.g. has only all
+      // deleted docs):
+      DocsEnum docs = null;
+      while(termsEnum.next() != null) {
+        docs = termsEnum.docs(delDocs1, docs);
+        while(docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+          fail("r1 is not empty but r2 is");
+        }
+      }
+      return;
+    }
+    Terms terms2 = fields.terms(idField);
+
+    DocsEnum termDocs1 = null;
+    DocsEnum termDocs2 = null;
+
+    while(true) {
+      BytesRef term = termsEnum.next();
       //System.out.println("TEST: match id term=" + term);
-      if (term==null || term.field() != idField) break;
+      if (term == null) {
+        break;
+      }
 
-      termDocs1.seek (termEnum);
-      if (!termDocs1.next()) {
+      termDocs1 = termsEnum.docs(delDocs1, termDocs1);
+      termDocs2 = terms2.docs(delDocs2, term, termDocs2);
+
+      if (termDocs1.nextDoc() == DocsEnum.NO_MORE_DOCS) {
         // This doc is deleted and wasn't replaced
-        termDocs2.seek(termEnum);
-        assertFalse(termDocs2.next());
+        assertTrue(termDocs2 == null || termDocs2.nextDoc() == DocsEnum.NO_MORE_DOCS);
         continue;
       }
 
-      int id1 = termDocs1.doc();
-      assertFalse(termDocs1.next());
+      int id1 = termDocs1.docID();
+      assertEquals(DocsEnum.NO_MORE_DOCS, termDocs1.nextDoc());
 
-      termDocs2.seek(termEnum);
-      assertTrue(termDocs2.next());
-      int id2 = termDocs2.doc();
-      assertFalse(termDocs2.next());
+      assertTrue(termDocs2.nextDoc() != DocsEnum.NO_MORE_DOCS);
+      int id2 = termDocs2.docID();
+      assertEquals(DocsEnum.NO_MORE_DOCS, termDocs2.nextDoc());
 
       r2r1[id2] = id1;
 
@@ -337,65 +354,95 @@ public class TestStressIndexing2 extends
         throw e;
       }
 
-    } while (termEnum.next());
+    }
 
-    termEnum.close();
     //System.out.println("TEST: done match id");
 
     // Verify postings
     //System.out.println("TEST: create te1");
-    TermEnum termEnum1 = r1.terms (new Term ("", ""));
-    //System.out.println("TEST: create te2");
-    TermEnum termEnum2 = r2.terms (new Term ("", ""));
+    final FieldsEnum fields1 = MultiFields.getFields(r1).iterator();
+    final FieldsEnum fields2 = MultiFields.getFields(r2).iterator();
+
+    String field1=null, field2=null;
+    TermsEnum termsEnum1 = null;
+    TermsEnum termsEnum2 = null;
+    DocsEnum docs1=null, docs2=null;
 
     // pack both doc and freq into single element for easy sorting
     long[] info1 = new long[r1.numDocs()];
     long[] info2 = new long[r2.numDocs()];
 
     for(;;) {
-      Term term1,term2;
+      BytesRef term1=null, term2=null;
 
       // iterate until we get some docs
       int len1;
       for(;;) {
         len1=0;
-        term1 = termEnum1.term();
+        if (termsEnum1 == null) {
+          field1 = fields1.next();
+          if (field1 == null) {
+            break;
+          } else {
+            termsEnum1 = fields1.terms();
+          }
+        }
+        term1 = termsEnum1.next();
+        if (term1 == null) {
+          // no more terms in this field
+          termsEnum1 = null;
+          continue;
+        }
+        
         //System.out.println("TEST: term1=" + term1);
-        if (term1==null) break;
-        termDocs1.seek(termEnum1);
-        while (termDocs1.next()) {
-          int d1 = termDocs1.doc();
-          int f1 = termDocs1.freq();
-          info1[len1] = (((long)d1)<<32) | f1;
+        docs1 = termsEnum1.docs(delDocs1, docs1);
+        while (docs1.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+          int d = docs1.docID();
+          int f = docs1.freq();
+          info1[len1] = (((long)d)<<32) | f;
           len1++;
         }
         if (len1>0) break;
-        if (!termEnum1.next()) break;
       }
 
-       // iterate until we get some docs
+      // iterate until we get some docs
       int len2;
       for(;;) {
         len2=0;
-        term2 = termEnum2.term();
-        //System.out.println("TEST: term2=" + term2);
-        if (term2==null) break;
-        termDocs2.seek(termEnum2);
-        while (termDocs2.next()) {
-          int d2 = termDocs2.doc();
-          int f2 = termDocs2.freq();
-          info2[len2] = (((long)r2r1[d2])<<32) | f2;
+        if (termsEnum2 == null) {
+          field2 = fields2.next();
+          if (field2 == null) {
+            break;
+          } else {
+            termsEnum2 = fields2.terms();
+          }
+        }
+        term2 = termsEnum2.next();
+        if (term2 == null) {
+          // no more terms in this field
+          termsEnum2 = null;
+          continue;
+        }
+        
+        //System.out.println("TEST: term1=" + term1);
+        docs2 = termsEnum2.docs(delDocs2, docs2);
+        while (docs2.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+          int d = r2r1[docs2.docID()];
+          int f = docs2.freq();
+          info2[len2] = (((long)d)<<32) | f;
           len2++;
         }
         if (len2>0) break;
-        if (!termEnum2.next()) break;
       }
 
       assertEquals(len1, len2);
       if (len1==0) break;  // no more terms
 
+      assertEquals(field1, field2);
+      assertTrue(term1.bytesEquals(term2));
+
       if (!hasDeletes)
-        assertEquals(termEnum1.docFreq(), termEnum2.docFreq());
+        assertEquals(termsEnum1.docFreq(), termsEnum2.docFreq());
 
       assertEquals("len1=" + len1 + " len2=" + len2 + " deletes?=" + hasDeletes, term1, term2);
 
@@ -404,11 +451,11 @@ public class TestStressIndexing2 extends
 
       // now compare
       for (int i=0; i<len1; i++) {
-        assertEquals(info1[i], info2[i]);
+        assertEquals("i=" + i + " len=" + len1 + " d1=" + (info1[i]>>>32) + " f1="
+ (info1[i]&Integer.MAX_VALUE) + " d2=" + (info2[i]>>>32) + " f2=" + (info2[i]&Integer.MAX_VALUE)
+
+                     " field=" + field1 + " term=" + term1.utf8ToString(),
+                     info1[i],
+                     info2[i]);
       }
-
-      termEnum1.next();
-      termEnum2.next();
     }
   }
 



Mime
View raw message