lucene-java-commits mailing list archives

From: mikemcc...@apache.org
Subject: svn commit: r553236 [6/6] - in /lucene/java/trunk: ./ contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/ docs/ src/java/org/apache/lucene/analysis/ src/java/org/apache/lucene/index/ src/java/org/apache/lucene/store/ src/site/src/documentati...
Date: Wed, 04 Jul 2007 15:16:40 GMT
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java?view=diff&rev=553236&r1=553235&r2=553236
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java Wed Jul  4 08:16:38 2007
@@ -106,8 +106,12 @@
     rmDir(dirName);
   }
 
+  final String[] oldNames = {"prelockless.cfs",
+                             "prelockless.nocfs",
+                             "presharedstores.cfs",
+                             "presharedstores.nocfs"};
+
   public void testSearchOldIndex() throws IOException {
-    String[] oldNames = {"prelockless.cfs", "prelockless.nocfs"};
     for(int i=0;i<oldNames.length;i++) {
       String dirName = "src/test/org/apache/lucene/index/index." + oldNames[i];
       unzip(dirName, oldNames[i]);
@@ -117,7 +121,6 @@
   }
 
   public void testIndexOldIndexNoAdds() throws IOException {
-    String[] oldNames = {"prelockless.cfs", "prelockless.nocfs"};
     for(int i=0;i<oldNames.length;i++) {
       String dirName = "src/test/org/apache/lucene/index/index." + oldNames[i];
       unzip(dirName, oldNames[i]);
@@ -131,7 +134,6 @@
   }
 
   public void testIndexOldIndex() throws IOException {
-    String[] oldNames = {"prelockless.cfs", "prelockless.nocfs"};
     for(int i=0;i<oldNames.length;i++) {
       String dirName = "src/test/org/apache/lucene/index/index." + oldNames[i];
       unzip(dirName, oldNames[i]);
@@ -312,8 +314,9 @@
         Directory dir = FSDirectory.getDirectory(fullDir(outputDir));
 
         boolean autoCommit = 0 == pass;
-      
+ 
         IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true);
+        writer.setRAMBufferSizeMB(16.0);
         //IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
         for(int i=0;i<35;i++) {
           addDoc(writer, i);
@@ -337,8 +340,8 @@
         // figure out which field number corresponds to
         // "content", and then set our expected file names below
         // accordingly:
-        CompoundFileReader cfsReader = new CompoundFileReader(dir, "_2.cfs");
-        FieldInfos fieldInfos = new FieldInfos(cfsReader, "_2.fnm");
+        CompoundFileReader cfsReader = new CompoundFileReader(dir, "_0.cfs");
+        FieldInfos fieldInfos = new FieldInfos(cfsReader, "_0.fnm");
         int contentFieldIndex = -1;
         for(int i=0;i<fieldInfos.size();i++) {
           FieldInfo fi = fieldInfos.fieldInfo(i);
@@ -351,17 +354,15 @@
         assertTrue("could not locate the 'content' field number in the _2.cfs segment", contentFieldIndex
!= -1);
 
         // Now verify file names:
-        String[] expected = {"_0.cfs",
-                             "_0_1.del",
-                             "_1.cfs",
-                             "_2.cfs",
-                             "_2_1.s" + contentFieldIndex,
-                             "_3.cfs",
-                             "segments_a",
-                             "segments.gen"};
-        if (!autoCommit) {
-          expected[6] = "segments_3";
-        }
+        String[] expected;
+        expected = new String[] {"_0.cfs",
+                    "_0_1.del",
+                    "_0_1.s" + contentFieldIndex,
+                    "segments_4",
+                    "segments.gen"};
+
+        if (!autoCommit)
+          expected[3] = "segments_3";
 
         String[] actual = dir.list();
         Arrays.sort(expected);

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java?view=diff&rev=553236&r1=553235&r2=553236
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java Wed Jul  4 08:16:38 2007
@@ -256,6 +256,7 @@
       Directory dir = new RAMDirectory();
 
      IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+      writer.setMaxBufferedDocs(10);
       writer.setUseCompoundFile(useCompoundFile);
       for(int i=0;i<107;i++) {
         addDoc(writer);
@@ -318,6 +319,7 @@
       Directory dir = new RAMDirectory();
 
      IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+      writer.setMaxBufferedDocs(10);
       writer.setUseCompoundFile(useCompoundFile);
       for(int i=0;i<107;i++) {
         addDoc(writer);
@@ -365,6 +367,7 @@
 
       for(int j=0;j<N+1;j++) {
        IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+        writer.setMaxBufferedDocs(10);
         writer.setUseCompoundFile(useCompoundFile);
         for(int i=0;i<17;i++) {
           addDoc(writer);
@@ -525,6 +528,7 @@
 
       Directory dir = new RAMDirectory();
      IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+      writer.setMaxBufferedDocs(10);
       writer.setUseCompoundFile(useCompoundFile);
       writer.close();
       Term searchTerm = new Term("content", "aaa");        
@@ -533,6 +537,7 @@
       for(int i=0;i<N+1;i++) {
 
         writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
+        writer.setMaxBufferedDocs(10);
         writer.setUseCompoundFile(useCompoundFile);
         for(int j=0;j<17;j++) {
           addDoc(writer);

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java?view=diff&rev=553236&r1=553235&r2=553236
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java Wed Jul  4 08:16:38 2007
@@ -51,6 +51,7 @@
     Directory dir = new RAMDirectory();
 
     IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+    writer.setMaxBufferedDocs(10);
     int i;
     for(i=0;i<35;i++) {
       addDoc(writer, i);

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexModifier.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexModifier.java?view=diff&rev=553236&r1=553235&r2=553236
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexModifier.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexModifier.java Wed Jul  4 08:16:38 2007
@@ -74,6 +74,9 @@
     //  Lucene defaults:
     assertNull(i.getInfoStream());
     assertTrue(i.getUseCompoundFile());
+    /* new merge policy
+    assertEquals(0, i.getMaxBufferedDocs());
+    */
     assertEquals(10, i.getMaxBufferedDocs());
     assertEquals(10000, i.getMaxFieldLength());
     assertEquals(10, i.getMergeFactor());

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java?view=diff&rev=553236&r1=553235&r2=553236
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java Wed Jul  4 08:16:38 2007
@@ -803,7 +803,7 @@
           String[] startFiles = dir.list();
           SegmentInfos infos = new SegmentInfos();
           infos.read(dir);
-          IndexFileDeleter d = new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null);
+          IndexFileDeleter d = new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null);
           String[] endFiles = dir.list();
 
           Arrays.sort(startFiles);

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java?view=diff&rev=553236&r1=553235&r2=553236
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java Wed Jul  4 08:16:38 2007
@@ -20,6 +20,7 @@
 import java.io.IOException;
 import java.io.File;
 import java.util.Arrays;
+import java.util.Random;
 
 import junit.framework.TestCase;
 
@@ -478,7 +479,7 @@
       String[] startFiles = dir.list();
       SegmentInfos infos = new SegmentInfos();
       infos.read(dir);
-      IndexFileDeleter d = new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null);
+      IndexFileDeleter d = new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null);
       String[] endFiles = dir.list();
 
       Arrays.sort(startFiles);
@@ -859,6 +860,7 @@
     public void testCommitOnCloseAbort() throws IOException {
       Directory dir = new RAMDirectory();      
       IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+      writer.setMaxBufferedDocs(10);
       for (int i = 0; i < 14; i++) {
         addDoc(writer);
       }
@@ -871,6 +873,7 @@
       searcher.close();
 
       writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false);
+      writer.setMaxBufferedDocs(10);
       for(int j=0;j<17;j++) {
         addDoc(writer);
       }
@@ -895,6 +898,7 @@
       // Now make sure we can re-open the index, add docs,
       // and all is good:
       writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false);
+      writer.setMaxBufferedDocs(10);
       for(int i=0;i<12;i++) {
         for(int j=0;j<17;j++) {
           addDoc(writer);
@@ -962,6 +966,7 @@
     public void testCommitOnCloseOptimize() throws IOException {
       RAMDirectory dir = new RAMDirectory();      
       IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+      writer.setMaxBufferedDocs(10);
       for(int j=0;j<17;j++) {
         addDocWithIndex(writer, j);
       }
@@ -1000,6 +1005,255 @@
      // Reader should now see index as optimized:
       assertTrue("Reader incorrectly sees that the index is unoptimized", reader.isOptimized());
       reader.close();
+    }
+
+    public void testIndexNoDocuments() throws IOException {
+      RAMDirectory dir = new RAMDirectory();      
+      IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+      writer.flush();
+      writer.close();
+
+      IndexReader reader = IndexReader.open(dir);
+      assertEquals(0, reader.maxDoc());
+      assertEquals(0, reader.numDocs());
+      reader.close();
+
+      writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
+      writer.flush();
+      writer.close();
+
+      reader = IndexReader.open(dir);
+      assertEquals(0, reader.maxDoc());
+      assertEquals(0, reader.numDocs());
+      reader.close();
+    }
+
+    public void testManyFields() throws IOException {
+      RAMDirectory dir = new RAMDirectory();      
+      IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+      writer.setMaxBufferedDocs(10);
+      for(int j=0;j<100;j++) {
+        Document doc = new Document();
+        doc.add(new Field("a"+j, "aaa" + j, Field.Store.YES, Field.Index.TOKENIZED));
+        doc.add(new Field("b"+j, "aaa" + j, Field.Store.YES, Field.Index.TOKENIZED));
+        doc.add(new Field("c"+j, "aaa" + j, Field.Store.YES, Field.Index.TOKENIZED));
+        doc.add(new Field("d"+j, "aaa", Field.Store.YES, Field.Index.TOKENIZED));
+        doc.add(new Field("e"+j, "aaa", Field.Store.YES, Field.Index.TOKENIZED));
+        doc.add(new Field("f"+j, "aaa", Field.Store.YES, Field.Index.TOKENIZED));
+        writer.addDocument(doc);
+      }
+      writer.close();
+
+      IndexReader reader = IndexReader.open(dir);
+      assertEquals(100, reader.maxDoc());
+      assertEquals(100, reader.numDocs());
+      for(int j=0;j<100;j++) {
+        assertEquals(1, reader.docFreq(new Term("a"+j, "aaa"+j)));
+        assertEquals(1, reader.docFreq(new Term("b"+j, "aaa"+j)));
+        assertEquals(1, reader.docFreq(new Term("c"+j, "aaa"+j)));
+        assertEquals(1, reader.docFreq(new Term("d"+j, "aaa")));
+        assertEquals(1, reader.docFreq(new Term("e"+j, "aaa")));
+        assertEquals(1, reader.docFreq(new Term("f"+j, "aaa")));
+      }
+      reader.close();
+      dir.close();
+    }
+
+    public void testSmallRAMBuffer() throws IOException {
+      RAMDirectory dir = new RAMDirectory();      
+      IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+      writer.setRAMBufferSizeMB(0.000001);
+      int lastNumFile = dir.list().length;
+      for(int j=0;j<9;j++) {
+        Document doc = new Document();
+        doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.TOKENIZED));
+        writer.addDocument(doc);
+        int numFile = dir.list().length;
+        // Verify that with a tiny RAM buffer we see new
+        // segment after every doc
+        assertTrue(numFile > lastNumFile);
+        lastNumFile = numFile;
+      }
+      writer.close();
+      dir.close();
+    }
+
+    // Make sure it's OK to change RAM buffer size and
+    // maxBufferedDocs in a write session
+    public void testChangingRAMBuffer() throws IOException {
+      RAMDirectory dir = new RAMDirectory();      
+      IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+      writer.setMaxBufferedDocs(10);
+      int lastNumFile = dir.list().length;
+      long lastGen = -1;
+      for(int j=1;j<52;j++) {
+        Document doc = new Document();
+        doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.TOKENIZED));
+        writer.addDocument(doc);
+        long gen = SegmentInfos.generationFromSegmentsFileName(SegmentInfos.getCurrentSegmentFileName(dir.list()));
+        if (j == 1)
+          lastGen = gen;
+        else if (j < 10)
+          // No new files should be created
+          assertEquals(gen, lastGen);
+        else if (10 == j) {
+          assertTrue(gen > lastGen);
+          lastGen = gen;
+          writer.setRAMBufferSizeMB(0.000001);
+        } else if (j < 20) {
+          assertTrue(gen > lastGen);
+          lastGen = gen;
+        } else if (20 == j) {
+          writer.setRAMBufferSizeMB(16);
+          lastGen = gen;
+        } else if (j < 30) {
+          assertEquals(gen, lastGen);
+        } else if (30 == j) {
+          writer.setRAMBufferSizeMB(0.000001);
+        } else if (j < 40) {
+          assertTrue(gen > lastGen);
+          lastGen = gen;
+        } else if (40 == j) {
+          writer.setMaxBufferedDocs(10);
+          lastGen = gen;
+        } else if (j < 50) {
+          assertEquals(gen, lastGen);
+          writer.setMaxBufferedDocs(10);
+        } else if (50 == j) {
+          assertTrue(gen > lastGen);
+        }
+      }
+      writer.close();
+      dir.close();
+    }
+
+    public void testDiverseDocs() throws IOException {
+      RAMDirectory dir = new RAMDirectory();      
+      IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+      // writer.setInfoStream(System.out);
+      long t0 = System.currentTimeMillis();
+      writer.setRAMBufferSizeMB(0.5);
+      Random rand = new Random(31415);
+      for(int i=0;i<3;i++) {
+        // First, docs where every term is unique (heavy on
+        // Posting instances)
+        for(int j=0;j<100;j++) {
+          Document doc = new Document();
+          for(int k=0;k<100;k++) {
+            doc.add(new Field("field", Integer.toString(rand.nextInt()), Field.Store.YES, Field.Index.TOKENIZED));
+          }
+          writer.addDocument(doc);
+        }
+
+        // Next, many single term docs where only one term
+        // occurs (heavy on byte blocks)
+        for(int j=0;j<100;j++) {
+          Document doc = new Document();
+          doc.add(new Field("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.TOKENIZED));
+          writer.addDocument(doc);
+        }
+
+        // Next, many single term docs where only one term
+        // occurs but the terms are very long (heavy on
+        // char[] arrays)
+        for(int j=0;j<100;j++) {
+          StringBuffer b = new StringBuffer();
+          String x = Integer.toString(j) + ".";
+          for(int k=0;k<1000;k++)
+            b.append(x);
+          String longTerm = b.toString();
+
+          Document doc = new Document();
+          doc.add(new Field("field", longTerm, Field.Store.YES, Field.Index.TOKENIZED));
+          writer.addDocument(doc);
+        }
+      }
+      writer.close();
+
+      long t1 = System.currentTimeMillis();
+      IndexSearcher searcher = new IndexSearcher(dir);
+      Hits hits = searcher.search(new TermQuery(new Term("field", "aaa")));
+      assertEquals(300, hits.length());
+      searcher.close();
+
+      dir.close();
+    }
+
+    public void testEnablingNorms() throws IOException {
+      RAMDirectory dir = new RAMDirectory();      
+      IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+      writer.setMaxBufferedDocs(10);
+      // Enable norms for only 1 doc, pre flush
+      for(int j=0;j<10;j++) {
+        Document doc = new Document();
+        Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED); 
+        if (j != 8) {
+          f.setOmitNorms(true);
+        }
+        doc.add(f);
+        writer.addDocument(doc);
+      }
+      writer.close();
+
+      Term searchTerm = new Term("field", "aaa");
+
+      IndexSearcher searcher = new IndexSearcher(dir);
+      Hits hits = searcher.search(new TermQuery(searchTerm));
+      assertEquals(10, hits.length());
+      searcher.close();
+
+      writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+      writer.setMaxBufferedDocs(10);
+      // Enable norms for only 1 doc, post flush
+      for(int j=0;j<27;j++) {
+        Document doc = new Document();
+        Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED); 
+        if (j != 26) {
+          f.setOmitNorms(true);
+        }
+        doc.add(f);
+        writer.addDocument(doc);
+      }
+      writer.close();
+      searcher = new IndexSearcher(dir);
+      hits = searcher.search(new TermQuery(searchTerm));
+      assertEquals(27, hits.length());
+      searcher.close();
+
+      IndexReader reader = IndexReader.open(dir);
+      reader.close();
+
+      dir.close();
+    }
+
+    public void testHighFreqTerm() throws IOException {
+      RAMDirectory dir = new RAMDirectory();      
+      IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+      writer.setRAMBufferSizeMB(0.01);
+      writer.setMaxFieldLength(100000000);
+      // Massive doc that has 128 K a's
+      StringBuffer b = new StringBuffer(1024*1024);
+      for(int i=0;i<4096;i++) {
+        b.append(" a a a a a a a a");
+        b.append(" a a a a a a a a");
+        b.append(" a a a a a a a a");
+        b.append(" a a a a a a a a");
+      }
+      Document doc = new Document();
+      doc.add(new Field("field", b.toString(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      writer.addDocument(doc);
+      writer.close();
+
+      IndexReader reader = IndexReader.open(dir);
+      assertEquals(1, reader.maxDoc());
+      assertEquals(1, reader.numDocs());
+      Term t = new Term("field", "a");
+      assertEquals(1, reader.docFreq(t));
+      TermDocs td = reader.termDocs(t);
+      td.next();
+      assertEquals(128*1024, td.freq());
+      reader.close();
+      dir.close();
     }
 
     // Make sure that a Directory implementation that does

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterDelete.java?view=diff&rev=553236&r1=553235&r2=553236
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterDelete.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterDelete.java Wed Jul  4 08:16:38 2007
@@ -110,7 +110,7 @@
       }
       modifier.flush();
 
-      assertEquals(0, modifier.getRamSegmentCount());
+      assertEquals(0, modifier.getNumBufferedDocuments());
       assertTrue(0 < modifier.getSegmentCount());
 
       if (!autoCommit) {
@@ -452,7 +452,7 @@
           String[] startFiles = dir.list();
           SegmentInfos infos = new SegmentInfos();
           infos.read(dir);
-          IndexFileDeleter d = new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null);
+          IndexFileDeleter d = new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null);
           String[] endFiles = dir.list();
 
           Arrays.sort(startFiles);

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java?view=diff&rev=553236&r1=553235&r2=553236
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java Wed Jul  4 08:16:38 2007
@@ -57,7 +57,7 @@
     for (int i = 0; i < 100; i++) {
       addDoc(writer);
       checkInvariants(writer);
-      if (writer.getRamSegmentCount() + writer.getSegmentCount() >= 18) {
+      if (writer.getNumBufferedDocuments() + writer.getSegmentCount() >= 18) {
         noOverMerge = true;
       }
     }
@@ -195,7 +195,7 @@
     int mergeFactor = writer.getMergeFactor();
     int maxMergeDocs = writer.getMaxMergeDocs();
 
-    int ramSegmentCount = writer.getRamSegmentCount();
+    int ramSegmentCount = writer.getNumBufferedDocuments();
     assertTrue(ramSegmentCount < maxBufferedDocs);
 
     int lowerBound = -1;

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestLazyProxSkipping.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestLazyProxSkipping.java?view=diff&rev=553236&r1=553235&r2=553236
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestLazyProxSkipping.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestLazyProxSkipping.java Wed Jul  4 08:16:38 2007
@@ -50,7 +50,7 @@
         
         Directory directory = new RAMDirectory();
         IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);
-        
+        writer.setMaxBufferedDocs(10);
         for (int i = 0; i < numDocs; i++) {
             Document doc = new Document();
             String content;

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestPayloads.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestPayloads.java?view=diff&rev=553236&r1=553235&r2=553236
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestPayloads.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestPayloads.java Wed Jul  4 08:16:38 2007
@@ -467,7 +467,8 @@
                             d.add(new Field(field, new PoolingPayloadTokenStream(pool)));
                             writer.addDocument(d);
                         }
-                    } catch (IOException e) {
+                    } catch (Exception e) {
+                        e.printStackTrace();
                         fail(e.toString());
                     }
                 }
@@ -480,7 +481,6 @@
                 ingesters[i].join();
             } catch (InterruptedException e) {}
         }
-        
         writer.close();
         IndexReader reader = IndexReader.open(dir);
         TermEnum terms = reader.terms();

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing.java?view=diff&rev=553236&r1=553235&r2=553236
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing.java Wed Jul  4 08:16:38 2007
@@ -74,8 +74,6 @@
           count++;
         }
         
-        modifier.close();
-
       } catch (Exception e) {
         System.out.println(e.toString());
         e.printStackTrace();
@@ -125,6 +123,9 @@
     IndexerThread indexerThread = new IndexerThread(modifier);
     indexerThread.start();
       
+    IndexerThread indexerThread2 = new IndexerThread(modifier);
+    indexerThread2.start();
+      
     // Two searchers that constantly just re-instantiate the searcher:
     SearcherThread searcherThread1 = new SearcherThread(directory);
     searcherThread1.start();
@@ -133,9 +134,14 @@
     searcherThread2.start();
 
     indexerThread.join();
+    indexerThread2.join();
     searcherThread1.join();
     searcherThread2.join();
+
+    modifier.close();
+
     assertTrue("hit unexpected exception in indexer", !indexerThread.failed);
+    assertTrue("hit unexpected exception in indexer 2", !indexerThread2.failed);
     assertTrue("hit unexpected exception in search1", !searcherThread1.failed);
     assertTrue("hit unexpected exception in search2", !searcherThread2.failed);
     //System.out.println("    Writer: " + indexerThread.count + " iterations");

Added: lucene/java/trunk/src/test/org/apache/lucene/index/index.presharedstores.cfs.zip
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/index.presharedstores.cfs.zip?view=auto&rev=553236
==============================================================================
Binary file - no diff available.

Propchange: lucene/java/trunk/src/test/org/apache/lucene/index/index.presharedstores.cfs.zip
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Added: lucene/java/trunk/src/test/org/apache/lucene/index/index.presharedstores.nocfs.zip
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/index.presharedstores.nocfs.zip?view=auto&rev=553236
==============================================================================
Binary file - no diff available.

Propchange: lucene/java/trunk/src/test/org/apache/lucene/index/index.presharedstores.nocfs.zip
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Modified: lucene/java/trunk/src/test/org/apache/lucene/search/TestTermVectors.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/search/TestTermVectors.java?view=diff&rev=553236&r1=553235&r2=553236
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/search/TestTermVectors.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/search/TestTermVectors.java Wed Jul  4 08:16:38 2007
@@ -291,6 +291,80 @@
         Field.Index.TOKENIZED, Field.TermVector.YES));
     //System.out.println("Document: " + doc);
   }
-  
-  
+
+  // Test only a few docs having vectors
+  public void testRareVectors() throws IOException {
+    IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(), true);
+    for(int i=0;i<100;i++) {
+      Document doc = new Document();
+      doc.add(new Field("field", English.intToEnglish(i),
+                        Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.NO));
+      writer.addDocument(doc);
+    }
+    for(int i=0;i<10;i++) {
+      Document doc = new Document();
+      doc.add(new Field("field", English.intToEnglish(100+i),
+                        Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      writer.addDocument(doc);
+    }
+
+    writer.close();
+    searcher = new IndexSearcher(directory);
+
+    Query query = new TermQuery(new Term("field", "hundred"));
+    Hits hits = searcher.search(query);
+    assertEquals(10, hits.length());
+    for (int i = 0; i < hits.length(); i++) {
+      TermFreqVector [] vector = searcher.reader.getTermFreqVectors(hits.id(i));
+      assertTrue(vector != null);
+      assertTrue(vector.length == 1);
+    }
+  }
+
+
+  // In a single doc, for the same field, mix the term
+  // vectors up
+  public void testMixedVectrosVectors() throws IOException {
+    IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(), true);
+    Document doc = new Document();
+    doc.add(new Field("field", "one",
+                      Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.NO));
+    doc.add(new Field("field", "one",
+                      Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.YES));
+    doc.add(new Field("field", "one",
+                      Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS));
+    doc.add(new Field("field", "one",
+                      Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_OFFSETS));
+    doc.add(new Field("field", "one",
+                      Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    writer.addDocument(doc);
+    writer.close();
+
+    searcher = new IndexSearcher(directory);
+
+    Query query = new TermQuery(new Term("field", "one"));
+    Hits hits = searcher.search(query);
+    assertEquals(1, hits.length());
+
+    TermFreqVector [] vector = searcher.reader.getTermFreqVectors(hits.id(0));
+    assertTrue(vector != null);
+    assertTrue(vector.length == 1);
+    TermPositionVector tfv = (TermPositionVector) vector[0];
+    assertTrue(tfv.getField().equals("field"));
+    String[] terms = tfv.getTerms();
+    assertEquals(1, terms.length);
+    assertEquals(terms[0], "one");
+    assertEquals(5, tfv.getTermFrequencies()[0]);
+
+    int[] positions = tfv.getTermPositions(0);
+    assertEquals(5, positions.length);
+    for(int i=0;i<5;i++)
+      assertEquals(i, positions[i]);
+    TermVectorOffsetInfo[] offsets = tfv.getOffsets(0);
+    assertEquals(5, offsets.length);
+    for(int i=0;i<5;i++) {
+      assertEquals(4*i, offsets[i].getStartOffset());
+      assertEquals(4*i+3, offsets[i].getEndOffset());
+    }
+  }
 }


