lucene-java-commits mailing list archives

From: mikemcc...@apache.org
Subject: svn commit: r823321 [2/2] - in /lucene/java/trunk: ./ contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ contrib/benchmark/conf/ contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/ contrib/benchmark/src/java/org/apache/luc...
Date: Thu, 08 Oct 2009 20:57:34 GMT
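
This commit removes the deprecated autoCommit IndexWriter constructors from
the tests, replacing them with the 2.9-style constructors that take an
explicit IndexWriter.MaxFieldLength. A minimal sketch of the migration
pattern applied throughout (the class name and the RAMDirectory /
WhitespaceAnalyzer choices here are illustrative, not part of the diff):

    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class AutoCommitMigration {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();

        // Before (deprecated): a boolean autoCommit constructor argument.
        //   IndexWriter w = new IndexWriter(dir, false, new WhitespaceAnalyzer(), true);

        // After: no autoCommit flag; a MaxFieldLength is now required.
        // Changes become visible to newly opened readers only when
        // commit() or close() is called.
        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
                                             true, IndexWriter.MaxFieldLength.UNLIMITED);
        writer.commit();  // writes a durable commit point
        writer.close();   // also commits any remaining pending changes
      }
    }
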
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java?rev=823321&r1=823320&r2=823321&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java Thu Oct  8 20:57:32 2009
@@ -83,8 +83,8 @@
   }
 
   /**
-   * This is useful for adding to a big index w/ autoCommit
-   * false when you know readers are not using it.
+   * This is useful for adding to a big index when you know
+   * readers are not using it.
    */
   class KeepNoneOnInitDeletionPolicy implements IndexDeletionPolicy {
     int numOnInit;
@@ -202,12 +202,11 @@
 
     final double SECONDS = 2.0;
 
-    boolean autoCommit = false;
     boolean useCompoundFile = true;
 
     Directory dir = new RAMDirectory();
     ExpirationTimeDeletionPolicy policy = new ExpirationTimeDeletionPolicy(dir, SECONDS);
-    IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setUseCompoundFile(useCompoundFile);
     writer.close();
 
@@ -216,7 +215,7 @@
       // Record last time when writer performed deletes of
       // past commits
       lastDeleteTime = System.currentTimeMillis();
-      writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
+      writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setUseCompoundFile(useCompoundFile);
       for(int j=0;j<17;j++) {
         addDoc(writer);
@@ -267,10 +266,9 @@
    */
   public void testKeepAllDeletionPolicy() throws IOException {
 
-    for(int pass=0;pass<4;pass++) {
+    for(int pass=0;pass<2;pass++) {
 
-      boolean autoCommit = pass < 2;
-      boolean useCompoundFile = (pass % 2) > 0;
+      boolean useCompoundFile = (pass % 2) != 0;
 
       // Never deletes a commit
       KeepAllDeletionPolicy policy = new KeepAllDeletionPolicy();
@@ -278,37 +276,30 @@
       Directory dir = new RAMDirectory();
       policy.dir = dir;
 
-      IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(10);
       writer.setUseCompoundFile(useCompoundFile);
       writer.setMergeScheduler(new SerialMergeScheduler());
       for(int i=0;i<107;i++) {
         addDoc(writer);
-        if (autoCommit && i%10 == 0)
-          writer.commit();
       }
       writer.close();
 
-      writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
+      writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setUseCompoundFile(useCompoundFile);
       writer.optimize();
       writer.close();
 
       assertEquals(2, policy.numOnInit);
-      if (!autoCommit)
-        // If we are not auto committing then there should
-        // be exactly 2 commits (one per close above):
-        assertEquals(2, policy.numOnCommit);
+
+      // There should be exactly 2 commits (one per
+      // close above):
+      assertEquals(2, policy.numOnCommit);
 
       // Test listCommits
       Collection commits = IndexReader.listCommits(dir);
-      if (!autoCommit)
-        // 1 from opening writer + 2 from closing writer
-        assertEquals(3, commits.size());
-      else
-        // 1 from opening writer + 2 from closing writer +
-        // 11 from calling writer.commit() explicitly above
-        assertEquals(14, commits.size());
+      // 1 from opening writer + 2 from closing writer
+      assertEquals(3, commits.size());
 
       Iterator it = commits.iterator();
       // Make sure we can open a reader on each commit:
@@ -448,21 +439,20 @@
 
 
   /* Test keeping NO commit points.  This is a viable and
-   * useful case eg where you want to build a big index with
-   * autoCommit false and you know there are no readers.
+   * useful case, e.g. where you want to build a big index and
+   * you know there are no readers.
    */
   public void testKeepNoneOnInitDeletionPolicy() throws IOException {
 
-    for(int pass=0;pass<4;pass++) {
+    for(int pass=0;pass<2;pass++) {
 
-      boolean autoCommit = pass < 2;
-      boolean useCompoundFile = (pass % 2) > 0;
+      boolean useCompoundFile = (pass % 2) != 0;
 
       KeepNoneOnInitDeletionPolicy policy = new KeepNoneOnInitDeletionPolicy();
 
       Directory dir = new RAMDirectory();
 
-      IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(10);
       writer.setUseCompoundFile(useCompoundFile);
       for(int i=0;i<107;i++) {
@@ -470,16 +460,15 @@
       }
       writer.close();
 
-      writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
+      writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setUseCompoundFile(useCompoundFile);
       writer.optimize();
       writer.close();
 
       assertEquals(2, policy.numOnInit);
-      if (!autoCommit)
-        // If we are not auto committing then there should
-        // be exactly 2 commits (one per close above):
-        assertEquals(2, policy.numOnCommit);
+      // There should be exactly 2 commits (one per
+      // close above):
+      assertEquals(2, policy.numOnCommit);
 
       // Simplistic check: just verify the index is in fact
       // readable:
@@ -497,17 +486,16 @@
 
     final int N = 5;
 
-    for(int pass=0;pass<4;pass++) {
+    for(int pass=0;pass<2;pass++) {
 
-      boolean autoCommit = pass < 2;
-      boolean useCompoundFile = (pass % 2) > 0;
+      boolean useCompoundFile = (pass % 2) != 0;
 
       Directory dir = new RAMDirectory();
 
       KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);
 
       for(int j=0;j<N+1;j++) {
-        IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
         writer.setMaxBufferedDocs(10);
         writer.setUseCompoundFile(useCompoundFile);
         for(int i=0;i<17;i++) {
@@ -519,11 +507,7 @@
 
       assertTrue(policy.numDelete > 0);
       assertEquals(N+1, policy.numOnInit);
-      if (autoCommit) {
-        assertTrue(policy.numOnCommit > 1);
-      } else {
-        assertEquals(N+1, policy.numOnCommit);
-      }
+      assertEquals(N+1, policy.numOnCommit);
 
       // Simplistic check: just verify only the past N segments_N's still
       // exist, and, I can open a reader on each:
@@ -559,27 +543,26 @@
 
     final int N = 10;
 
-    for(int pass=0;pass<4;pass++) {
+    for(int pass=0;pass<2;pass++) {
 
-      boolean autoCommit = pass < 2;
-      boolean useCompoundFile = (pass % 2) > 0;
+      boolean useCompoundFile = (pass % 2) != 0;
 
       KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);
 
       Directory dir = new RAMDirectory();
-      IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setUseCompoundFile(useCompoundFile);
       writer.close();
       Term searchTerm = new Term("content", "aaa");        
       Query query = new TermQuery(searchTerm);
 
       for(int i=0;i<N+1;i++) {
-        writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
+        writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
         writer.setUseCompoundFile(useCompoundFile);
         for(int j=0;j<17;j++) {
           addDoc(writer);
         }
-        // this is a commit when autoCommit=false:
+        // this is a commit
         writer.close();
         IndexReader reader = IndexReader.open(dir, policy, false);
         reader.deleteDocument(3*i+1);
@@ -587,19 +570,18 @@
         IndexSearcher searcher = new IndexSearcher(reader);
         ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
         assertEquals(16*(1+i), hits.length);
-        // this is a commit when autoCommit=false:
+        // this is a commit
         reader.close();
         searcher.close();
       }
-      writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
+      writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setUseCompoundFile(useCompoundFile);
       writer.optimize();
-      // this is a commit when autoCommit=false:
+      // this is a commit
       writer.close();
 
       assertEquals(2*(N+2), policy.numOnInit);
-      if (!autoCommit)
-        assertEquals(2*(N+2)-1, policy.numOnCommit);
+      assertEquals(2*(N+2)-1, policy.numOnCommit);
 
       IndexSearcher searcher = new IndexSearcher(dir, false);
       ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
@@ -617,21 +599,18 @@
           IndexReader reader = IndexReader.open(dir, true);
 
           // Work backwards in commits on what the expected
-          // count should be.  Only check this in the
-          // autoCommit false case:
-          if (!autoCommit) {
-            searcher = new IndexSearcher(reader);
-            hits = searcher.search(query, null, 1000).scoreDocs;
-            if (i > 1) {
-              if (i % 2 == 0) {
-                expectedCount += 1;
-              } else {
-                expectedCount -= 17;
-              }
+          // count should be.
+          searcher = new IndexSearcher(reader);
+          hits = searcher.search(query, null, 1000).scoreDocs;
+          if (i > 1) {
+            if (i % 2 == 0) {
+              expectedCount += 1;
+            } else {
+              expectedCount -= 17;
             }
-            assertEquals(expectedCount, hits.length);
-            searcher.close();
           }
+          assertEquals(expectedCount, hits.length);
+          searcher.close();
           reader.close();
           if (i == N) {
             fail("should have failed on commits before last 5");
@@ -659,15 +638,14 @@
 
     final int N = 10;
 
-    for(int pass=0;pass<4;pass++) {
+    for(int pass=0;pass<2;pass++) {
 
-      boolean autoCommit = pass < 2;
-      boolean useCompoundFile = (pass % 2) > 0;
+      boolean useCompoundFile = (pass % 2) != 0;
 
       KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);
 
       Directory dir = new RAMDirectory();
-      IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(10);
       writer.setUseCompoundFile(useCompoundFile);
       writer.close();
@@ -676,13 +654,13 @@
 
       for(int i=0;i<N+1;i++) {
 
-        writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
+        writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
         writer.setMaxBufferedDocs(10);
         writer.setUseCompoundFile(useCompoundFile);
         for(int j=0;j<17;j++) {
           addDoc(writer);
         }
-        // this is a commit when autoCommit=false:
+        // this is a commit
         writer.close();
         IndexReader reader = IndexReader.open(dir, policy, false);
         reader.deleteDocument(3);
@@ -690,19 +668,18 @@
         IndexSearcher searcher = new IndexSearcher(reader);
         ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
         assertEquals(16, hits.length);
-        // this is a commit when autoCommit=false:
+        // this is a commit
         reader.close();
         searcher.close();
 
-        writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+        writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
         // This will not commit: there are no changes
         // pending because we opened for "create":
         writer.close();
       }
 
       assertEquals(1+3*(N+1), policy.numOnInit);
-      if (!autoCommit)
-        assertEquals(3*(N+1), policy.numOnCommit);
+      assertEquals(3*(N+1), policy.numOnCommit);
 
       IndexSearcher searcher = new IndexSearcher(dir, false);
       ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
@@ -720,20 +697,17 @@
           IndexReader reader = IndexReader.open(dir, true);
 
           // Work backwards in commits on what the expected
-          // count should be.  Only check this in the
-          // autoCommit false case:
-          if (!autoCommit) {
-            searcher = new IndexSearcher(reader);
-            hits = searcher.search(query, null, 1000).scoreDocs;
-            assertEquals(expectedCount, hits.length);
-            searcher.close();
-            if (expectedCount == 0) {
-              expectedCount = 16;
-            } else if (expectedCount == 16) {
-              expectedCount = 17;
-            } else if (expectedCount == 17) {
-              expectedCount = 0;
-            }
+          // count should be.
+          searcher = new IndexSearcher(reader);
+          hits = searcher.search(query, null, 1000).scoreDocs;
+          assertEquals(expectedCount, hits.length);
+          searcher.close();
+          if (expectedCount == 0) {
+            expectedCount = 16;
+          } else if (expectedCount == 16) {
+            expectedCount = 17;
+          } else if (expectedCount == 17) {
+            expectedCount = 0;
           }
           reader.close();
           if (i == N) {

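With autoCommit gone, the deletion-policy tests above count only the commit
points created by explicit commit()/close() calls. A short sketch of that
accounting, assuming a keep-everything policy like the test's
KeepAllDeletionPolicy (the KeepAll class and document contents below are
illustrative):

    import java.util.Collection;
    import java.util.List;
    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexDeletionPolicy;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class ListCommitsSketch {

      // Never deletes a commit point (mirrors the test's KeepAllDeletionPolicy);
      // the default policy keeps only the most recent commit.
      static class KeepAll implements IndexDeletionPolicy {
        public void onInit(List commits) {}
        public void onCommit(List commits) {}
      }

      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        Document doc = new Document();
        doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));

        // Opening for create writes commit #1; each close below adds one more.
        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true,
                                             new KeepAll(), IndexWriter.MaxFieldLength.UNLIMITED);
        writer.addDocument(doc);
        writer.close();                                  // commit #2

        writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false,
                                 new KeepAll(), IndexWriter.MaxFieldLength.UNLIMITED);
        writer.addDocument(doc);
        writer.close();                                  // commit #3

        Collection commits = IndexReader.listCommits(dir);
        System.out.println(commits.size() + " commit points");  // expect 3
      }
    }
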
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java?rev=823321&r1=823320&r2=823321&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java Thu Oct  8 20:57:32 2009
@@ -117,7 +117,7 @@
         reader.close();
 
         // optimize the index and check that the new doc count is correct
-        writer = new IndexWriter(dir, true, new WhitespaceAnalyzer());
+        writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
         assertEquals(100, writer.maxDoc());
         assertEquals(60, writer.numDocs());
         writer.optimize();
@@ -227,7 +227,7 @@
         startDiskUsage += startDir.fileLength(files[i]);
       }
 
-      for(int iter=0;iter<6;iter++) {
+      for(int iter=0;iter<3;iter++) {
 
         if (debug)
           System.out.println("TEST: iter=" + iter);
@@ -235,8 +235,7 @@
         // Start with 100 bytes more than we are currently using:
         long diskFree = diskUsage+100;
 
-        boolean autoCommit = iter % 2 == 0;
-        int method = iter/2;
+        int method = iter;
 
         boolean success = false;
         boolean done = false;
@@ -254,7 +253,7 @@
 
           // Make a new dir that will enforce disk usage:
           MockRAMDirectory dir = new MockRAMDirectory(startDir);
-          writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false);
+          writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.UNLIMITED);
           IOException err = null;
 
           MergeScheduler ms = writer.getMergeScheduler();
@@ -290,12 +289,12 @@
                 rate = 0.0;
               }
               if (debug)
-                testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes autoCommit=" + autoCommit;
+                testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes";
             } else {
               thisDiskFree = 0;
               rate = 0.0;
               if (debug)
-                testName = "disk full test " + methodName + " with unlimited disk space autoCommit=" + autoCommit;
+                testName = "disk full test " + methodName + " with unlimited disk space";
             }
 
             if (debug)
@@ -351,29 +350,6 @@
             // ConcurrentMergeScheduler are done
             _TestUtil.syncConcurrentMerges(writer);
 
-            if (autoCommit) {
-
-              // Whether we succeeded or failed, check that
-              // all un-referenced files were in fact
-              // deleted (ie, we did not create garbage).
-              // Only check this when autoCommit is true:
-              // when it's false, it's expected that there
-              // are unreferenced files (ie they won't be
-              // referenced until the "commit on close").
-              // Just create a new IndexFileDeleter, have it
-              // delete unreferenced files, then verify that
-              // in fact no files were deleted:
-
-              String successStr;
-              if (success) {
-                successStr = "success";
-              } else {
-                successStr = "IOException";
-              }
-              String message = methodName + " failed to delete unreferenced files after " + successStr + " (" + diskFree + " bytes)";
-              assertNoUnreferencedFiles(dir, message);
-            }
-
             if (debug) {
               System.out.println("  now test readers");
             }
@@ -390,10 +366,8 @@
             }
             int result = reader.docFreq(searchTerm);
             if (success) {
-              if (autoCommit && result != END_COUNT) {
-                fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + END_COUNT);
-              } else if (!autoCommit && result != START_COUNT) {
-                fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " [autoCommit = false]");
+              if (result != START_COUNT) {
+                fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT);
               }
             } else {
               // On hitting exception we still may have added
@@ -480,18 +454,17 @@
 
       boolean debug = false;
 
-      for(int pass=0;pass<3;pass++) {
+      for(int pass=0;pass<2;pass++) {
         if (debug)
           System.out.println("TEST: pass=" + pass);
-        boolean autoCommit = pass == 0;
-        boolean doAbort = pass == 2;
+        boolean doAbort = pass == 1;
         long diskFree = 200;
         while(true) {
           if (debug)
             System.out.println("TEST: cycle: diskFree=" + diskFree);
           MockRAMDirectory dir = new MockRAMDirectory();
           dir.setMaxSizeInBytes(diskFree);
-          IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true);
+          IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
 
           MergeScheduler ms = writer.getMergeScheduler();
           if (ms instanceof ConcurrentMergeScheduler)
@@ -531,7 +504,7 @@
 
             _TestUtil.syncConcurrentMerges(ms);
 
-            assertNoUnreferencedFiles(dir, "after disk full during addDocument with autoCommit=" + autoCommit);
+            assertNoUnreferencedFiles(dir, "after disk full during addDocument");
 
             // Make sure reader can open the index:
             IndexReader.open(dir, true).close();
@@ -947,10 +920,9 @@
     }
 
     /*
-     * Simple test for "commit on close": open writer with
-     * autoCommit=false, so it will only commit on close,
-     * then add a bunch of docs, making sure reader does not
-     * see these docs until writer is closed.
+     * Simple test for "commit on close": open writer then
+     * add a bunch of docs, making sure reader does not see
+     * these docs until writer is closed.
      */
     public void testCommitOnClose() throws IOException {
         Directory dir = new RAMDirectory();      
@@ -975,7 +947,7 @@
           }
           searcher = new IndexSearcher(dir, false);
           hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
-          assertEquals("reader incorrectly sees changes from writer with autoCommit disabled", 14, hits.length);
+          assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
           searcher.close();
           assertTrue("reader should have still been current", reader.isCurrent());
         }
@@ -991,10 +963,9 @@
     }
 
     /*
-     * Simple test for "commit on close": open writer with
-     * autoCommit=false, so it will only commit on close,
-     * then add a bunch of docs, making sure reader does not
-     * see them until writer has closed.  Then instead of
+     * Simple test for "commit on close": open writer, then
+     * add a bunch of docs, making sure reader does not see
+     * them until writer has closed.  Then instead of
      * closing the writer, call abort and verify reader sees
      * nothing was added.  Then verify we can open the index
      * and add docs to it.
@@ -1024,7 +995,7 @@
 
       searcher = new IndexSearcher(dir, false);
       hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
-      assertEquals("reader incorrectly sees changes from writer with autoCommit disabled", 14, hits.length);
+      assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
       searcher.close();
 
       // Now, close the writer:
@@ -1052,7 +1023,7 @@
         }
         searcher = new IndexSearcher(dir, false);
         hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
-        assertEquals("reader incorrectly sees changes from writer with autoCommit disabled", 14, hits.length);
+        assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
         searcher.close();
       }
 
@@ -1103,9 +1074,9 @@
       // and it doesn't delete intermediate segments then it
       // will exceed this 100X:
       // System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + ";end " + endDiskUsage);
-      assertTrue("writer used too much space while adding documents when autoCommit=false: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage,
+      assertTrue("writer used too much space while adding documents: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage,
                  midDiskUsage < 100*startDiskUsage);
-      assertTrue("writer used too much space after close when autoCommit=false endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage,
+      assertTrue("writer used too much space after close: endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage,
                  endDiskUsage < 100*startDiskUsage);
     }
 
@@ -2116,15 +2087,15 @@
     Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
     doc.add(idField);
 
-    for(int pass=0;pass<3;pass++) {
-      boolean autoCommit = pass%2 == 0;
-      IndexWriter writer = new IndexWriter(directory, autoCommit, new WhitespaceAnalyzer(), true);
+    for(int pass=0;pass<2;pass++) {
+
+      IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
 
-      //System.out.println("TEST: pass=" + pass + " ac=" + autoCommit + " cms=" + (pass >= 2));
+      //System.out.println("TEST: pass=" + pass + " cms=" + (pass >= 2));
       for(int iter=0;iter<10;iter++) {
         //System.out.println("TEST: iter=" + iter);
         MergeScheduler ms;
-        if (pass >= 2)
+        if (pass == 1)
           ms = new ConcurrentMergeScheduler();
         else
           ms = new SerialMergeScheduler();
@@ -2189,7 +2160,7 @@
         reader.close();
 
         // Reopen
-        writer = new IndexWriter(directory, autoCommit, new WhitespaceAnalyzer(), false);
+        writer = new IndexWriter(directory, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.UNLIMITED);
       }
       writer.close();
     }
@@ -2360,7 +2331,7 @@
 
     for(int iter=0;iter<10;iter++) {
       MockRAMDirectory dir = new MockRAMDirectory();
-      IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer());
+      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
       ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
       // We expect disk full exceptions in the merge threads
       cms.setSuppressExceptions();
@@ -2421,7 +2392,7 @@
   public void _testSingleThreadFailure(MockRAMDirectory.Failure failure) throws IOException {
     MockRAMDirectory dir = new MockRAMDirectory();
 
-    IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer());
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setMaxBufferedDocs(2);
     final Document doc = new Document();
     doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
@@ -2434,6 +2405,7 @@
     try {
       writer.addDocument(doc);
       writer.addDocument(doc);
+      writer.commit();
       fail("did not hit exception");
     } catch (IOException ioe) {
     }
@@ -2721,7 +2693,7 @@
     FailOnlyInSync failure = new FailOnlyInSync();
     dir.failOn(failure);
 
-    IndexWriter writer  = new IndexWriter(dir, true, new WhitespaceAnalyzer());
+    IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
     failure.setDoFail();
 
     ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
@@ -2731,8 +2703,16 @@
     writer.setMaxBufferedDocs(2);
     writer.setMergeFactor(5);
 
-    for (int i = 0; i < 23; i++)
+    for (int i = 0; i < 23; i++) {
       addDoc(writer);
+      if ((i-1)%2 == 0) {
+        try {
+          writer.commit();
+        } catch (IOException ioe) {
+          // expected
+        }
+      }
+    }
 
     cms.sync();
     assertTrue(failure.didFail);
@@ -2749,10 +2729,9 @@
   public void testTermVectorCorruption() throws IOException {
 
     Directory dir = new MockRAMDirectory();
-    for(int iter=0;iter<4;iter++) {
-      final boolean autoCommit = 1==iter/2;
+    for(int iter=0;iter<2;iter++) {
       IndexWriter writer = new IndexWriter(dir,
-                                           autoCommit, new StandardAnalyzer());
+                                           new StandardAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(2);
       writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
       writer.setMergeScheduler(new SerialMergeScheduler());
@@ -2785,7 +2764,7 @@
       reader.close();
 
       writer = new IndexWriter(dir,
-                               autoCommit, new StandardAnalyzer());
+                               new StandardAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(2);
       writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
       writer.setMergeScheduler(new SerialMergeScheduler());
@@ -2801,10 +2780,9 @@
   // LUCENE-1168
   public void testTermVectorCorruption2() throws IOException {
     Directory dir = new MockRAMDirectory();
-    for(int iter=0;iter<4;iter++) {
-      final boolean autoCommit = 1==iter/2;
+    for(int iter=0;iter<2;iter++) {
       IndexWriter writer = new IndexWriter(dir,
-                                           autoCommit, new StandardAnalyzer());
+                                           new StandardAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(2);
       writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
       writer.setMergeScheduler(new SerialMergeScheduler());
@@ -3049,7 +3027,7 @@
   // LUCENE-1179
   public void testEmptyFieldName() throws IOException {
     MockRAMDirectory dir = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer());
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
     Document doc = new Document();
     doc.add(new Field("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
     writer.addDocument(doc);
@@ -4034,7 +4012,7 @@
 
     final List thrown = new ArrayList();
 
-    final IndexWriter writer = new IndexWriter(new MockRAMDirectory(), new StandardAnalyzer()) {
+    final IndexWriter writer = new IndexWriter(new MockRAMDirectory(), new StandardAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED) {
         public void message(final String message) {
           if (message.startsWith("now flush at close") && 0 == thrown.size()) {
             thrown.add(null);
@@ -4324,7 +4302,7 @@
 
   public void testDeadlock() throws Exception {
     MockRAMDirectory dir = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer());
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setMaxBufferedDocs(2);
     Document doc = new Document();
     doc.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,

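The delete tests that follow likewise drop their autoCommit passes: buffered
deletes now reach the index, and become visible to readers, only through an
explicit commit() (or close()). A small sketch of that visibility contract,
reusing the tests' "city"/"Amsterdam" data (class name illustrative):

    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class DeleteVisibilitySketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true,
                                             IndexWriter.MaxFieldLength.UNLIMITED);
        Document doc = new Document();
        doc.add(new Field("city", "Amsterdam", Field.Store.YES, Field.Index.ANALYZED));
        writer.addDocument(doc);
        writer.commit();                          // make the add durable/visible

        writer.deleteDocuments(new Term("city", "Amsterdam"));
        IndexReader before = IndexReader.open(dir, true);
        System.out.println(before.numDocs());     // still 1: delete only buffered
        before.close();

        writer.commit();                          // flush and commit the delete
        IndexReader after = IndexReader.open(dir, true);
        System.out.println(after.numDocs());      // now 0
        after.close();
        writer.close();
        dir.close();
      }
    }
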
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterDelete.java?rev=823321&r1=823320&r2=823321&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterDelete.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterDelete.java Thu Oct  8 20:57:32 2009
@@ -40,285 +40,262 @@
         "Venice has lots of canals" };
     String[] text = { "Amsterdam", "Venice" };
 
-    for(int pass=0;pass<2;pass++) {
-      boolean autoCommit = (0==pass);
+    Directory dir = new MockRAMDirectory();
+    IndexWriter modifier = new IndexWriter(dir,
+                                           new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    modifier.setUseCompoundFile(true);
+    modifier.setMaxBufferedDeleteTerms(1);
 
-      Directory dir = new MockRAMDirectory();
-      IndexWriter modifier = new IndexWriter(dir, autoCommit,
-                                             new WhitespaceAnalyzer(), true);
-      modifier.setUseCompoundFile(true);
-      modifier.setMaxBufferedDeleteTerms(1);
-
-      for (int i = 0; i < keywords.length; i++) {
-        Document doc = new Document();
-        doc.add(new Field("id", keywords[i], Field.Store.YES,
-                          Field.Index.NOT_ANALYZED));
-        doc.add(new Field("country", unindexed[i], Field.Store.YES,
-                          Field.Index.NO));
-        doc.add(new Field("contents", unstored[i], Field.Store.NO,
-                          Field.Index.ANALYZED));
-        doc
-          .add(new Field("city", text[i], Field.Store.YES,
-                         Field.Index.ANALYZED));
-        modifier.addDocument(doc);
-      }
-      modifier.optimize();
-      modifier.commit();
+    for (int i = 0; i < keywords.length; i++) {
+      Document doc = new Document();
+      doc.add(new Field("id", keywords[i], Field.Store.YES,
+                        Field.Index.NOT_ANALYZED));
+      doc.add(new Field("country", unindexed[i], Field.Store.YES,
+                        Field.Index.NO));
+      doc.add(new Field("contents", unstored[i], Field.Store.NO,
+                        Field.Index.ANALYZED));
+      doc
+        .add(new Field("city", text[i], Field.Store.YES,
+                       Field.Index.ANALYZED));
+      modifier.addDocument(doc);
+    }
+    modifier.optimize();
+    modifier.commit();
 
-      Term term = new Term("city", "Amsterdam");
-      int hitCount = getHitCount(dir, term);
-      assertEquals(1, hitCount);
-      modifier.deleteDocuments(term);
-      modifier.commit();
-      hitCount = getHitCount(dir, term);
-      assertEquals(0, hitCount);
+    Term term = new Term("city", "Amsterdam");
+    int hitCount = getHitCount(dir, term);
+    assertEquals(1, hitCount);
+    modifier.deleteDocuments(term);
+    modifier.commit();
+    hitCount = getHitCount(dir, term);
+    assertEquals(0, hitCount);
 
-      modifier.close();
-      dir.close();
-    }
+    modifier.close();
+    dir.close();
   }
 
   // test when delete terms only apply to disk segments
   public void testNonRAMDelete() throws IOException {
-    for(int pass=0;pass<2;pass++) {
-      boolean autoCommit = (0==pass);
 
-      Directory dir = new MockRAMDirectory();
-      IndexWriter modifier = new IndexWriter(dir, autoCommit,
-                                             new WhitespaceAnalyzer(), true);
-      modifier.setMaxBufferedDocs(2);
-      modifier.setMaxBufferedDeleteTerms(2);
+    Directory dir = new MockRAMDirectory();
+    IndexWriter modifier = new IndexWriter(dir,
+                                           new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    modifier.setMaxBufferedDocs(2);
+    modifier.setMaxBufferedDeleteTerms(2);
 
-      int id = 0;
-      int value = 100;
+    int id = 0;
+    int value = 100;
 
-      for (int i = 0; i < 7; i++) {
-        addDoc(modifier, ++id, value);
-      }
-      modifier.commit();
+    for (int i = 0; i < 7; i++) {
+      addDoc(modifier, ++id, value);
+    }
+    modifier.commit();
 
-      assertEquals(0, modifier.getNumBufferedDocuments());
-      assertTrue(0 < modifier.getSegmentCount());
+    assertEquals(0, modifier.getNumBufferedDocuments());
+    assertTrue(0 < modifier.getSegmentCount());
 
-      modifier.commit();
+    modifier.commit();
 
-      IndexReader reader = IndexReader.open(dir, true);
-      assertEquals(7, reader.numDocs());
-      reader.close();
+    IndexReader reader = IndexReader.open(dir, true);
+    assertEquals(7, reader.numDocs());
+    reader.close();
 
-      modifier.deleteDocuments(new Term("value", String.valueOf(value)));
+    modifier.deleteDocuments(new Term("value", String.valueOf(value)));
 
-      modifier.commit();
+    modifier.commit();
 
-      reader = IndexReader.open(dir, true);
-      assertEquals(0, reader.numDocs());
-      reader.close();
-      modifier.close();
-      dir.close();
-    }
+    reader = IndexReader.open(dir, true);
+    assertEquals(0, reader.numDocs());
+    reader.close();
+    modifier.close();
+    dir.close();
   }
 
   public void testMaxBufferedDeletes() throws IOException {
-    for(int pass=0;pass<2;pass++) {
-      boolean autoCommit = (0==pass);
-      Directory dir = new MockRAMDirectory();
-      IndexWriter writer = new IndexWriter(dir, autoCommit,
-                                           new WhitespaceAnalyzer(), true);
-      writer.setMaxBufferedDeleteTerms(1);
-      writer.deleteDocuments(new Term("foobar", "1"));
-      writer.deleteDocuments(new Term("foobar", "1"));
-      writer.deleteDocuments(new Term("foobar", "1"));
-      assertEquals(3, writer.getFlushDeletesCount());
-      writer.close();
-      dir.close();
-    }
+    Directory dir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir,
+                                         new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    writer.setMaxBufferedDeleteTerms(1);
+    writer.deleteDocuments(new Term("foobar", "1"));
+    writer.deleteDocuments(new Term("foobar", "1"));
+    writer.deleteDocuments(new Term("foobar", "1"));
+    assertEquals(3, writer.getFlushDeletesCount());
+    writer.close();
+    dir.close();
   }
 
   // test when delete terms only apply to ram segments
   public void testRAMDeletes() throws IOException {
-    for(int pass=0;pass<2;pass++) {
-      for(int t=0;t<2;t++) {
-        boolean autoCommit = (0==pass);
-        Directory dir = new MockRAMDirectory();
-        IndexWriter modifier = new IndexWriter(dir, autoCommit,
-                                               new WhitespaceAnalyzer(), true);
-        modifier.setMaxBufferedDocs(4);
-        modifier.setMaxBufferedDeleteTerms(4);
-
-        int id = 0;
-        int value = 100;
-
-        addDoc(modifier, ++id, value);
-        if (0 == t)
-          modifier.deleteDocuments(new Term("value", String.valueOf(value)));
-        else
-          modifier.deleteDocuments(new TermQuery(new Term("value", String.valueOf(value))));
-        addDoc(modifier, ++id, value);
-        if (0 == t) {
-          modifier.deleteDocuments(new Term("value", String.valueOf(value)));
-          assertEquals(2, modifier.getNumBufferedDeleteTerms());
-          assertEquals(1, modifier.getBufferedDeleteTermsSize());
-        }
-        else
-          modifier.deleteDocuments(new TermQuery(new Term("value", String.valueOf(value))));
-
-        addDoc(modifier, ++id, value);
-        assertEquals(0, modifier.getSegmentCount());
-        modifier.flush();
-
-        modifier.commit();
-
-        IndexReader reader = IndexReader.open(dir, true);
-        assertEquals(1, reader.numDocs());
-
-        int hitCount = getHitCount(dir, new Term("id", String.valueOf(id)));
-        assertEquals(1, hitCount);
-        reader.close();
-        modifier.close();
-        dir.close();
-      }
-    }
-  }
-
-  // test when delete terms apply to both disk and ram segments
-  public void testBothDeletes() throws IOException {
-    for(int pass=0;pass<2;pass++) {
-      boolean autoCommit = (0==pass);
-
+    for(int t=0;t<2;t++) {
       Directory dir = new MockRAMDirectory();
-      IndexWriter modifier = new IndexWriter(dir, autoCommit,
-                                             new WhitespaceAnalyzer(), true);
-      modifier.setMaxBufferedDocs(100);
-      modifier.setMaxBufferedDeleteTerms(100);
+      IndexWriter modifier = new IndexWriter(dir,
+                                             new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+      modifier.setMaxBufferedDocs(4);
+      modifier.setMaxBufferedDeleteTerms(4);
 
       int id = 0;
       int value = 100;
 
-      for (int i = 0; i < 5; i++) {
-        addDoc(modifier, ++id, value);
+      addDoc(modifier, ++id, value);
+      if (0 == t)
+        modifier.deleteDocuments(new Term("value", String.valueOf(value)));
+      else
+        modifier.deleteDocuments(new TermQuery(new Term("value", String.valueOf(value))));
+      addDoc(modifier, ++id, value);
+      if (0 == t) {
+        modifier.deleteDocuments(new Term("value", String.valueOf(value)));
+        assertEquals(2, modifier.getNumBufferedDeleteTerms());
+        assertEquals(1, modifier.getBufferedDeleteTermsSize());
       }
+      else
+        modifier.deleteDocuments(new TermQuery(new Term("value", String.valueOf(value))));
 
-      value = 200;
-      for (int i = 0; i < 5; i++) {
-        addDoc(modifier, ++id, value);
-      }
-      modifier.commit();
-
-      for (int i = 0; i < 5; i++) {
-        addDoc(modifier, ++id, value);
-      }
-      modifier.deleteDocuments(new Term("value", String.valueOf(value)));
+      addDoc(modifier, ++id, value);
+      assertEquals(0, modifier.getSegmentCount());
+      modifier.flush();
 
       modifier.commit();
 
       IndexReader reader = IndexReader.open(dir, true);
-      assertEquals(5, reader.numDocs());
+      assertEquals(1, reader.numDocs());
+
+      int hitCount = getHitCount(dir, new Term("id", String.valueOf(id)));
+      assertEquals(1, hitCount);
+      reader.close();
       modifier.close();
+      dir.close();
     }
   }
 
+  // test when delete terms apply to both disk and ram segments
+  public void testBothDeletes() throws IOException {
+    Directory dir = new MockRAMDirectory();
+    IndexWriter modifier = new IndexWriter(dir,
+                                           new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    modifier.setMaxBufferedDocs(100);
+    modifier.setMaxBufferedDeleteTerms(100);
+
+    int id = 0;
+    int value = 100;
+
+    for (int i = 0; i < 5; i++) {
+      addDoc(modifier, ++id, value);
+    }
+
+    value = 200;
+    for (int i = 0; i < 5; i++) {
+      addDoc(modifier, ++id, value);
+    }
+    modifier.commit();
+
+    for (int i = 0; i < 5; i++) {
+      addDoc(modifier, ++id, value);
+    }
+    modifier.deleteDocuments(new Term("value", String.valueOf(value)));
+
+    modifier.commit();
+
+    IndexReader reader = IndexReader.open(dir, true);
+    assertEquals(5, reader.numDocs());
+    modifier.close();
+  }
+
   // test that batched delete terms are flushed together
   public void testBatchDeletes() throws IOException {
-    for(int pass=0;pass<2;pass++) {
-      boolean autoCommit = (0==pass);
-      Directory dir = new MockRAMDirectory();
-      IndexWriter modifier = new IndexWriter(dir, autoCommit,
-                                             new WhitespaceAnalyzer(), true);
-      modifier.setMaxBufferedDocs(2);
-      modifier.setMaxBufferedDeleteTerms(2);
+    Directory dir = new MockRAMDirectory();
+    IndexWriter modifier = new IndexWriter(dir,
+                                           new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    modifier.setMaxBufferedDocs(2);
+    modifier.setMaxBufferedDeleteTerms(2);
 
-      int id = 0;
-      int value = 100;
+    int id = 0;
+    int value = 100;
 
-      for (int i = 0; i < 7; i++) {
-        addDoc(modifier, ++id, value);
-      }
-      modifier.commit();
+    for (int i = 0; i < 7; i++) {
+      addDoc(modifier, ++id, value);
+    }
+    modifier.commit();
 
-      IndexReader reader = IndexReader.open(dir, true);
-      assertEquals(7, reader.numDocs());
-      reader.close();
+    IndexReader reader = IndexReader.open(dir, true);
+    assertEquals(7, reader.numDocs());
+    reader.close();
       
-      id = 0;
-      modifier.deleteDocuments(new Term("id", String.valueOf(++id)));
-      modifier.deleteDocuments(new Term("id", String.valueOf(++id)));
+    id = 0;
+    modifier.deleteDocuments(new Term("id", String.valueOf(++id)));
+    modifier.deleteDocuments(new Term("id", String.valueOf(++id)));
 
-      modifier.commit();
-
-      reader = IndexReader.open(dir, true);
-      assertEquals(5, reader.numDocs());
-      reader.close();
+    modifier.commit();
 
-      Term[] terms = new Term[3];
-      for (int i = 0; i < terms.length; i++) {
-        terms[i] = new Term("id", String.valueOf(++id));
-      }
-      modifier.deleteDocuments(terms);
-      modifier.commit();
-      reader = IndexReader.open(dir, true);
-      assertEquals(2, reader.numDocs());
-      reader.close();
+    reader = IndexReader.open(dir, true);
+    assertEquals(5, reader.numDocs());
+    reader.close();
 
-      modifier.close();
-      dir.close();
+    Term[] terms = new Term[3];
+    for (int i = 0; i < terms.length; i++) {
+      terms[i] = new Term("id", String.valueOf(++id));
     }
+    modifier.deleteDocuments(terms);
+    modifier.commit();
+    reader = IndexReader.open(dir, true);
+    assertEquals(2, reader.numDocs());
+    reader.close();
+
+    modifier.close();
+    dir.close();
   }
 
   // test deleteAll()
   public void testDeleteAll() throws IOException {
-    for (int pass=0;pass<2;pass++) {
-      boolean autoCommit = (0==pass);
-      Directory dir = new MockRAMDirectory();
-      IndexWriter modifier = new IndexWriter(dir, autoCommit,
-                                             new WhitespaceAnalyzer(), true);
-      modifier.setMaxBufferedDocs(2);
-      modifier.setMaxBufferedDeleteTerms(2);
+    Directory dir = new MockRAMDirectory();
+    IndexWriter modifier = new IndexWriter(dir,
+                                           new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    modifier.setMaxBufferedDocs(2);
+    modifier.setMaxBufferedDeleteTerms(2);
 
-      int id = 0;
-      int value = 100;
+    int id = 0;
+    int value = 100;
 
-      for (int i = 0; i < 7; i++) {
-        addDoc(modifier, ++id, value);
-      }
-      modifier.commit();
+    for (int i = 0; i < 7; i++) {
+      addDoc(modifier, ++id, value);
+    }
+    modifier.commit();
 
-      IndexReader reader = IndexReader.open(dir, true);
-      assertEquals(7, reader.numDocs());
-      reader.close();
+    IndexReader reader = IndexReader.open(dir, true);
+    assertEquals(7, reader.numDocs());
+    reader.close();
 
-      // Add 1 doc (so we will have something buffered)
-      addDoc(modifier, 99, value);
+    // Add 1 doc (so we will have something buffered)
+    addDoc(modifier, 99, value);
 
-      // Delete all
-      modifier.deleteAll();
+    // Delete all
+    modifier.deleteAll();
 
-      // Delete all shouldn't be on disk yet
-      reader = IndexReader.open(dir, true);
-      assertEquals(7, reader.numDocs());
-      reader.close();
+    // Delete all shouldn't be on disk yet
+    reader = IndexReader.open(dir, true);
+    assertEquals(7, reader.numDocs());
+    reader.close();
 
-      // Add a doc and update a doc (after the deleteAll, before the commit)
-      addDoc(modifier, 101, value);
-      updateDoc(modifier, 102, value);
+    // Add a doc and update a doc (after the deleteAll, before the commit)
+    addDoc(modifier, 101, value);
+    updateDoc(modifier, 102, value);
 
-      // commit the delete all
-      modifier.commit();
+    // commit the delete all
+    modifier.commit();
 
-      // Validate there are no docs left
-      reader = IndexReader.open(dir, true);
-      assertEquals(2, reader.numDocs());
-      reader.close();
+    // Validate there are no docs left
+    reader = IndexReader.open(dir, true);
+    assertEquals(2, reader.numDocs());
+    reader.close();
 
-      modifier.close();
-      dir.close();
-    }
+    modifier.close();
+    dir.close();
   }
 
   // test rollback of deleteAll()
   public void testDeleteAllRollback() throws IOException {
     Directory dir = new MockRAMDirectory();
-    IndexWriter modifier = new IndexWriter(dir, false,
-                                           new WhitespaceAnalyzer(), true);
+    IndexWriter modifier = new IndexWriter(dir,
+                                           new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
     modifier.setMaxBufferedDocs(2);
     modifier.setMaxBufferedDeleteTerms(2);
     
@@ -355,8 +332,8 @@
   // test deleteAll() w/ near real-time reader
   public void testDeleteAllNRT() throws IOException {
     Directory dir = new MockRAMDirectory();
-    IndexWriter modifier = new IndexWriter(dir, false,
-                                           new WhitespaceAnalyzer(), true);
+    IndexWriter modifier = new IndexWriter(dir,
+                                           new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
     modifier.setMaxBufferedDocs(2);
     modifier.setMaxBufferedDeleteTerms(2);
     
@@ -445,187 +422,183 @@
     int START_COUNT = 157;
     int END_COUNT = 144;
 
-    for(int pass=0;pass<2;pass++) {
-      boolean autoCommit = (0==pass);
-
-      // First build up a starting index:
-      MockRAMDirectory startDir = new MockRAMDirectory();
-      IndexWriter writer = new IndexWriter(startDir, autoCommit,
-                                           new WhitespaceAnalyzer(), true);
-      for (int i = 0; i < 157; i++) {
-        Document d = new Document();
-        d.add(new Field("id", Integer.toString(i), Field.Store.YES,
-                        Field.Index.NOT_ANALYZED));
-        d.add(new Field("content", "aaa " + i, Field.Store.NO,
-                        Field.Index.ANALYZED));
-        writer.addDocument(d);
-      }
-      writer.close();
-
-      long diskUsage = startDir.sizeInBytes();
-      long diskFree = diskUsage + 10;
-
-      IOException err = null;
-
-      boolean done = false;
-
-      // Iterate w/ ever increasing free disk space:
-      while (!done) {
-        MockRAMDirectory dir = new MockRAMDirectory(startDir);
-        dir.setPreventDoubleWrite(false);
-        IndexWriter modifier = new IndexWriter(dir, autoCommit,
-                                               new WhitespaceAnalyzer());
-
-        modifier.setMaxBufferedDocs(1000); // use flush or close
-        modifier.setMaxBufferedDeleteTerms(1000); // use flush or close
-
-        // For each disk size, first try to commit against
-        // dir that will hit random IOExceptions & disk
-        // full; after, give it infinite disk space & turn
-        // off random IOExceptions & retry w/ same reader:
-        boolean success = false;
-
-        for (int x = 0; x < 2; x++) {
-
-          double rate = 0.1;
-          double diskRatio = ((double)diskFree) / diskUsage;
-          long thisDiskFree;
-          String testName;
-
-          if (0 == x) {
-            thisDiskFree = diskFree;
-            if (diskRatio >= 2.0) {
-              rate /= 2;
-            }
-            if (diskRatio >= 4.0) {
-              rate /= 2;
-            }
-            if (diskRatio >= 6.0) {
-              rate = 0.0;
-            }
-            if (debug) {
-              System.out.println("\ncycle: " + diskFree + " bytes");
-            }
-            testName = "disk full during reader.close() @ " + thisDiskFree
-              + " bytes";
-          } else {
-            thisDiskFree = 0;
+    // First build up a starting index:
+    MockRAMDirectory startDir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(startDir,
+                                         new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    for (int i = 0; i < 157; i++) {
+      Document d = new Document();
+      d.add(new Field("id", Integer.toString(i), Field.Store.YES,
+                      Field.Index.NOT_ANALYZED));
+      d.add(new Field("content", "aaa " + i, Field.Store.NO,
+                      Field.Index.ANALYZED));
+      writer.addDocument(d);
+    }
+    writer.close();
+
+    long diskUsage = startDir.sizeInBytes();
+    long diskFree = diskUsage + 10;
+
+    IOException err = null;
+
+    boolean done = false;
+
+    // Iterate w/ ever increasing free disk space:
+    while (!done) {
+      MockRAMDirectory dir = new MockRAMDirectory(startDir);
+      dir.setPreventDoubleWrite(false);
+      IndexWriter modifier = new IndexWriter(dir,
+                                             new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
+
+      modifier.setMaxBufferedDocs(1000); // use flush or close
+      modifier.setMaxBufferedDeleteTerms(1000); // use flush or close
+
+      // For each disk size, first try to commit against
+      // dir that will hit random IOExceptions & disk
+      // full; after, give it infinite disk space & turn
+      // off random IOExceptions & retry w/ same reader:
+      boolean success = false;
+
+      for (int x = 0; x < 2; x++) {
+
+        double rate = 0.1;
+        double diskRatio = ((double)diskFree) / diskUsage;
+        long thisDiskFree;
+        String testName;
+
+        if (0 == x) {
+          thisDiskFree = diskFree;
+          if (diskRatio >= 2.0) {
+            rate /= 2;
+          }
+          if (diskRatio >= 4.0) {
+            rate /= 2;
+          }
+          if (diskRatio >= 6.0) {
             rate = 0.0;
-            if (debug) {
-              System.out.println("\ncycle: same writer: unlimited disk space");
-            }
-            testName = "reader re-use after disk full";
           }
+          if (debug) {
+            System.out.println("\ncycle: " + diskFree + " bytes");
+          }
+          testName = "disk full during reader.close() @ " + thisDiskFree
+            + " bytes";
+        } else {
+          thisDiskFree = 0;
+          rate = 0.0;
+          if (debug) {
+            System.out.println("\ncycle: same writer: unlimited disk space");
+          }
+          testName = "reader re-use after disk full";
+        }
 
-          dir.setMaxSizeInBytes(thisDiskFree);
-          dir.setRandomIOExceptionRate(rate, diskFree);
+        dir.setMaxSizeInBytes(thisDiskFree);
+        dir.setRandomIOExceptionRate(rate, diskFree);
 
-          try {
-            if (0 == x) {
-              int docId = 12;
-              for (int i = 0; i < 13; i++) {
-                if (updates) {
-                  Document d = new Document();
-                  d.add(new Field("id", Integer.toString(i), Field.Store.YES,
-                                  Field.Index.NOT_ANALYZED));
-                  d.add(new Field("content", "bbb " + i, Field.Store.NO,
-                                  Field.Index.ANALYZED));
-                  modifier.updateDocument(new Term("id", Integer.toString(docId)), d);
-                } else { // deletes
-                  modifier.deleteDocuments(new Term("id", Integer.toString(docId)));
-                  // modifier.setNorm(docId, "contents", (float)2.0);
-                }
-                docId += 12;
+        try {
+          if (0 == x) {
+            int docId = 12;
+            for (int i = 0; i < 13; i++) {
+              if (updates) {
+                Document d = new Document();
+                d.add(new Field("id", Integer.toString(i), Field.Store.YES,
+                                Field.Index.NOT_ANALYZED));
+                d.add(new Field("content", "bbb " + i, Field.Store.NO,
+                                Field.Index.ANALYZED));
+                modifier.updateDocument(new Term("id", Integer.toString(docId)), d);
+              } else { // deletes
+                modifier.deleteDocuments(new Term("id", Integer.toString(docId)));
+                // modifier.setNorm(docId, "contents", (float)2.0);
               }
-            }
-            modifier.close();
-            success = true;
-            if (0 == x) {
-              done = true;
+              docId += 12;
             }
           }
-          catch (IOException e) {
-            if (debug) {
-              System.out.println("  hit IOException: " + e);
-              e.printStackTrace(System.out);
-            }
-            err = e;
-            if (1 == x) {
-              e.printStackTrace();
-              fail(testName + " hit IOException after disk space was freed up");
-            }
+          modifier.close();
+          success = true;
+          if (0 == x) {
+            done = true;
           }
-
-          // If the close() succeeded, make sure there are
-          // no unreferenced files.
-          if (success)
-            TestIndexWriter.assertNoUnreferencedFiles(dir, "after writer.close");
-
-          // Finally, verify index is not corrupt, and, if
-          // we succeeded, we see all docs changed, and if
-          // we failed, we see either all docs or no docs
-          // changed (transactional semantics):
-          IndexReader newReader = null;
-          try {
-            newReader = IndexReader.open(dir, true);
+        }
+        catch (IOException e) {
+          if (debug) {
+            System.out.println("  hit IOException: " + e);
+            e.printStackTrace(System.out);
           }
-          catch (IOException e) {
+          err = e;
+          if (1 == x) {
             e.printStackTrace();
-            fail(testName
-                 + ":exception when creating IndexReader after disk full during close: "
-                 + e);
+            fail(testName + " hit IOException after disk space was freed up");
           }
+        }
 
-          IndexSearcher searcher = new IndexSearcher(newReader);
-          ScoreDoc[] hits = null;
-          try {
-            hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
-          }
-          catch (IOException e) {
-            e.printStackTrace();
-            fail(testName + ": exception when searching: " + e);
+        // If the close() succeeded, make sure there are
+        // no unreferenced files.
+        if (success)
+          TestIndexWriter.assertNoUnreferencedFiles(dir, "after writer.close");
+
+        // Finally, verify index is not corrupt, and, if
+        // we succeeded, we see all docs changed, and if
+        // we failed, we see either all docs or no docs
+        // changed (transactional semantics):
+        IndexReader newReader = null;
+        try {
+          newReader = IndexReader.open(dir, true);
+        }
+        catch (IOException e) {
+          e.printStackTrace();
+          fail(testName
+               + ": exception when creating IndexReader after disk full during close: "
+               + e);
+        }
+
+        IndexSearcher searcher = new IndexSearcher(newReader);
+        ScoreDoc[] hits = null;
+        try {
+          hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+        }
+        catch (IOException e) {
+          e.printStackTrace();
+          fail(testName + ": exception when searching: " + e);
+        }
+        int result2 = hits.length;
+        if (success) {
+          if (x == 0 && result2 != END_COUNT) {
+            fail(testName
+                 + ": method did not throw exception but hits.length for search on term 'aaa' is "
+                 + result2 + " instead of expected " + END_COUNT);
+          } else if (x == 1 && result2 != START_COUNT && result2 != END_COUNT) {
+            // It's possible that the first exception was
+            // "recoverable" wrt pending deletes, in which
+            // case the pending deletes are retained and
+            // then re-flushing (with plenty of disk
+            // space) will succeed in flushing the
+            // deletes:
+            fail(testName
+                 + ": method did not throw exception but hits.length for search on term 'aaa' is "
+                 + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
           }
-          int result2 = hits.length;
-          if (success) {
-            if (x == 0 && result2 != END_COUNT) {
-              fail(testName
-                   + ": method did not throw exception but hits.length for search on term 'aaa' is "
-                   + result2 + " instead of expected " + END_COUNT);
-            } else if (x == 1 && result2 != START_COUNT && result2 != END_COUNT) {
-              // It's possible that the first exception was
-              // "recoverable" wrt pending deletes, in which
-              // case the pending deletes are retained and
-              // then re-flushing (with plenty of disk
-              // space) will succeed in flushing the
-              // deletes:
-              fail(testName
-                   + ": method did not throw exception but hits.length for search on term 'aaa' is "
-                   + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
-            }
-          } else {
-            // On hitting exception we still may have added
-            // all docs:
-            if (result2 != START_COUNT && result2 != END_COUNT) {
-              err.printStackTrace();
-              fail(testName
-                   + ": method did throw exception but hits.length for search on term 'aaa' is "
-                   + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
-            }
+        } else {
+          // On hitting exception we still may have added
+          // all docs:
+          if (result2 != START_COUNT && result2 != END_COUNT) {
+            err.printStackTrace();
+            fail(testName
+                 + ": method did throw exception but hits.length for search on term 'aaa' is "
+                 + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
           }
+        }
 
-          searcher.close();
-          newReader.close();
+        searcher.close();
+        newReader.close();
 
-          if (result2 == END_COUNT) {
-            break;
-          }
+        if (result2 == END_COUNT) {
+          break;
         }
+      }
 
-        dir.close();
+      dir.close();
 
-        // Try again with 10 more bytes of free space:
-        diskFree += 10;
-      }
+      // Try again with 10 more bytes of free space:
+      diskFree += 10;
     }
   }
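
An aside for readers skimming the hunk above: the two dir.set...() calls are MockRAMDirectory's fault-injection hooks, and the reader checks that follow assert Lucene's transactional promise on failure. A minimal, self-contained sketch of the pattern, not part of this commit (the constants are illustrative):

    import java.io.IOException;

    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.MockRAMDirectory;

    public class DiskFullSketch {
      public static void main(String[] args) throws Exception {
        MockRAMDirectory dir = new MockRAMDirectory();
        long diskFree = 1000;                          // fake free-space budget, in bytes
        dir.setMaxSizeInBytes(diskFree);               // writes past this limit throw IOException
        dir.setRandomIOExceptionRate(0.05, diskFree);  // also fail ~5% of writes at random
        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true,
                                             IndexWriter.MaxFieldLength.UNLIMITED);
        try {
          writer.addDocument(new Document());
          writer.close();                              // may hit the simulated disk-full
        } catch (IOException e) {
          // Transactional semantics, as the test asserts: a fresh reader
          // must now see either all buffered changes or none of them.
        }
      }
    }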
 
@@ -677,87 +650,84 @@
         "Venice has lots of canals" };
     String[] text = { "Amsterdam", "Venice" };
 
-    for(int pass=0;pass<2;pass++) {
-      boolean autoCommit = (0==pass);
-      MockRAMDirectory dir = new MockRAMDirectory();
-      IndexWriter modifier = new IndexWriter(dir, autoCommit,
-                                             new WhitespaceAnalyzer(), true);
-      modifier.setUseCompoundFile(true);
-      modifier.setMaxBufferedDeleteTerms(2);
-
-      dir.failOn(failure.reset());
-
-      for (int i = 0; i < keywords.length; i++) {
-        Document doc = new Document();
-        doc.add(new Field("id", keywords[i], Field.Store.YES,
-                          Field.Index.NOT_ANALYZED));
-        doc.add(new Field("country", unindexed[i], Field.Store.YES,
-                          Field.Index.NO));
-        doc.add(new Field("contents", unstored[i], Field.Store.NO,
-                          Field.Index.ANALYZED));
-        doc.add(new Field("city", text[i], Field.Store.YES,
-                          Field.Index.ANALYZED));
-        modifier.addDocument(doc);
-      }
-      // flush (and commit if ac)
-
-      modifier.optimize();
-      modifier.commit();
+    MockRAMDirectory dir = new MockRAMDirectory();
+    IndexWriter modifier = new IndexWriter(dir,
+                                           new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    modifier.setUseCompoundFile(true);
+    modifier.setMaxBufferedDeleteTerms(2);
 
-      // one of the two files hits
+    dir.failOn(failure.reset());
 
-      Term term = new Term("city", "Amsterdam");
-      int hitCount = getHitCount(dir, term);
-      assertEquals(1, hitCount);
+    for (int i = 0; i < keywords.length; i++) {
+      Document doc = new Document();
+      doc.add(new Field("id", keywords[i], Field.Store.YES,
+                        Field.Index.NOT_ANALYZED));
+      doc.add(new Field("country", unindexed[i], Field.Store.YES,
+                        Field.Index.NO));
+      doc.add(new Field("contents", unstored[i], Field.Store.NO,
+                        Field.Index.ANALYZED));
+      doc.add(new Field("city", text[i], Field.Store.YES,
+                        Field.Index.ANALYZED));
+      modifier.addDocument(doc);
+    }
+    // flush and commit the added docs
 
-      // open the writer again (closed above)
+    modifier.optimize();
+    modifier.commit();
 
-      // delete the doc
-      // max buf del terms is two, so this is buffered
+    // one of the two documents matches this term
 
-      modifier.deleteDocuments(term);
+    Term term = new Term("city", "Amsterdam");
+    int hitCount = getHitCount(dir, term);
+    assertEquals(1, hitCount);
 
-      // add a doc (needed for the !ac case; see below)
-      // doc remains buffered
+    // the writer is still open from above
 
-      Document doc = new Document();
-      modifier.addDocument(doc);
+    // delete the doc
+    // max buf del terms is two, so this is buffered
 
-      // commit the changes, the buffered deletes, and the new doc
+    modifier.deleteDocuments(term);
 
-      // The failure object will fail on the first write after the del
-      // file gets created when processing the buffered delete
+    // add a doc; it remains buffered (needed so the commit below
+    // creates a cfs file; see comments below)
 
-      // in the ac case, this will be when writing the new segments
-      // files so we really don't need the new doc, but it's harmless
+    Document doc = new Document();
+    modifier.addDocument(doc);
 
-      // in the !ac case, a new segments file won't be created but in
-      // this case, creation of the cfs file happens next so we need
-      // the doc (to test that it's okay that we don't lose deletes if
-      // failing while creating the cfs file)
+    // commit the changes, the buffered deletes, and the new doc
 
-      boolean failed = false;
-      try {
-        modifier.commit();
-      } catch (IOException ioe) {
-        failed = true;
-      }
+    // The failure object will fail on the first write after the del
+    // file gets created when processing the buffered delete
 
-      assertTrue(failed);
+    // A new segments file won't be created yet; creation of the cfs
+    // file happens next, so we need the doc (to test that it's okay
+    // that we don't lose deletes if we fail while creating the cfs
+    // file)
 
-      // The commit above failed, so we need to retry it (which will
-      // succeed, because the failure is a one-shot)
 
+    boolean failed = false;
+    try {
       modifier.commit();
+    } catch (IOException ioe) {
+      failed = true;
+    }
 
-      hitCount = getHitCount(dir, term);
+    assertTrue(failed);
 
-      // Make sure the delete was successfully flushed:
-      assertEquals(0, hitCount);
+    // The commit above failed, so we need to retry it (which will
+    // succeed, because the failure is a one-shot)
 
-      modifier.close();
-      dir.close();
-    }
+    modifier.commit();
+
+    hitCount = getHitCount(dir, term);
+
+    // Make sure the delete was successfully flushed:
+    assertEquals(0, hitCount);
+
+    modifier.close();
+    dir.close();
   }
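
The shape of the test above is easier to see out of diff context: arm a one-shot failure, expect the first commit to throw, then retry. A schematic sketch reusing the test's own names (dir, modifier, failure); nothing here is new API:

    // Arm the injected failure; reset() re-arms it to trip exactly once.
    dir.failOn(failure.reset());

    boolean failed = false;
    try {
      modifier.commit();          // first commit hits the injected IOException
    } catch (IOException ioe) {
      failed = true;
    }
    assertTrue(failed);           // the injected failure must have fired

    modifier.commit();            // the failure was one-shot, so the retry succeeds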
 
   // This test tests that the files created by the docs writer before
@@ -787,47 +757,43 @@
         "Venice has lots of canals" };
     String[] text = { "Amsterdam", "Venice" };
 
-    for(int pass=0;pass<2;pass++) {
-      boolean autoCommit = (0==pass);
-      MockRAMDirectory dir = new MockRAMDirectory();
-      IndexWriter modifier = new IndexWriter(dir, autoCommit,
-                                             new WhitespaceAnalyzer(), true);
-
-      dir.failOn(failure.reset());
-
-      for (int i = 0; i < keywords.length; i++) {
-        Document doc = new Document();
-        doc.add(new Field("id", keywords[i], Field.Store.YES,
-                          Field.Index.NOT_ANALYZED));
-        doc.add(new Field("country", unindexed[i], Field.Store.YES,
-                          Field.Index.NO));
-        doc.add(new Field("contents", unstored[i], Field.Store.NO,
-                          Field.Index.ANALYZED));
-        doc.add(new Field("city", text[i], Field.Store.YES,
-                          Field.Index.ANALYZED));
-        try {
-          modifier.addDocument(doc);
-        } catch (IOException io) {
-          break;
-        }
-      }
+    MockRAMDirectory dir = new MockRAMDirectory();
+    IndexWriter modifier = new IndexWriter(dir,
+                                           new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+
+    dir.failOn(failure.reset());
 
-      String[] startFiles = dir.listAll();
-      SegmentInfos infos = new SegmentInfos();
-      infos.read(dir);
-      new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null);
-      String[] endFiles = dir.listAll();
-
-      if (!Arrays.equals(startFiles, endFiles)) {
-        fail("docswriter abort() failed to delete unreferenced files:\n  before delete:\n    "
-             + arrayToString(startFiles) + "\n  after delete:\n    "
-             + arrayToString(endFiles));
+    for (int i = 0; i < keywords.length; i++) {
+      Document doc = new Document();
+      doc.add(new Field("id", keywords[i], Field.Store.YES,
+                        Field.Index.NOT_ANALYZED));
+      doc.add(new Field("country", unindexed[i], Field.Store.YES,
+                        Field.Index.NO));
+      doc.add(new Field("contents", unstored[i], Field.Store.NO,
+                        Field.Index.ANALYZED));
+      doc.add(new Field("city", text[i], Field.Store.YES,
+                        Field.Index.ANALYZED));
+      try {
+        modifier.addDocument(doc);
+      } catch (IOException io) {
+        break;
       }
+    }
 
-      modifier.close();
+    String[] startFiles = dir.listAll();
+    SegmentInfos infos = new SegmentInfos();
+    infos.read(dir);
+    new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null);
+    String[] endFiles = dir.listAll();
 
+    if (!Arrays.equals(startFiles, endFiles)) {
+      fail("docswriter abort() failed to delete unreferenced files:\n  before delete:\n    "
+           + arrayToString(startFiles) + "\n  after delete:\n    "
+           + arrayToString(endFiles));
     }
 
+    modifier.close();
+
   }
 
   private String arrayToString(String[] l) {
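
The changes from here on are largely mechanical: every use of the deprecated IndexWriter constructors taking an autoCommit flag becomes the corresponding constructor taking an explicit MaxFieldLength. Schematically (both signatures appear verbatim in the hunks):

    // Before (deprecated): autoCommit was a constructor argument.
    //   IndexWriter w = new IndexWriter(dir, true, new WhitespaceAnalyzer(), true);
    // After: autoCommit is gone (changes become visible only via commit()
    // or close()), and the field-length limit is passed explicitly.
    IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), true,
                                    IndexWriter.MaxFieldLength.UNLIMITED);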

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java?rev=823321&r1=823320&r2=823321&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java Thu Oct  8 20:57:32 2009
@@ -125,7 +125,7 @@
   public void testMaxBufferedDocsChange() throws IOException {
     Directory dir = new RAMDirectory();
 
-    IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), true);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setMaxBufferedDocs(101);
     writer.setMergeFactor(101);
     writer.setMergePolicy(new LogDocMergePolicy(writer));
@@ -139,7 +139,7 @@
       }
       writer.close();
 
-      writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), false);
+      writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(101);
       writer.setMergeFactor(101);
       writer.setMergePolicy(new LogDocMergePolicy(writer));
@@ -158,6 +158,9 @@
     for (int i = 100; i < 1000; i++) {
       addDoc(writer);
     }
+    writer.commit();
+    ((ConcurrentMergeScheduler) writer.getMergeScheduler()).sync();
+    writer.commit();
     checkInvariants(writer);
 
     writer.close();
@@ -167,7 +170,7 @@
   public void testMergeDocCount0() throws IOException {
     Directory dir = new RAMDirectory();
 
-    IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), true);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setMergePolicy(new LogDocMergePolicy(writer));
     writer.setMaxBufferedDocs(10);
     writer.setMergeFactor(100);
@@ -182,7 +185,7 @@
     reader.deleteDocuments(new Term("content", "aaa"));
     reader.close();
 
-    writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), false);
+    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setMergePolicy(new LogDocMergePolicy(writer));
     writer.setMaxBufferedDocs(10);
     writer.setMergeFactor(5);
@@ -191,6 +194,9 @@
     for (int i = 0; i < 10; i++) {
       addDoc(writer);
     }
+    writer.commit();
+    ((ConcurrentMergeScheduler) writer.getMergeScheduler()).sync();
+    writer.commit();
     checkInvariants(writer);
     assertEquals(10, writer.docCount());
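
Both hunks above add the same three lines, and for the same reason: with ConcurrentMergeScheduler, merges complete on background threads, so checkInvariants could otherwise race against an in-flight merge. The idiom, schematically:

    writer.commit();          // publish the buffered docs; this may kick off merges
    ((ConcurrentMergeScheduler) writer.getMergeScheduler()).sync();
                              // sync() blocks until all running merges finish
    writer.commit();          // publish the merged segments
    checkInvariants(writer);  // segment structure is now stable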
 

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing.java?rev=823321&r1=823320&r2=823321&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing.java Thu Oct  8 20:57:32 2009
@@ -115,8 +115,8 @@
     Run one indexer and 2 searchers against single index as
     stress test.
   */
-  public void runStressTest(Directory directory, boolean autoCommit, MergeScheduler mergeScheduler) throws Exception {
-    IndexWriter modifier = new IndexWriter(directory, autoCommit, ANALYZER, true);
+  public void runStressTest(Directory directory, MergeScheduler mergeScheduler) throws Exception {
+    IndexWriter modifier = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);
 
     modifier.setMaxBufferedDocs(10);
 
@@ -166,35 +166,15 @@
   public void testStressIndexAndSearching() throws Exception {
     RANDOM = newRandom();
 
-    // RAMDir
-    Directory directory = new MockRAMDirectory();
-    runStressTest(directory, true, null);
-    directory.close();
-
-    // FSDir
-    File dirPath = _TestUtil.getTempDir("lucene.test.stress");
-    directory = FSDirectory.open(dirPath);
-    runStressTest(directory, true, null);
-    directory.close();
-
     // With ConcurrentMergeScheduler, in RAMDir
-    directory = new MockRAMDirectory();
-    runStressTest(directory, true, new ConcurrentMergeScheduler());
+    Directory directory = new MockRAMDirectory();
+    runStressTest(directory, new ConcurrentMergeScheduler());
     directory.close();
 
     // With ConcurrentMergeScheduler, in FSDir
+    File dirPath = _TestUtil.getTempDir("lucene.test.stress");
     directory = FSDirectory.open(dirPath);
-    runStressTest(directory, true, new ConcurrentMergeScheduler());
-    directory.close();
-
-    // With ConcurrentMergeScheduler and autoCommit=false, in RAMDir
-    directory = new MockRAMDirectory();
-    runStressTest(directory, false, new ConcurrentMergeScheduler());
-    directory.close();
-
-    // With ConcurrentMergeScheduler and autoCommit=false, in FSDir
-    directory = FSDirectory.open(dirPath);
-    runStressTest(directory, false, new ConcurrentMergeScheduler());
+    runStressTest(directory, new ConcurrentMergeScheduler());
     directory.close();
 
     _TestUtil.rmDir(dirPath);

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing2.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing2.java?rev=823321&r1=823320&r2=823321&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing2.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing2.java Thu Oct  8 20:57:32 2009
@@ -32,7 +32,6 @@
   static int maxFields=4;
   static int bigFieldSize=10;
   static boolean sameFieldOrder=false;
-  static boolean autoCommit=false;
   static int mergeFactor=3;
   static int maxBufferedDocs=3;
   static int seed=0;
@@ -41,8 +40,8 @@
 
   public class MockIndexWriter extends IndexWriter {
 
-    public MockIndexWriter(Directory dir, boolean autoCommit, Analyzer a, boolean create) throws IOException {
-      super(dir, autoCommit, a, create);
+    public MockIndexWriter(Directory dir, Analyzer a, boolean create, IndexWriter.MaxFieldLength mfl) throws IOException {
+      super(dir, a, create, mfl);
     }
 
     boolean testPoint(String name) {
@@ -88,7 +87,6 @@
     r = newRandom();
     for (int i=0; i<100; i++) {  // increase iterations for better testing
       sameFieldOrder=r.nextBoolean();
-      autoCommit=r.nextBoolean();
       mergeFactor=r.nextInt(3)+2;
       maxBufferedDocs=r.nextInt(3)+2;
       seed++;
@@ -124,7 +122,7 @@
   
   public DocsAndWriter indexRandomIWReader(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
     Map docs = new HashMap();
-    IndexWriter w = new MockIndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true);
+    IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
     w.setUseCompoundFile(false);
 
     /***
@@ -176,7 +174,7 @@
   public Map indexRandom(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
     Map docs = new HashMap();
     for(int iter=0;iter<3;iter++) {
-      IndexWriter w = new MockIndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true);
+      IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
       w.setUseCompoundFile(false);
 
       // force many merges

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestThreadedOptimize.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestThreadedOptimize.java?rev=823321&r1=823320&r2=823321&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestThreadedOptimize.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestThreadedOptimize.java Thu Oct  8 20:57:32 2009
@@ -51,9 +51,9 @@
     failed = true;
   }
 
-  public void runTest(Directory directory, boolean autoCommit, MergeScheduler merger) throws Exception {
+  public void runTest(Directory directory, MergeScheduler merger) throws Exception {
 
-    IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true);
+    IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setMaxBufferedDocs(2);
     if (merger != null)
       writer.setMergeScheduler(merger);
@@ -73,8 +73,6 @@
       writer.setMergeFactor(4);
       //writer.setInfoStream(System.out);
 
-      final int docCount = writer.docCount();
-
       Thread[] threads = new Thread[NUM_THREADS];
       
       for(int i=0;i<NUM_THREADS;i++) {
@@ -118,11 +116,9 @@
 
       assertEquals(expectedDocCount, writer.docCount());
 
-      if (!autoCommit) {
-        writer.close();
-        writer = new IndexWriter(directory, autoCommit, ANALYZER, false);
-        writer.setMaxBufferedDocs(2);
-      }
+      writer.close();
+      writer = new IndexWriter(directory, ANALYZER, false, IndexWriter.MaxFieldLength.UNLIMITED);
+      writer.setMaxBufferedDocs(2);
 
       IndexReader reader = IndexReader.open(directory, true);
       assertTrue(reader.isOptimized());
@@ -138,10 +134,8 @@
   */
   public void testThreadedOptimize() throws Exception {
     Directory directory = new MockRAMDirectory();
-    runTest(directory, false, new SerialMergeScheduler());
-    runTest(directory, true, new SerialMergeScheduler());
-    runTest(directory, false, new ConcurrentMergeScheduler());
-    runTest(directory, true, new ConcurrentMergeScheduler());
+    runTest(directory, new SerialMergeScheduler());
+    runTest(directory, new ConcurrentMergeScheduler());
     directory.close();
 
     String tempDir = System.getProperty("tempDir");
@@ -150,10 +144,8 @@
 
     String dirName = tempDir + "/luceneTestThreadedOptimize";
     directory = FSDirectory.open(new File(dirName));
-    runTest(directory, false, new SerialMergeScheduler());
-    runTest(directory, true, new SerialMergeScheduler());
-    runTest(directory, false, new ConcurrentMergeScheduler());
-    runTest(directory, true, new ConcurrentMergeScheduler());
+    runTest(directory, new SerialMergeScheduler());
+    runTest(directory, new ConcurrentMergeScheduler());
     directory.close();
     _TestUtil.rmDir(dirName);
   }
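
Note how the formerly conditional close-and-reopen is now unconditional: without autoCommit there is no writer mode in which a reader can see the optimized index before the writer commits or closes. The verification step, schematically:

    writer.close();                                          // publish all changes
    IndexReader reader = IndexReader.open(directory, true);  // read-only reader
    assertTrue(reader.isOptimized());                        // optimize() completed
    reader.close();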

Modified: lucene/java/trunk/src/test/org/apache/lucene/search/payloads/PayloadHelper.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/search/payloads/PayloadHelper.java?rev=823321&r1=823320&r2=823321&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/search/payloads/PayloadHelper.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/search/payloads/PayloadHelper.java Thu Oct  8 20:57:32 2009
@@ -103,7 +103,7 @@
     RAMDirectory directory = new RAMDirectory();
     PayloadAnalyzer analyzer = new PayloadAnalyzer();
     IndexWriter writer
-            = new IndexWriter(directory, analyzer, true);
+            = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setSimilarity(similarity);
     //writer.infoStream = System.out;
     for (int i = 0; i < numDocs; i++) {

Modified: lucene/java/trunk/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java?rev=823321&r1=823320&r2=823321&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java Thu Oct  8 20:57:32 2009
@@ -115,7 +115,7 @@
       throws IOException {
     RAMDirectory directory = new RAMDirectory();
     PayloadAnalyzer analyzer = new PayloadAnalyzer();
-    IndexWriter writer = new IndexWriter(directory, analyzer, true);
+    IndexWriter writer = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setSimilarity(similarity);
 
     Document doc = new Document();
@@ -308,7 +308,6 @@
     for (int i = 0; i < topDocs.scoreDocs.length; i++) {
       while (spans.next()) {
         Collection payloads = spans.getPayload();
-        int cnt = 0;
         for (Iterator it = payloads.iterator(); it.hasNext();) {
           payloadSet.add(new String((byte[]) it.next()));
         }
@@ -362,7 +361,7 @@
   public void testPayloadSpanUtil() throws Exception {
     RAMDirectory directory = new RAMDirectory();
     PayloadAnalyzer analyzer = new PayloadAnalyzer();
-    IndexWriter writer = new IndexWriter(directory, analyzer, true);
+    IndexWriter writer = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setSimilarity(similarity);
     Document doc = new Document();
     doc.add(new Field(PayloadHelper.FIELD,"xx rr yy mm  pp", Field.Store.YES, Field.Index.ANALYZED));
@@ -425,7 +424,7 @@
     RAMDirectory directory = new RAMDirectory();
     PayloadAnalyzer analyzer = new PayloadAnalyzer();
     String[] docs = new String[]{"xx rr yy mm  pp","xx yy mm rr pp", "nopayload qq ss pp np", "one two three four five six seven eight nine ten eleven", "nine one two three four five six seven eight eleven ten"};
-    IndexWriter writer = new IndexWriter(directory, analyzer, true);
+    IndexWriter writer = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED);
 
     writer.setSimilarity(similarity);
 


