lucene-java-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From mikemcc...@apache.org
Subject svn commit: r823320 [1/2] - in /lucene/java/branches/lucene_2_9_back_compat_tests/src: java/org/apache/lucene/index/ test/org/apache/lucene/ test/org/apache/lucene/index/ test/org/apache/lucene/search/payloads/ test/org/apache/lucene/search/spans/
Date Thu, 08 Oct 2009 20:56:41 GMT
Author: mikemccand
Date: Thu Oct  8 20:56:40 2009
New Revision: 823320

URL: http://svn.apache.org/viewvc?rev=823320&view=rev
Log:
LUCENE-1950: remove autoCommit=true from IndexWriter

Modified:
    lucene/java/branches/lucene_2_9_back_compat_tests/src/java/org/apache/lucene/index/IndexWriter.java
    lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java
    lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java
    lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestAtomicUpdate.java
    lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
    lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestCrash.java
    lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestDeletionPolicy.java
    lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestIndexWriter.java
    lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
    lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
    lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestStressIndexing.java
    lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestStressIndexing2.java
    lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestThreadedOptimize.java
    lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/search/payloads/PayloadHelper.java
    lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java

Modified: lucene/java/branches/lucene_2_9_back_compat_tests/src/java/org/apache/lucene/index/IndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_9_back_compat_tests/src/java/org/apache/lucene/index/IndexWriter.java?rev=823320&r1=823319&r2=823320&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_9_back_compat_tests/src/java/org/apache/lucene/index/IndexWriter.java (original)
+++ lucene/java/branches/lucene_2_9_back_compat_tests/src/java/org/apache/lucene/index/IndexWriter.java Thu Oct  8 20:56:40 2009
@@ -951,35 +951,6 @@
    * is true, then a new, empty index will be created in
    * <code>path</code>, replacing the index already there, if any.
    *
-   * @param path the path to the index directory
-   * @param a the analyzer to use
-   * @param create <code>true</code> to create the index or overwrite
-   *  the existing one; <code>false</code> to append to the existing
-   *  index
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws LockObtainFailedException if another writer
-   *  has this index open (<code>write.lock</code> could not
-   *  be obtained)
-   * @throws IOException if the directory cannot be read/written to, or
-   *  if it does not exist and <code>create</code> is
-   *  <code>false</code> or if there is any other low-level
-   *  IO error
-   * @deprecated This constructor will be removed in the 3.0 release.
-   *  Use {@link
-   *  #IndexWriter(Directory,Analyzer,boolean,MaxFieldLength)}
-   *  instead, and call {@link #commit()} when needed.
-   */
-  public IndexWriter(String path, Analyzer a, boolean create)
-       throws CorruptIndexException, LockObtainFailedException, IOException {
-    init(FSDirectory.getDirectory(path), a, create, true, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null);
-  }
-
-  /**
-   * Constructs an IndexWriter for the index in <code>path</code>.
-   * Text will be analyzed with <code>a</code>.  If <code>create</code>
-   * is true, then a new, empty index will be created in
-   * <code>path</code>, replacing the index already there, if any.
-   *
    * <p><b>NOTE</b>: autoCommit (see <a
    * href="#autoCommit">above</a>) is set to false with this
    * constructor.
@@ -1008,35 +979,6 @@
   }
 
   /**
-   * Constructs an IndexWriter for the index in <code>path</code>.
-   * Text will be analyzed with <code>a</code>.  If <code>create</code>
-   * is true, then a new, empty index will be created in
-   * <code>path</code>, replacing the index already there, if any.
-   *
-   * @param path the path to the index directory
-   * @param a the analyzer to use
-   * @param create <code>true</code> to create the index or overwrite
-   *  the existing one; <code>false</code> to append to the existing
-   *  index
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws LockObtainFailedException if another writer
-   *  has this index open (<code>write.lock</code> could not
-   *  be obtained)
-   * @throws IOException if the directory cannot be read/written to, or
-   *  if it does not exist and <code>create</code> is
-   *  <code>false</code> or if there is any other low-level
-   *  IO error
-   * @deprecated This constructor will be removed in the 3.0 release.
-   *  Use {@link
-   *  #IndexWriter(Directory,Analyzer,boolean,MaxFieldLength)}
-   *  instead, and call {@link #commit()} when needed.
-   */
-  public IndexWriter(File path, Analyzer a, boolean create)
-       throws CorruptIndexException, LockObtainFailedException, IOException {
-    init(FSDirectory.getDirectory(path), a, create, true, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null);
-  }
-
-  /**
    * Constructs an IndexWriter for the index in <code>d</code>.
    * Text will be analyzed with <code>a</code>.  If <code>create</code>
    * is true, then a new, empty index will be created in
@@ -1068,34 +1010,6 @@
   }
 
   /**
-   * Constructs an IndexWriter for the index in <code>d</code>.
-   * Text will be analyzed with <code>a</code>.  If <code>create</code>
-   * is true, then a new, empty index will be created in
-   * <code>d</code>, replacing the index already there, if any.
-   *
-   * @param d the index directory
-   * @param a the analyzer to use
-   * @param create <code>true</code> to create the index or overwrite
-   *  the existing one; <code>false</code> to append to the existing
-   *  index
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws LockObtainFailedException if another writer
-   *  has this index open (<code>write.lock</code> could not
-   *  be obtained)
-   * @throws IOException if the directory cannot be read/written to, or
-   *  if it does not exist and <code>create</code> is
-   *  <code>false</code> or if there is any other low-level
-   *  IO error
-   * @deprecated This constructor will be removed in the 3.0
-   *  release, and call {@link #commit()} when needed.
-   *  Use {@link #IndexWriter(Directory,Analyzer,boolean,MaxFieldLength)} instead.
-   */
-  public IndexWriter(Directory d, Analyzer a, boolean create)
-       throws CorruptIndexException, LockObtainFailedException, IOException {
-    init(d, a, create, false, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null);
-  }
-
-  /**
    * Constructs an IndexWriter for the index in
    * <code>path</code>, first creating it if it does not
    * already exist.  Text will be analyzed with
@@ -1129,30 +1043,6 @@
    * already exist.  Text will be analyzed with
    * <code>a</code>.
    *
-   * @param path the path to the index directory
-   * @param a the analyzer to use
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws LockObtainFailedException if another writer
-   *  has this index open (<code>write.lock</code> could not
-   *  be obtained)
-   * @throws IOException if the directory cannot be
-   *  read/written to or if there is any other low-level
-   *  IO error
-   * @deprecated This constructor will be removed in the 3.0
-   *  release, and call {@link #commit()} when needed.
-   *  Use {@link #IndexWriter(Directory,Analyzer,MaxFieldLength)} instead.
-   */
-  public IndexWriter(String path, Analyzer a)
-    throws CorruptIndexException, LockObtainFailedException, IOException {
-    init(FSDirectory.getDirectory(path), a, true, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null);
-  }
-
-  /**
-   * Constructs an IndexWriter for the index in
-   * <code>path</code>, first creating it if it does not
-   * already exist.  Text will be analyzed with
-   * <code>a</code>.
-   *
    * <p><b>NOTE</b>: autoCommit (see <a
    * href="#autoCommit">above</a>) is set to false with this
    * constructor.
@@ -1178,30 +1068,6 @@
 
   /**
    * Constructs an IndexWriter for the index in
-   * <code>path</code>, first creating it if it does not
-   * already exist.  Text will be analyzed with
-   * <code>a</code>.
-   *
-   * @param path the path to the index directory
-   * @param a the analyzer to use
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws LockObtainFailedException if another writer
-   *  has this index open (<code>write.lock</code> could not
-   *  be obtained)
-   * @throws IOException if the directory cannot be
-   *  read/written to or if there is any other low-level
-   *  IO error
-   * @deprecated This constructor will be removed in the 3.0 release.
-   *  Use {@link #IndexWriter(Directory,Analyzer,MaxFieldLength)}
-   *  instead, and call {@link #commit()} when needed.
-   */
-  public IndexWriter(File path, Analyzer a)
-    throws CorruptIndexException, LockObtainFailedException, IOException {
-    init(FSDirectory.getDirectory(path), a, true, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null);
-  }
-
-  /**
-   * Constructs an IndexWriter for the index in
    * <code>d</code>, first creating it if it does not
    * already exist.  Text will be analyzed with
    * <code>a</code>.
@@ -1228,87 +1094,6 @@
   }
 
   /**
-   * Constructs an IndexWriter for the index in
-   * <code>d</code>, first creating it if it does not
-   * already exist.  Text will be analyzed with
-   * <code>a</code>.
-   *
-   * @param d the index directory
-   * @param a the analyzer to use
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws LockObtainFailedException if another writer
-   *  has this index open (<code>write.lock</code> could not
-   *  be obtained)
-   * @throws IOException if the directory cannot be
-   *  read/written to or if there is any other low-level
-   *  IO error
-   * @deprecated This constructor will be removed in the 3.0 release.
-   *  Use {@link
-   *  #IndexWriter(Directory,Analyzer,MaxFieldLength)}
-   *  instead, and call {@link #commit()} when needed.
-   */
-  public IndexWriter(Directory d, Analyzer a)
-    throws CorruptIndexException, LockObtainFailedException, IOException {
-    init(d, a, false, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null);
-  }
-
-  /**
-   * Constructs an IndexWriter for the index in
-   * <code>d</code>, first creating it if it does not
-   * already exist.  Text will be analyzed with
-   * <code>a</code>.
-   *
-   * @param d the index directory
-   * @param autoCommit see <a href="#autoCommit">above</a>
-   * @param a the analyzer to use
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws LockObtainFailedException if another writer
-   *  has this index open (<code>write.lock</code> could not
-   *  be obtained)
-   * @throws IOException if the directory cannot be
-   *  read/written to or if there is any other low-level
-   *  IO error
-   * @deprecated This constructor will be removed in the 3.0 release.
-   *  Use {@link
-   *  #IndexWriter(Directory,Analyzer,MaxFieldLength)}
-   *  instead, and call {@link #commit()} when needed.
-   */
-  public IndexWriter(Directory d, boolean autoCommit, Analyzer a)
-    throws CorruptIndexException, LockObtainFailedException, IOException {
-    init(d, a, false, null, autoCommit, DEFAULT_MAX_FIELD_LENGTH, null, null);
-  }
-
-  /**
-   * Constructs an IndexWriter for the index in <code>d</code>.
-   * Text will be analyzed with <code>a</code>.  If <code>create</code>
-   * is true, then a new, empty index will be created in
-   * <code>d</code>, replacing the index already there, if any.
-   *
-   * @param d the index directory
-   * @param autoCommit see <a href="#autoCommit">above</a>
-   * @param a the analyzer to use
-   * @param create <code>true</code> to create the index or overwrite
-   *  the existing one; <code>false</code> to append to the existing
-   *  index
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws LockObtainFailedException if another writer
-   *  has this index open (<code>write.lock</code> could not
-   *  be obtained)
-   * @throws IOException if the directory cannot be read/written to, or
-   *  if it does not exist and <code>create</code> is
-   *  <code>false</code> or if there is any other low-level
-   *  IO error
-   * @deprecated This constructor will be removed in the 3.0 release.
-   *  Use {@link
-   *  #IndexWriter(Directory,Analyzer,boolean,MaxFieldLength)}
-   *  instead, and call {@link #commit()} when needed.
-   */
-  public IndexWriter(Directory d, boolean autoCommit, Analyzer a, boolean create)
-       throws CorruptIndexException, LockObtainFailedException, IOException {
-    init(d, a, create, false, null, autoCommit, DEFAULT_MAX_FIELD_LENGTH, null, null);
-  }
-
-  /**
    * Expert: constructs an IndexWriter with a custom {@link
    * IndexDeletionPolicy}, for the index in <code>d</code>,
    * first creating it if it does not already exist.  Text
@@ -1337,33 +1122,6 @@
 
   /**
    * Expert: constructs an IndexWriter with a custom {@link
-   * IndexDeletionPolicy}, for the index in <code>d</code>,
-   * first creating it if it does not already exist.  Text
-   * will be analyzed with <code>a</code>.
-   *
-   * @param d the index directory
-   * @param autoCommit see <a href="#autoCommit">above</a>
-   * @param a the analyzer to use
-   * @param deletionPolicy see <a href="#deletionPolicy">above</a>
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws LockObtainFailedException if another writer
-   *  has this index open (<code>write.lock</code> could not
-   *  be obtained)
-   * @throws IOException if the directory cannot be
-   *  read/written to or if there is any other low-level
-   *  IO error
-   * @deprecated This constructor will be removed in the 3.0 release.
-   *  Use {@link
-   *  #IndexWriter(Directory,Analyzer,IndexDeletionPolicy,MaxFieldLength)}
-   *  instead, and call {@link #commit()} when needed.
-   */
-  public IndexWriter(Directory d, boolean autoCommit, Analyzer a, IndexDeletionPolicy deletionPolicy)
-    throws CorruptIndexException, LockObtainFailedException, IOException {
-    init(d, a, false, deletionPolicy, autoCommit, DEFAULT_MAX_FIELD_LENGTH, null, null);
-  }
-  
-  /**
-   * Expert: constructs an IndexWriter with a custom {@link
    * IndexDeletionPolicy}, for the index in <code>d</code>.
    * Text will be analyzed with <code>a</code>.  If
    * <code>create</code> is true, then a new, empty index
@@ -1433,39 +1191,6 @@
   }
   
   /**
-   * Expert: constructs an IndexWriter with a custom {@link
-   * IndexDeletionPolicy}, for the index in <code>d</code>.
-   * Text will be analyzed with <code>a</code>.  If
-   * <code>create</code> is true, then a new, empty index
-   * will be created in <code>d</code>, replacing the index
-   * already there, if any.
-   *
-   * @param d the index directory
-   * @param autoCommit see <a href="#autoCommit">above</a>
-   * @param a the analyzer to use
-   * @param create <code>true</code> to create the index or overwrite
-   *  the existing one; <code>false</code> to append to the existing
-   *  index
-   * @param deletionPolicy see <a href="#deletionPolicy">above</a>
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws LockObtainFailedException if another writer
-   *  has this index open (<code>write.lock</code> could not
-   *  be obtained)
-   * @throws IOException if the directory cannot be read/written to, or
-   *  if it does not exist and <code>create</code> is
-   *  <code>false</code> or if there is any other low-level
-   *  IO error
-   * @deprecated This constructor will be removed in the 3.0 release.
-   *  Use {@link
-   *  #IndexWriter(Directory,Analyzer,boolean,IndexDeletionPolicy,MaxFieldLength)}
-   *  instead, and call {@link #commit()} when needed.
-   */
-  public IndexWriter(Directory d, boolean autoCommit, Analyzer a, boolean create, IndexDeletionPolicy deletionPolicy)
-          throws CorruptIndexException, LockObtainFailedException, IOException {
-    init(d, a, create, false, deletionPolicy, autoCommit, DEFAULT_MAX_FIELD_LENGTH, null, null);
-  }
-
-  /**
    * Expert: constructs an IndexWriter on specific commit
    * point, with a custom {@link IndexDeletionPolicy}, for
    * the index in <code>d</code>.  Text will be analyzed

Modified: lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java?rev=823320&r1=823319&r2=823320&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java (original)
+++ lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java Thu Oct  8 20:56:40 2009
@@ -50,10 +50,8 @@
   public static final String INDEX_PATH = "test.snapshots";
 
   public void testSnapshotDeletionPolicy() throws Exception {
-    File dir = new File(System.getProperty("tempDir"), INDEX_PATH);
+    File dir = _TestUtil.getTempDir(INDEX_PATH);
     try {
-      // Sometimes past test leaves the dir
-      _TestUtil.rmDir(dir);
       Directory fsDir = FSDirectory.open(dir);
       runTest(fsDir);
       fsDir.close();
@@ -70,27 +68,36 @@
     Directory dir = new MockRAMDirectory();
 
     SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
-    IndexWriter writer = new IndexWriter(dir, true,new StandardAnalyzer(), dp);
+    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), dp, IndexWriter.MaxFieldLength.UNLIMITED);
     // Force frequent commits
     writer.setMaxBufferedDocs(2);
     Document doc = new Document();
     doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-    for(int i=0;i<7;i++)
+    for(int i=0;i<7;i++) {
       writer.addDocument(doc);
+      if (i % 2 == 0) {
+        writer.commit();
+      }
+    }
     IndexCommit cp = (IndexCommit) dp.snapshot();
     copyFiles(dir, cp);
     writer.close();
     copyFiles(dir, cp);
     
-    writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp);
+    writer = new IndexWriter(dir, new StandardAnalyzer(), dp, IndexWriter.MaxFieldLength.UNLIMITED);
     copyFiles(dir, cp);
-    for(int i=0;i<7;i++)
+    for(int i=0;i<7;i++) {
       writer.addDocument(doc);
+      if (i % 2 == 0) {
+        writer.commit();
+      }
+    }
+
     copyFiles(dir, cp);
     writer.close();
     copyFiles(dir, cp);
     dp.release();
-    writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp);
+    writer = new IndexWriter(dir, new StandardAnalyzer(), dp, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.close();
     try {
       copyFiles(dir, cp);
@@ -106,7 +113,7 @@
     final long stopTime = System.currentTimeMillis() + 7000;
 
     SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
-    final IndexWriter writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp);
+    final IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), dp, IndexWriter.MaxFieldLength.UNLIMITED);
 
     // Force frequent commits
     writer.setMaxBufferedDocs(2);
@@ -123,6 +130,13 @@
                 t.printStackTrace(System.out);
                 fail("addDocument failed");
               }
+              if (i%2 == 0) {
+                try {
+                  writer.commit();
+                } catch (Exception e) {
+                  throw new RuntimeException(e);
+                }
+              }
             }
             try {
               Thread.sleep(1);

Modified: lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java?rev=823320&r1=823319&r2=823320&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java (original)
+++ lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java Thu Oct  8 20:56:40 2009
@@ -149,6 +149,7 @@
     writer.deleteDocuments(q);
 
     writer.optimize();
+    writer.commit();
 
     verifyNumDocs(dir, 1039);
     verifyTermDocs(dir, new Term("content", "aaa"), 1030);
@@ -187,6 +188,7 @@
     writer.deleteDocuments(q);
 
     writer.optimize();
+    writer.commit();
 
     verifyNumDocs(dir, 1039);
     verifyTermDocs(dir, new Term("content", "aaa"), 1030);
@@ -225,6 +227,7 @@
     writer.addIndexesNoOptimize(new Directory[] {aux});
 
     writer.optimize();
+    writer.commit();
 
     verifyNumDocs(dir, 1039);
     verifyTermDocs(dir, new Term("content", "aaa"), 1030);
@@ -425,7 +428,7 @@
 
   private IndexWriter newWriter(Directory dir, boolean create)
       throws IOException {
-    final IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), create);
+    final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), create, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setMergePolicy(new LogDocMergePolicy(writer));
     return writer;
   }

Modified: lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestAtomicUpdate.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestAtomicUpdate.java?rev=823320&r1=823319&r2=823320&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestAtomicUpdate.java (original)
+++ lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestAtomicUpdate.java Thu Oct  8 20:56:40 2009
@@ -33,8 +33,8 @@
 
   public class MockIndexWriter extends IndexWriter {
 
-    public MockIndexWriter(Directory dir, boolean autoCommit, Analyzer a, boolean create) throws IOException {
-      super(dir, autoCommit, a, create);
+    public MockIndexWriter(Directory dir, Analyzer a, boolean create, IndexWriter.MaxFieldLength mfl) throws IOException {
+      super(dir, a, create, mfl);
     }
 
     boolean testPoint(String name) {
@@ -125,7 +125,7 @@
 
     TimedThread[] threads = new TimedThread[4];
 
-    IndexWriter writer = new MockIndexWriter(directory, true, ANALYZER, true);
+    IndexWriter writer = new MockIndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setMaxBufferedDocs(7);
     writer.setMergeFactor(3);
 

Modified: lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java?rev=823320&r1=823319&r2=823320&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (original)
+++ lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java Thu Oct  8 20:56:40 2009
@@ -33,10 +33,12 @@
   private static final Analyzer ANALYZER = new SimpleAnalyzer();
 
   private static class FailOnlyOnFlush extends MockRAMDirectory.Failure {
-    boolean doFail = false;
+    boolean doFail;
+    boolean hitExc;
 
     public void setDoFail() {
       this.doFail = true;
+      hitExc = false;
     }
     public void clearDoFail() {
       this.doFail = false;
@@ -47,6 +49,7 @@
         StackTraceElement[] trace = new Exception().getStackTrace();
         for (int i = 0; i < trace.length; i++) {
           if ("doFlush".equals(trace[i].getMethodName())) {
+            hitExc = true;
             //new RuntimeException().printStackTrace(System.out);
             throw new IOException("now failing during flush");
           }
@@ -63,33 +66,42 @@
     FailOnlyOnFlush failure = new FailOnlyOnFlush();
     directory.failOn(failure);
 
-    IndexWriter writer = new IndexWriter(directory, true, ANALYZER, true);
+    IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);
     ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
     writer.setMergeScheduler(cms);
     writer.setMaxBufferedDocs(2);
     Document doc = new Document();
     Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
     doc.add(idField);
+    int extraCount = 0;
     for(int i=0;i<10;i++) {
       for(int j=0;j<20;j++) {
         idField.setValue(Integer.toString(i*20+j));
         writer.addDocument(doc);
       }
 
-      writer.addDocument(doc);
-
-      failure.setDoFail();
-      try {
-        writer.flush();
-        fail("failed to hit IOException");
-      } catch (IOException ioe) {
-        failure.clearDoFail();
+      // must cycle here because sometimes the merge flushes
+      // the doc we just added and so there's nothing to
+      // flush, and we don't hit the exception
+      while(true) {
+        writer.addDocument(doc);
+        failure.setDoFail();
+        try {
+          writer.flush();
+          if (failure.hitExc) {
+            fail("failed to hit IOException");
+          }
+          extraCount++;
+        } catch (IOException ioe) {
+          failure.clearDoFail();
+          break;
+        }
       }
     }
 
     writer.close();
     IndexReader reader = IndexReader.open(directory, true);
-    assertEquals(200, reader.numDocs());
+    assertEquals(200+extraCount, reader.numDocs());
     reader.close();
     directory.close();
   }
@@ -100,7 +112,7 @@
 
     RAMDirectory directory = new MockRAMDirectory();
 
-    IndexWriter writer = new IndexWriter(directory, true, ANALYZER, true);
+    IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);
     ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
     writer.setMergeScheduler(cms);
 
@@ -142,32 +154,28 @@
 
     RAMDirectory directory = new MockRAMDirectory();
 
-    for(int pass=0;pass<2;pass++) {
-
-      boolean autoCommit = pass==0;
-      IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true);
-
-      for(int iter=0;iter<7;iter++) {
-        ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
-        writer.setMergeScheduler(cms);
-        writer.setMaxBufferedDocs(2);
-
-        for(int j=0;j<21;j++) {
-          Document doc = new Document();
-          doc.add(new Field("content", "a b c", Field.Store.NO, Field.Index.ANALYZED));
-          writer.addDocument(doc);
-        }
-        
-        writer.close();
-        TestIndexWriter.assertNoUnreferencedFiles(directory, "testNoExtraFiles autoCommit=" + autoCommit);
+    IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);
 
-        // Reopen
-        writer = new IndexWriter(directory, autoCommit, ANALYZER, false);
+    for(int iter=0;iter<7;iter++) {
+      ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
+      writer.setMergeScheduler(cms);
+      writer.setMaxBufferedDocs(2);
+
+      for(int j=0;j<21;j++) {
+        Document doc = new Document();
+        doc.add(new Field("content", "a b c", Field.Store.NO, Field.Index.ANALYZED));
+        writer.addDocument(doc);
       }
-
+        
       writer.close();
+      TestIndexWriter.assertNoUnreferencedFiles(directory, "testNoExtraFiles");
+
+      // Reopen
+      writer = new IndexWriter(directory, ANALYZER, false, IndexWriter.MaxFieldLength.UNLIMITED);
     }
 
+    writer.close();
+
     directory.close();
   }
 
@@ -178,45 +186,41 @@
     Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
     doc.add(idField);
 
-    for(int pass=0;pass<2;pass++) {
-      boolean autoCommit = pass==0;
-      IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true);
-
-      for(int iter=0;iter<10;iter++) {
-        ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
-        writer.setMergeScheduler(cms);
-        writer.setMaxBufferedDocs(2);
-        writer.setMergeFactor(100);
-
-        for(int j=0;j<201;j++) {
-          idField.setValue(Integer.toString(iter*201+j));
-          writer.addDocument(doc);
-        }
+    IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);
 
-        int delID = iter*201;
-        for(int j=0;j<20;j++) {
-          writer.deleteDocuments(new Term("id", Integer.toString(delID)));
-          delID += 5;
-        }
+    for(int iter=0;iter<10;iter++) {
+      ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
+      writer.setMergeScheduler(cms);
+      writer.setMaxBufferedDocs(2);
+      writer.setMergeFactor(100);
 
-        // Force a bunch of merge threads to kick off so we
-        // stress out aborting them on close:
-        writer.setMergeFactor(3);
+      for(int j=0;j<201;j++) {
+        idField.setValue(Integer.toString(iter*201+j));
         writer.addDocument(doc);
-        writer.flush();
+      }
+
+      int delID = iter*201;
+      for(int j=0;j<20;j++) {
+        writer.deleteDocuments(new Term("id", Integer.toString(delID)));
+        delID += 5;
+      }
 
-        writer.close(false);
+      // Force a bunch of merge threads to kick off so we
+      // stress out aborting them on close:
+      writer.setMergeFactor(3);
+      writer.addDocument(doc);
+      writer.flush();
 
-        IndexReader reader = IndexReader.open(directory, true);
-        assertEquals((1+iter)*182, reader.numDocs());
-        reader.close();
+      writer.close(false);
 
-        // Reopen
-        writer = new IndexWriter(directory, autoCommit, ANALYZER, false);
-      }
-      writer.close();
-    }
+      IndexReader reader = IndexReader.open(directory, true);
+      assertEquals((1+iter)*182, reader.numDocs());
+      reader.close();
 
+      // Reopen
+      writer = new IndexWriter(directory, ANALYZER, false, IndexWriter.MaxFieldLength.UNLIMITED);
+    }
+    writer.close();
     directory.close();
   }
 }

Modified: lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestCrash.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestCrash.java?rev=823320&r1=823319&r2=823320&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestCrash.java (original)
+++ lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestCrash.java Thu Oct  8 20:56:40 2009
@@ -35,7 +35,7 @@
   private IndexWriter initIndex(MockRAMDirectory dir) throws IOException {
     dir.setLockFactory(NoLockFactory.getNoLockFactory());
 
-    IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer());
+    IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
     //writer.setMaxBufferedDocs(2);
     writer.setMaxBufferedDocs(10);
     ((ConcurrentMergeScheduler) writer.getMergeScheduler()).setSuppressExceptions();

Modified: lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestDeletionPolicy.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestDeletionPolicy.java?rev=823320&r1=823319&r2=823320&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestDeletionPolicy.java (original)
+++ lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestDeletionPolicy.java Thu Oct  8 20:56:40 2009
@@ -202,12 +202,11 @@
 
     final double SECONDS = 2.0;
 
-    boolean autoCommit = false;
     boolean useCompoundFile = true;
 
     Directory dir = new RAMDirectory();
     ExpirationTimeDeletionPolicy policy = new ExpirationTimeDeletionPolicy(dir, SECONDS);
-    IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setUseCompoundFile(useCompoundFile);
     writer.close();
 
@@ -216,7 +215,7 @@
       // Record last time when writer performed deletes of
       // past commits
       lastDeleteTime = System.currentTimeMillis();
-      writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
+      writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setUseCompoundFile(useCompoundFile);
       for(int j=0;j<17;j++) {
         addDoc(writer);
@@ -267,10 +266,9 @@
    */
   public void testKeepAllDeletionPolicy() throws IOException {
 
-    for(int pass=0;pass<4;pass++) {
+    for(int pass=0;pass<2;pass++) {
 
-      boolean autoCommit = pass < 2;
-      boolean useCompoundFile = (pass % 2) > 0;
+      boolean useCompoundFile = (pass % 2) != 0;
 
       // Never deletes a commit
       KeepAllDeletionPolicy policy = new KeepAllDeletionPolicy();
@@ -278,37 +276,29 @@
       Directory dir = new RAMDirectory();
       policy.dir = dir;
 
-      IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(10);
       writer.setUseCompoundFile(useCompoundFile);
       writer.setMergeScheduler(new SerialMergeScheduler());
       for(int i=0;i<107;i++) {
         addDoc(writer);
-        if (autoCommit && i%10 == 0)
-          writer.commit();
       }
       writer.close();
 
-      writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
+      writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setUseCompoundFile(useCompoundFile);
       writer.optimize();
       writer.close();
 
       assertEquals(2, policy.numOnInit);
-      if (!autoCommit)
-        // If we are not auto committing then there should
-        // be exactly 2 commits (one per close above):
-        assertEquals(2, policy.numOnCommit);
+      // If we are not auto committing then there should
+      // be exactly 2 commits (one per close above):
+      assertEquals(2, policy.numOnCommit);
 
       // Test listCommits
       Collection commits = IndexReader.listCommits(dir);
-      if (!autoCommit)
-        // 1 from opening writer + 2 from closing writer
-        assertEquals(3, commits.size());
-      else
-        // 1 from opening writer + 2 from closing writer +
-        // 11 from calling writer.commit() explicitly above
-        assertEquals(14, commits.size());
+      // 1 from opening writer + 2 from closing writer
+      assertEquals(3, commits.size());
 
       Iterator it = commits.iterator();
       // Make sure we can open a reader on each commit:
@@ -453,16 +443,15 @@
    */
   public void testKeepNoneOnInitDeletionPolicy() throws IOException {
 
-    for(int pass=0;pass<4;pass++) {
+    for(int pass=0;pass<2;pass++) {
 
-      boolean autoCommit = pass < 2;
-      boolean useCompoundFile = (pass % 2) > 0;
+      boolean useCompoundFile = (pass % 2) != 0;
 
       KeepNoneOnInitDeletionPolicy policy = new KeepNoneOnInitDeletionPolicy();
 
       Directory dir = new RAMDirectory();
 
-      IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(10);
       writer.setUseCompoundFile(useCompoundFile);
       for(int i=0;i<107;i++) {
@@ -470,16 +459,15 @@
       }
       writer.close();
 
-      writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
+      writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setUseCompoundFile(useCompoundFile);
       writer.optimize();
       writer.close();
 
       assertEquals(2, policy.numOnInit);
-      if (!autoCommit)
-        // If we are not auto committing then there should
-        // be exactly 2 commits (one per close above):
-        assertEquals(2, policy.numOnCommit);
+      // If we are not auto committing then there should
+      // be exactly 2 commits (one per close above):
+      assertEquals(2, policy.numOnCommit);
 
       // Simplistic check: just verify the index is in fact
       // readable:
@@ -497,17 +485,16 @@
 
     final int N = 5;
 
-    for(int pass=0;pass<4;pass++) {
+    for(int pass=0;pass<2;pass++) {
 
-      boolean autoCommit = pass < 2;
-      boolean useCompoundFile = (pass % 2) > 0;
+      boolean useCompoundFile = (pass % 2) != 0;
 
       Directory dir = new RAMDirectory();
 
       KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);
 
       for(int j=0;j<N+1;j++) {
-        IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
         writer.setMaxBufferedDocs(10);
         writer.setUseCompoundFile(useCompoundFile);
         for(int i=0;i<17;i++) {
@@ -519,11 +506,7 @@
 
       assertTrue(policy.numDelete > 0);
       assertEquals(N+1, policy.numOnInit);
-      if (autoCommit) {
-        assertTrue(policy.numOnCommit > 1);
-      } else {
-        assertEquals(N+1, policy.numOnCommit);
-      }
+      assertEquals(N+1, policy.numOnCommit);
 
       // Simplistic check: just verify only the past N segments_N's still
       // exist, and, I can open a reader on each:
@@ -559,22 +542,21 @@
 
     final int N = 10;
 
-    for(int pass=0;pass<4;pass++) {
+    for(int pass=0;pass<2;pass++) {
 
-      boolean autoCommit = pass < 2;
-      boolean useCompoundFile = (pass % 2) > 0;
+      boolean useCompoundFile = (pass % 2) != 0;
 
       KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);
 
       Directory dir = new RAMDirectory();
-      IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setUseCompoundFile(useCompoundFile);
       writer.close();
       Term searchTerm = new Term("content", "aaa");        
       Query query = new TermQuery(searchTerm);
 
       for(int i=0;i<N+1;i++) {
-        writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
+        writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
         writer.setUseCompoundFile(useCompoundFile);
         for(int j=0;j<17;j++) {
           addDoc(writer);
@@ -591,15 +573,14 @@
         reader.close();
         searcher.close();
       }
-      writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
+      writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setUseCompoundFile(useCompoundFile);
       writer.optimize();
       // this is a commit when autoCommit=false:
       writer.close();
 
       assertEquals(2*(N+2), policy.numOnInit);
-      if (!autoCommit)
-        assertEquals(2*(N+2)-1, policy.numOnCommit);
+      assertEquals(2*(N+2)-1, policy.numOnCommit);
 
       IndexSearcher searcher = new IndexSearcher(dir, false);
       ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
@@ -619,19 +600,17 @@
           // Work backwards in commits on what the expected
           // count should be.  Only check this in the
           // autoCommit false case:
-          if (!autoCommit) {
-            searcher = new IndexSearcher(reader);
-            hits = searcher.search(query, null, 1000).scoreDocs;
-            if (i > 1) {
-              if (i % 2 == 0) {
-                expectedCount += 1;
-              } else {
-                expectedCount -= 17;
-              }
+          searcher = new IndexSearcher(reader);
+          hits = searcher.search(query, null, 1000).scoreDocs;
+          if (i > 1) {
+            if (i % 2 == 0) {
+              expectedCount += 1;
+            } else {
+              expectedCount -= 17;
             }
-            assertEquals(expectedCount, hits.length);
-            searcher.close();
           }
+          assertEquals(expectedCount, hits.length);
+          searcher.close();
           reader.close();
           if (i == N) {
             fail("should have failed on commits before last 5");
@@ -659,15 +638,14 @@
 
     final int N = 10;
 
-    for(int pass=0;pass<4;pass++) {
+    for(int pass=0;pass<2;pass++) {
 
-      boolean autoCommit = pass < 2;
-      boolean useCompoundFile = (pass % 2) > 0;
+      boolean useCompoundFile = (pass % 2) != 0;
 
       KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);
 
       Directory dir = new RAMDirectory();
-      IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(10);
       writer.setUseCompoundFile(useCompoundFile);
       writer.close();
@@ -676,7 +654,7 @@
 
       for(int i=0;i<N+1;i++) {
 
-        writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
+        writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
         writer.setMaxBufferedDocs(10);
         writer.setUseCompoundFile(useCompoundFile);
         for(int j=0;j<17;j++) {
@@ -694,15 +672,14 @@
         reader.close();
         searcher.close();
 
-        writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+        writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
         // This will not commit: there are no changes
         // pending because we opened for "create":
         writer.close();
       }
 
       assertEquals(1+3*(N+1), policy.numOnInit);
-      if (!autoCommit)
-        assertEquals(3*(N+1), policy.numOnCommit);
+      assertEquals(3*(N+1), policy.numOnCommit);
 
       IndexSearcher searcher = new IndexSearcher(dir, false);
       ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
@@ -722,19 +699,18 @@
           // Work backwards in commits on what the expected
           // count should be.  Only check this in the
           // autoCommit false case:
-          if (!autoCommit) {
-            searcher = new IndexSearcher(reader);
-            hits = searcher.search(query, null, 1000).scoreDocs;
-            assertEquals(expectedCount, hits.length);
-            searcher.close();
-            if (expectedCount == 0) {
-              expectedCount = 16;
-            } else if (expectedCount == 16) {
-              expectedCount = 17;
-            } else if (expectedCount == 17) {
-              expectedCount = 0;
-            }
+          searcher = new IndexSearcher(reader);
+          hits = searcher.search(query, null, 1000).scoreDocs;
+          assertEquals(expectedCount, hits.length);
+          searcher.close();
+          if (expectedCount == 0) {
+            expectedCount = 16;
+          } else if (expectedCount == 16) {
+            expectedCount = 17;
+          } else if (expectedCount == 17) {
+            expectedCount = 0;
           }
+
           reader.close();
           if (i == N) {
             fail("should have failed on commits before last " + N);

Modified: lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestIndexWriter.java?rev=823320&r1=823319&r2=823320&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestIndexWriter.java (original)
+++ lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestIndexWriter.java Thu Oct  8 20:56:40 2009
@@ -121,7 +121,7 @@
         reader.close();
 
         // optimize the index and check that the new doc count is correct
-        writer = new IndexWriter(dir, true, new WhitespaceAnalyzer());
+        writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
         assertEquals(100, writer.maxDoc());
         assertEquals(60, writer.numDocs());
         writer.optimize();
@@ -231,7 +231,7 @@
         startDiskUsage += startDir.fileLength(files[i]);
       }
 
-      for(int iter=0;iter<6;iter++) {
+      for(int iter=0;iter<3;iter++) {
 
         if (debug)
           System.out.println("TEST: iter=" + iter);
@@ -239,8 +239,7 @@
         // Start with 100 bytes more than we are currently using:
         long diskFree = diskUsage+100;
 
-        boolean autoCommit = iter % 2 == 0;
-        int method = iter/2;
+        int method = iter;
 
         boolean success = false;
         boolean done = false;
@@ -258,7 +257,7 @@
 
           // Make a new dir that will enforce disk usage:
           MockRAMDirectory dir = new MockRAMDirectory(startDir);
-          writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false);
+          writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.UNLIMITED);
           IOException err = null;
 
           MergeScheduler ms = writer.getMergeScheduler();
@@ -294,12 +293,12 @@
                 rate = 0.0;
               }
               if (debug)
-                testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes autoCommit=" + autoCommit;
+                testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes";
             } else {
               thisDiskFree = 0;
               rate = 0.0;
               if (debug)
-                testName = "disk full test " + methodName + " with unlimited disk space autoCommit=" + autoCommit;
+                testName = "disk full test " + methodName + " with unlimited disk space";
             }
 
             if (debug)
@@ -355,29 +354,6 @@
             // ConcurrentMergeScheduler are done
             _TestUtil.syncConcurrentMerges(writer);
 
-            if (autoCommit) {
-
-              // Whether we succeeded or failed, check that
-              // all un-referenced files were in fact
-              // deleted (ie, we did not create garbage).
-              // Only check this when autoCommit is true:
-              // when it's false, it's expected that there
-              // are unreferenced files (ie they won't be
-              // referenced until the "commit on close").
-              // Just create a new IndexFileDeleter, have it
-              // delete unreferenced files, then verify that
-              // in fact no files were deleted:
-
-              String successStr;
-              if (success) {
-                successStr = "success";
-              } else {
-                successStr = "IOException";
-              }
-              String message = methodName + " failed to delete unreferenced files after " + successStr + " (" + diskFree + " bytes)";
-              assertNoUnreferencedFiles(dir, message);
-            }
-
             if (debug) {
               System.out.println("  now test readers");
             }
@@ -394,10 +370,8 @@
             }
             int result = reader.docFreq(searchTerm);
             if (success) {
-              if (autoCommit && result != END_COUNT) {
-                fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + END_COUNT);
-              } else if (!autoCommit && result != START_COUNT) {
-                fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " [autoCommit = false]");
+              if (result != START_COUNT) {
+                fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT);
               }
             } else {
               // On hitting exception we still may have added
@@ -484,18 +458,17 @@
 
       boolean debug = false;
 
-      for(int pass=0;pass<3;pass++) {
+      for(int pass=0;pass<2;pass++) {
         if (debug)
           System.out.println("TEST: pass=" + pass);
-        boolean autoCommit = pass == 0;
-        boolean doAbort = pass == 2;
+        boolean doAbort = pass == 1;
         long diskFree = 200;
         while(true) {
           if (debug)
             System.out.println("TEST: cycle: diskFree=" + diskFree);
           MockRAMDirectory dir = new MockRAMDirectory();
           dir.setMaxSizeInBytes(diskFree);
-          IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true);
+          IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
 
           MergeScheduler ms = writer.getMergeScheduler();
           if (ms instanceof ConcurrentMergeScheduler)
@@ -535,7 +508,7 @@
 
             _TestUtil.syncConcurrentMerges(ms);
 
-            assertNoUnreferencedFiles(dir, "after disk full during addDocument with autoCommit=" + autoCommit);
+            assertNoUnreferencedFiles(dir, "after disk full during addDocument with");
 
             // Make sure reader can open the index:
             IndexReader.open(dir, true).close();
@@ -2120,15 +2093,14 @@
     Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
     doc.add(idField);
 
-    for(int pass=0;pass<3;pass++) {
-      boolean autoCommit = pass%2 == 0;
-      IndexWriter writer = new IndexWriter(directory, autoCommit, new WhitespaceAnalyzer(), true);
+    for(int pass=0;pass<2;pass++) {
+      IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
 
       //System.out.println("TEST: pass=" + pass + " ac=" + autoCommit + " cms=" + (pass >= 2));
       for(int iter=0;iter<10;iter++) {
         //System.out.println("TEST: iter=" + iter);
         MergeScheduler ms;
-        if (pass >= 2)
+        if (pass == 1)
           ms = new ConcurrentMergeScheduler();
         else
           ms = new SerialMergeScheduler();
@@ -2193,7 +2165,7 @@
         reader.close();
 
         // Reopen
-        writer = new IndexWriter(directory, autoCommit, new WhitespaceAnalyzer(), false);
+        writer = new IndexWriter(directory, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.UNLIMITED);
       }
       writer.close();
     }
@@ -2364,7 +2336,7 @@
 
     for(int iter=0;iter<10;iter++) {
       MockRAMDirectory dir = new MockRAMDirectory();
-      IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer());
+      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
       ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
       // We expect disk full exceptions in the merge threads
       cms.setSuppressExceptions();
@@ -2425,7 +2397,7 @@
   public void _testSingleThreadFailure(MockRAMDirectory.Failure failure) throws IOException {
     MockRAMDirectory dir = new MockRAMDirectory();
 
-    IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer());
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setMaxBufferedDocs(2);
     final Document doc = new Document();
     doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
@@ -2438,6 +2410,7 @@
     try {
       writer.addDocument(doc);
       writer.addDocument(doc);
+      writer.commit();
       fail("did not hit exception");
     } catch (IOException ioe) {
     }
@@ -2725,7 +2698,7 @@
     FailOnlyInSync failure = new FailOnlyInSync();
     dir.failOn(failure);
 
-    IndexWriter writer  = new IndexWriter(dir, true, new WhitespaceAnalyzer());
+    IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
     failure.setDoFail();
 
     ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
@@ -2735,8 +2708,16 @@
     writer.setMaxBufferedDocs(2);
     writer.setMergeFactor(5);
 
-    for (int i = 0; i < 23; i++)
+    for (int i = 0; i < 23; i++) {
       addDoc(writer);
+      if ((i-1)%2 == 0) {
+        try {
+          writer.commit();
+        } catch (IOException ioe) {
+          // expected
+        }
+      }
+    }
 
     cms.sync();
     assertTrue(failure.didFail);
@@ -2753,10 +2734,9 @@
   public void testTermVectorCorruption() throws IOException {
 
     Directory dir = new MockRAMDirectory();
-    for(int iter=0;iter<4;iter++) {
-      final boolean autoCommit = 1==iter/2;
+    for(int iter=0;iter<2;iter++) {
       IndexWriter writer = new IndexWriter(dir,
-                                           autoCommit, new StandardAnalyzer());
+                                           new StandardAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(2);
       writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
       writer.setMergeScheduler(new SerialMergeScheduler());
@@ -2789,7 +2769,7 @@
       reader.close();
 
       writer = new IndexWriter(dir,
-                               autoCommit, new StandardAnalyzer());
+                               new StandardAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(2);
       writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
       writer.setMergeScheduler(new SerialMergeScheduler());
@@ -2805,10 +2785,9 @@
   // LUCENE-1168
   public void testTermVectorCorruption2() throws IOException {
     Directory dir = new MockRAMDirectory();
-    for(int iter=0;iter<4;iter++) {
-      final boolean autoCommit = 1==iter/2;
+    for(int iter=0;iter<2;iter++) {
       IndexWriter writer = new IndexWriter(dir,
-                                           autoCommit, new StandardAnalyzer());
+                                           new StandardAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(2);
       writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
       writer.setMergeScheduler(new SerialMergeScheduler());
@@ -3053,7 +3032,7 @@
   // LUCENE-1179
   public void testEmptyFieldName() throws IOException {
     MockRAMDirectory dir = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer());
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
     Document doc = new Document();
     doc.add(new Field("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
     writer.addDocument(doc);
@@ -4038,7 +4017,7 @@
 
     final List thrown = new ArrayList();
 
-    final IndexWriter writer = new IndexWriter(new MockRAMDirectory(), new StandardAnalyzer()) {
+    final IndexWriter writer = new IndexWriter(new MockRAMDirectory(), new StandardAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED) {
         public void message(final String message) {
           if (message.startsWith("now flush at close") && 0 == thrown.size()) {
             thrown.add(null);
@@ -4328,7 +4307,7 @@
 
   public void testDeadlock() throws Exception {
     MockRAMDirectory dir = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer());
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setMaxBufferedDocs(2);
     Document doc = new Document();
     doc.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,

Modified: lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestIndexWriterDelete.java?rev=823320&r1=823319&r2=823320&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestIndexWriterDelete.java (original)
+++ lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestIndexWriterDelete.java Thu Oct  8 20:56:40 2009
@@ -40,237 +40,217 @@
         "Venice has lots of canals" };
     String[] text = { "Amsterdam", "Venice" };
 
-    for(int pass=0;pass<2;pass++) {
-      boolean autoCommit = (0==pass);
+    Directory dir = new MockRAMDirectory();
+    IndexWriter modifier = new IndexWriter(dir,
+                                           new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    modifier.setUseCompoundFile(true);
+    modifier.setMaxBufferedDeleteTerms(1);
 
-      Directory dir = new MockRAMDirectory();
-      IndexWriter modifier = new IndexWriter(dir, autoCommit,
-                                             new WhitespaceAnalyzer(), true);
-      modifier.setUseCompoundFile(true);
-      modifier.setMaxBufferedDeleteTerms(1);
+    for (int i = 0; i < keywords.length; i++) {
+      Document doc = new Document();
+      doc.add(new Field("id", keywords[i], Field.Store.YES,
+                        Field.Index.NOT_ANALYZED));
+      doc.add(new Field("country", unindexed[i], Field.Store.YES,
+                        Field.Index.NO));
+      doc.add(new Field("contents", unstored[i], Field.Store.NO,
+                        Field.Index.ANALYZED));
+      doc
+        .add(new Field("city", text[i], Field.Store.YES,
+                       Field.Index.ANALYZED));
+      modifier.addDocument(doc);
+    }
+    modifier.optimize();
+    modifier.commit();
 
-      for (int i = 0; i < keywords.length; i++) {
-        Document doc = new Document();
-        doc.add(new Field("id", keywords[i], Field.Store.YES,
-                          Field.Index.NOT_ANALYZED));
-        doc.add(new Field("country", unindexed[i], Field.Store.YES,
-                          Field.Index.NO));
-        doc.add(new Field("contents", unstored[i], Field.Store.NO,
-                          Field.Index.ANALYZED));
-        doc
-          .add(new Field("city", text[i], Field.Store.YES,
-                         Field.Index.ANALYZED));
-        modifier.addDocument(doc);
-      }
-      modifier.optimize();
-      modifier.commit();
+    Term term = new Term("city", "Amsterdam");
+    int hitCount = getHitCount(dir, term);
+    assertEquals(1, hitCount);
+    modifier.deleteDocuments(term);
+    modifier.commit();
+    hitCount = getHitCount(dir, term);
+    assertEquals(0, hitCount);
 
-      Term term = new Term("city", "Amsterdam");
-      int hitCount = getHitCount(dir, term);
-      assertEquals(1, hitCount);
-      modifier.deleteDocuments(term);
-      modifier.commit();
-      hitCount = getHitCount(dir, term);
-      assertEquals(0, hitCount);
+    modifier.close();
+    dir.close();
 
-      modifier.close();
-      dir.close();
-    }
   }
 
   // test when delete terms only apply to disk segments
   public void testNonRAMDelete() throws IOException {
-    for(int pass=0;pass<2;pass++) {
-      boolean autoCommit = (0==pass);
 
-      Directory dir = new MockRAMDirectory();
-      IndexWriter modifier = new IndexWriter(dir, autoCommit,
-                                             new WhitespaceAnalyzer(), true);
-      modifier.setMaxBufferedDocs(2);
-      modifier.setMaxBufferedDeleteTerms(2);
+    Directory dir = new MockRAMDirectory();
+    IndexWriter modifier = new IndexWriter(dir,
+                                           new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    modifier.setMaxBufferedDocs(2);
+    modifier.setMaxBufferedDeleteTerms(2);
 
-      int id = 0;
-      int value = 100;
+    int id = 0;
+    int value = 100;
 
-      for (int i = 0; i < 7; i++) {
-        addDoc(modifier, ++id, value);
-      }
-      modifier.commit();
+    for (int i = 0; i < 7; i++) {
+      addDoc(modifier, ++id, value);
+    }
+    modifier.commit();
 
-      assertEquals(0, modifier.getNumBufferedDocuments());
-      assertTrue(0 < modifier.getSegmentCount());
+    assertEquals(0, modifier.getNumBufferedDocuments());
+    assertTrue(0 < modifier.getSegmentCount());
 
-      modifier.commit();
+    modifier.commit();
 
-      IndexReader reader = IndexReader.open(dir, true);
-      assertEquals(7, reader.numDocs());
-      reader.close();
+    IndexReader reader = IndexReader.open(dir, true);
+    assertEquals(7, reader.numDocs());
+    reader.close();
 
-      modifier.deleteDocuments(new Term("value", String.valueOf(value)));
+    modifier.deleteDocuments(new Term("value", String.valueOf(value)));
 
-      modifier.commit();
+    modifier.commit();
 
-      reader = IndexReader.open(dir, true);
-      assertEquals(0, reader.numDocs());
-      reader.close();
-      modifier.close();
-      dir.close();
-    }
+    reader = IndexReader.open(dir, true);
+    assertEquals(0, reader.numDocs());
+    reader.close();
+    modifier.close();
+    dir.close();
   }
 
   public void testMaxBufferedDeletes() throws IOException {
-    for(int pass=0;pass<2;pass++) {
-      boolean autoCommit = (0==pass);
-      Directory dir = new MockRAMDirectory();
-      IndexWriter writer = new IndexWriter(dir, autoCommit,
-                                           new WhitespaceAnalyzer(), true);
-      writer.setMaxBufferedDeleteTerms(1);
-      writer.deleteDocuments(new Term("foobar", "1"));
-      writer.deleteDocuments(new Term("foobar", "1"));
-      writer.deleteDocuments(new Term("foobar", "1"));
-      assertEquals(3, writer.getFlushDeletesCount());
-      writer.close();
-      dir.close();
-    }
+    Directory dir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir,
+                                         new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    writer.setMaxBufferedDeleteTerms(1);
+    writer.deleteDocuments(new Term("foobar", "1"));
+    writer.deleteDocuments(new Term("foobar", "1"));
+    writer.deleteDocuments(new Term("foobar", "1"));
+    assertEquals(3, writer.getFlushDeletesCount());
+    writer.close();
+    dir.close();
   }
 
   // test when delete terms only apply to ram segments
   public void testRAMDeletes() throws IOException {
-    for(int pass=0;pass<2;pass++) {
-      for(int t=0;t<2;t++) {
-        boolean autoCommit = (0==pass);
-        Directory dir = new MockRAMDirectory();
-        IndexWriter modifier = new IndexWriter(dir, autoCommit,
-                                               new WhitespaceAnalyzer(), true);
-        modifier.setMaxBufferedDocs(4);
-        modifier.setMaxBufferedDeleteTerms(4);
+    for(int t=0;t<2;t++) {
+      Directory dir = new MockRAMDirectory();
+      IndexWriter modifier = new IndexWriter(dir,
+                                             new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+      modifier.setMaxBufferedDocs(4);
+      modifier.setMaxBufferedDeleteTerms(4);
 
-        int id = 0;
-        int value = 100;
+      int id = 0;
+      int value = 100;
 
-        addDoc(modifier, ++id, value);
-        if (0 == t)
-          modifier.deleteDocuments(new Term("value", String.valueOf(value)));
-        else
-          modifier.deleteDocuments(new TermQuery(new Term("value", String.valueOf(value))));
-        addDoc(modifier, ++id, value);
-        if (0 == t) {
-          modifier.deleteDocuments(new Term("value", String.valueOf(value)));
-          assertEquals(2, modifier.getNumBufferedDeleteTerms());
-          assertEquals(1, modifier.getBufferedDeleteTermsSize());
-        }
-        else
-          modifier.deleteDocuments(new TermQuery(new Term("value", String.valueOf(value))));
+      addDoc(modifier, ++id, value);
+      if (0 == t)
+        modifier.deleteDocuments(new Term("value", String.valueOf(value)));
+      else
+        modifier.deleteDocuments(new TermQuery(new Term("value", String.valueOf(value))));
+      addDoc(modifier, ++id, value);
+      if (0 == t) {
+        modifier.deleteDocuments(new Term("value", String.valueOf(value)));
+        assertEquals(2, modifier.getNumBufferedDeleteTerms());
+        assertEquals(1, modifier.getBufferedDeleteTermsSize());
+      }
+      else
+        modifier.deleteDocuments(new TermQuery(new Term("value", String.valueOf(value))));
 
-        addDoc(modifier, ++id, value);
-        assertEquals(0, modifier.getSegmentCount());
-        modifier.flush();
+      addDoc(modifier, ++id, value);
+      assertEquals(0, modifier.getSegmentCount());
+      modifier.flush();
 
-        modifier.commit();
+      modifier.commit();
 
-        IndexReader reader = IndexReader.open(dir, true);
-        assertEquals(1, reader.numDocs());
+      IndexReader reader = IndexReader.open(dir, true);
+      assertEquals(1, reader.numDocs());
 
-        int hitCount = getHitCount(dir, new Term("id", String.valueOf(id)));
-        assertEquals(1, hitCount);
-        reader.close();
-        modifier.close();
-        dir.close();
-      }
+      int hitCount = getHitCount(dir, new Term("id", String.valueOf(id)));
+      assertEquals(1, hitCount);
+      reader.close();
+      modifier.close();
+      dir.close();
     }
   }
 
   // test when delete terms apply to both disk and ram segments
   public void testBothDeletes() throws IOException {
-    for(int pass=0;pass<2;pass++) {
-      boolean autoCommit = (0==pass);
 
-      Directory dir = new MockRAMDirectory();
-      IndexWriter modifier = new IndexWriter(dir, autoCommit,
-                                             new WhitespaceAnalyzer(), true);
-      modifier.setMaxBufferedDocs(100);
-      modifier.setMaxBufferedDeleteTerms(100);
+    Directory dir = new MockRAMDirectory();
+    IndexWriter modifier = new IndexWriter(dir,
+                                           new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    modifier.setMaxBufferedDocs(100);
+    modifier.setMaxBufferedDeleteTerms(100);
 
-      int id = 0;
-      int value = 100;
+    int id = 0;
+    int value = 100;
 
-      for (int i = 0; i < 5; i++) {
-        addDoc(modifier, ++id, value);
-      }
+    for (int i = 0; i < 5; i++) {
+      addDoc(modifier, ++id, value);
+    }
 
-      value = 200;
-      for (int i = 0; i < 5; i++) {
-        addDoc(modifier, ++id, value);
-      }
-      modifier.commit();
+    value = 200;
+    for (int i = 0; i < 5; i++) {
+      addDoc(modifier, ++id, value);
+    }
+    modifier.commit();
 
-      for (int i = 0; i < 5; i++) {
-        addDoc(modifier, ++id, value);
-      }
-      modifier.deleteDocuments(new Term("value", String.valueOf(value)));
+    for (int i = 0; i < 5; i++) {
+      addDoc(modifier, ++id, value);
+    }
+    modifier.deleteDocuments(new Term("value", String.valueOf(value)));
 
-      modifier.commit();
+    modifier.commit();
 
-      IndexReader reader = IndexReader.open(dir, true);
-      assertEquals(5, reader.numDocs());
-      modifier.close();
-    }
+    IndexReader reader = IndexReader.open(dir, true);
+    assertEquals(5, reader.numDocs());
+    modifier.close();
   }
 
   // test that batched delete terms are flushed together
   public void testBatchDeletes() throws IOException {
-    for(int pass=0;pass<2;pass++) {
-      boolean autoCommit = (0==pass);
-      Directory dir = new MockRAMDirectory();
-      IndexWriter modifier = new IndexWriter(dir, autoCommit,
-                                             new WhitespaceAnalyzer(), true);
-      modifier.setMaxBufferedDocs(2);
-      modifier.setMaxBufferedDeleteTerms(2);
+    Directory dir = new MockRAMDirectory();
+    IndexWriter modifier = new IndexWriter(dir,
+                                           new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    modifier.setMaxBufferedDocs(2);
+    modifier.setMaxBufferedDeleteTerms(2);
 
-      int id = 0;
-      int value = 100;
+    int id = 0;
+    int value = 100;
 
-      for (int i = 0; i < 7; i++) {
-        addDoc(modifier, ++id, value);
-      }
-      modifier.commit();
+    for (int i = 0; i < 7; i++) {
+      addDoc(modifier, ++id, value);
+    }
+    modifier.commit();
 
-      IndexReader reader = IndexReader.open(dir, true);
-      assertEquals(7, reader.numDocs());
-      reader.close();
+    IndexReader reader = IndexReader.open(dir, true);
+    assertEquals(7, reader.numDocs());
+    reader.close();
       
-      id = 0;
-      modifier.deleteDocuments(new Term("id", String.valueOf(++id)));
-      modifier.deleteDocuments(new Term("id", String.valueOf(++id)));
+    id = 0;
+    modifier.deleteDocuments(new Term("id", String.valueOf(++id)));
+    modifier.deleteDocuments(new Term("id", String.valueOf(++id)));
 
-      modifier.commit();
-
-      reader = IndexReader.open(dir, true);
-      assertEquals(5, reader.numDocs());
-      reader.close();
+    modifier.commit();
 
-      Term[] terms = new Term[3];
-      for (int i = 0; i < terms.length; i++) {
-        terms[i] = new Term("id", String.valueOf(++id));
-      }
-      modifier.deleteDocuments(terms);
-      modifier.commit();
-      reader = IndexReader.open(dir, true);
-      assertEquals(2, reader.numDocs());
-      reader.close();
+    reader = IndexReader.open(dir, true);
+    assertEquals(5, reader.numDocs());
+    reader.close();
 
-      modifier.close();
-      dir.close();
+    Term[] terms = new Term[3];
+    for (int i = 0; i < terms.length; i++) {
+      terms[i] = new Term("id", String.valueOf(++id));
     }
+    modifier.deleteDocuments(terms);
+    modifier.commit();
+    reader = IndexReader.open(dir, true);
+    assertEquals(2, reader.numDocs());
+    reader.close();
+
+    modifier.close();
+    dir.close();
   }
 
   // test deleteAll()
   public void testDeleteAll() throws IOException {
-    for (int pass=0;pass<2;pass++) {
-      boolean autoCommit = (0==pass);
       Directory dir = new MockRAMDirectory();
-      IndexWriter modifier = new IndexWriter(dir, autoCommit,
-                                             new WhitespaceAnalyzer(), true);
+      IndexWriter modifier = new IndexWriter(dir,
+                                             new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
       modifier.setMaxBufferedDocs(2);
       modifier.setMaxBufferedDeleteTerms(2);
 
@@ -311,14 +291,13 @@
 
       modifier.close();
       dir.close();
-    }
   }
 
   // test rollback of deleteAll()
   public void testDeleteAllRollback() throws IOException {
     Directory dir = new MockRAMDirectory();
-    IndexWriter modifier = new IndexWriter(dir, false,
-                                           new WhitespaceAnalyzer(), true);
+    IndexWriter modifier = new IndexWriter(dir,
+                                           new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
     modifier.setMaxBufferedDocs(2);
     modifier.setMaxBufferedDeleteTerms(2);
     
@@ -355,8 +334,8 @@
   // test deleteAll() w/ near real-time reader
   public void testDeleteAllNRT() throws IOException {
     Directory dir = new MockRAMDirectory();
-    IndexWriter modifier = new IndexWriter(dir, false,
-                                           new WhitespaceAnalyzer(), true);
+    IndexWriter modifier = new IndexWriter(dir,
+                                           new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
     modifier.setMaxBufferedDocs(2);
     modifier.setMaxBufferedDeleteTerms(2);
     
@@ -445,13 +424,10 @@
     int START_COUNT = 157;
     int END_COUNT = 144;
 
-    for(int pass=0;pass<2;pass++) {
-      boolean autoCommit = (0==pass);
-
       // First build up a starting index:
       MockRAMDirectory startDir = new MockRAMDirectory();
-      IndexWriter writer = new IndexWriter(startDir, autoCommit,
-                                           new WhitespaceAnalyzer(), true);
+      IndexWriter writer = new IndexWriter(startDir,
+                                           new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
       for (int i = 0; i < 157; i++) {
         Document d = new Document();
         d.add(new Field("id", Integer.toString(i), Field.Store.YES,
@@ -473,8 +449,8 @@
       while (!done) {
         MockRAMDirectory dir = new MockRAMDirectory(startDir);
         dir.setPreventDoubleWrite(false);
-        IndexWriter modifier = new IndexWriter(dir, autoCommit,
-                                               new WhitespaceAnalyzer());
+        IndexWriter modifier = new IndexWriter(dir,
+                                               new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
 
         modifier.setMaxBufferedDocs(1000); // use flush or close
         modifier.setMaxBufferedDeleteTerms(1000); // use flush or close
@@ -626,7 +602,6 @@
         // Try again with 10 more bytes of free space:
         diskFree += 10;
       }
-    }
   }
 
   // This test tests that buffered deletes are cleared when
@@ -677,11 +652,9 @@
         "Venice has lots of canals" };
     String[] text = { "Amsterdam", "Venice" };
 
-    for(int pass=0;pass<2;pass++) {
-      boolean autoCommit = (0==pass);
       MockRAMDirectory dir = new MockRAMDirectory();
-      IndexWriter modifier = new IndexWriter(dir, autoCommit,
-                                             new WhitespaceAnalyzer(), true);
+      IndexWriter modifier = new IndexWriter(dir,
+                                             new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
       modifier.setUseCompoundFile(true);
       modifier.setMaxBufferedDeleteTerms(2);
 
@@ -757,7 +730,6 @@
 
       modifier.close();
       dir.close();
-    }
   }
 
   // This test tests that the files created by the docs writer before
@@ -787,11 +759,9 @@
         "Venice has lots of canals" };
     String[] text = { "Amsterdam", "Venice" };
 
-    for(int pass=0;pass<2;pass++) {
-      boolean autoCommit = (0==pass);
       MockRAMDirectory dir = new MockRAMDirectory();
-      IndexWriter modifier = new IndexWriter(dir, autoCommit,
-                                             new WhitespaceAnalyzer(), true);
+      IndexWriter modifier = new IndexWriter(dir,
+                                             new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
 
       dir.failOn(failure.reset());
 
@@ -825,9 +795,6 @@
       }
 
       modifier.close();
-
-    }
-
   }
 
   private String arrayToString(String[] l) {

Modified: lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java?rev=823320&r1=823319&r2=823320&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java (original)
+++ lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java Thu Oct  8 20:56:40 2009
@@ -125,7 +125,7 @@
   public void testMaxBufferedDocsChange() throws IOException {
     Directory dir = new RAMDirectory();
 
-    IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), true);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setMaxBufferedDocs(101);
     writer.setMergeFactor(101);
     writer.setMergePolicy(new LogDocMergePolicy(writer));
@@ -139,7 +139,7 @@
       }
       writer.close();
 
-      writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), false);
+      writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(101);
       writer.setMergeFactor(101);
       writer.setMergePolicy(new LogDocMergePolicy(writer));
@@ -158,6 +158,9 @@
     for (int i = 100; i < 1000; i++) {
       addDoc(writer);
     }
+    writer.commit();
+    ((ConcurrentMergeScheduler) writer.getMergeScheduler()).sync();
+    writer.commit();
     checkInvariants(writer);
 
     writer.close();
@@ -167,7 +170,7 @@
   public void testMergeDocCount0() throws IOException {
     Directory dir = new RAMDirectory();
 
-    IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), true);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setMergePolicy(new LogDocMergePolicy(writer));
     writer.setMaxBufferedDocs(10);
     writer.setMergeFactor(100);
@@ -182,7 +185,7 @@
     reader.deleteDocuments(new Term("content", "aaa"));
     reader.close();
 
-    writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), false);
+    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setMergePolicy(new LogDocMergePolicy(writer));
     writer.setMaxBufferedDocs(10);
     writer.setMergeFactor(5);
@@ -191,6 +194,9 @@
     for (int i = 0; i < 10; i++) {
       addDoc(writer);
     }
+    writer.commit();
+    ((ConcurrentMergeScheduler) writer.getMergeScheduler()).sync();
+    writer.commit();
     checkInvariants(writer);
     assertEquals(10, writer.docCount());
 

Modified: lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestStressIndexing.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestStressIndexing.java?rev=823320&r1=823319&r2=823320&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestStressIndexing.java (original)
+++ lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestStressIndexing.java Thu Oct  8 20:56:40 2009
@@ -115,8 +115,8 @@
     Run one indexer and 2 searchers against single index as
     stress test.
   */
-  public void runStressTest(Directory directory, boolean autoCommit, MergeScheduler mergeScheduler) throws Exception {
-    IndexWriter modifier = new IndexWriter(directory, autoCommit, ANALYZER, true);
+  public void runStressTest(Directory directory, MergeScheduler mergeScheduler) throws Exception {
+    IndexWriter modifier = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);
 
     modifier.setMaxBufferedDocs(10);
 
@@ -166,35 +166,20 @@
   public void testStressIndexAndSearching() throws Exception {
     RANDOM = newRandom();
 
-    // RAMDir
-    Directory directory = new MockRAMDirectory();
-    runStressTest(directory, true, null);
-    directory.close();
-
-    // FSDir
-    File dirPath = _TestUtil.getTempDir("lucene.test.stress");
-    directory = FSDirectory.open(dirPath);
-    runStressTest(directory, true, null);
-    directory.close();
-
-    // With ConcurrentMergeScheduler, in RAMDir
-    directory = new MockRAMDirectory();
-    runStressTest(directory, true, new ConcurrentMergeScheduler());
-    directory.close();
-
     // With ConcurrentMergeScheduler, in FSDir
-    directory = FSDirectory.open(dirPath);
-    runStressTest(directory, true, new ConcurrentMergeScheduler());
+    File dirPath = _TestUtil.getTempDir("lucene.test.stress");
+    Directory directory = FSDirectory.open(dirPath);
+    runStressTest(directory, new ConcurrentMergeScheduler());
     directory.close();
 
     // With ConcurrentMergeScheduler, in RAMDir
     directory = new MockRAMDirectory();
-    runStressTest(directory, false, new ConcurrentMergeScheduler());
+    runStressTest(directory, new ConcurrentMergeScheduler());
     directory.close();
 
     // With ConcurrentMergeScheduler, in FSDir
     directory = FSDirectory.open(dirPath);
-    runStressTest(directory, false, new ConcurrentMergeScheduler());
+    runStressTest(directory, new ConcurrentMergeScheduler());
     directory.close();
 
     _TestUtil.rmDir(dirPath);

Modified: lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestStressIndexing2.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestStressIndexing2.java?rev=823320&r1=823319&r2=823320&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestStressIndexing2.java (original)
+++ lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestStressIndexing2.java Thu Oct  8 20:56:40 2009
@@ -32,7 +32,6 @@
   static int maxFields=4;
   static int bigFieldSize=10;
   static boolean sameFieldOrder=false;
-  static boolean autoCommit=false;
   static int mergeFactor=3;
   static int maxBufferedDocs=3;
   static int seed=0;
@@ -41,8 +40,8 @@
 
   public class MockIndexWriter extends IndexWriter {
 
-    public MockIndexWriter(Directory dir, boolean autoCommit, Analyzer a, boolean create) throws IOException {
-      super(dir, autoCommit, a, create);
+    public MockIndexWriter(Directory dir, Analyzer a, boolean create, IndexWriter.MaxFieldLength mfl) throws IOException {
+      super(dir, a, create, mfl);
     }
 
     boolean testPoint(String name) {
@@ -88,7 +87,6 @@
     r = newRandom();
     for (int i=0; i<100; i++) {  // increase iterations for better testing
       sameFieldOrder=r.nextBoolean();
-      autoCommit=r.nextBoolean();
       mergeFactor=r.nextInt(3)+2;
       maxBufferedDocs=r.nextInt(3)+2;
       seed++;
@@ -124,7 +122,7 @@
   
   public DocsAndWriter indexRandomIWReader(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
     Map docs = new HashMap();
-    IndexWriter w = new MockIndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true);
+    IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
     w.setUseCompoundFile(false);
 
     /***
@@ -176,7 +174,7 @@
   public Map indexRandom(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
     Map docs = new HashMap();
     for(int iter=0;iter<3;iter++) {
-      IndexWriter w = new MockIndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true);
+      IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
       w.setUseCompoundFile(false);
 
       // force many merges

Modified: lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestThreadedOptimize.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestThreadedOptimize.java?rev=823320&r1=823319&r2=823320&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestThreadedOptimize.java (original)
+++ lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/index/TestThreadedOptimize.java Thu Oct  8 20:56:40 2009
@@ -51,9 +51,9 @@
     failed = true;
   }
 
-  public void runTest(Directory directory, boolean autoCommit, MergeScheduler merger) throws Exception {
+  public void runTest(Directory directory, MergeScheduler merger) throws Exception {
 
-    IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true);
+    IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setMaxBufferedDocs(2);
     if (merger != null)
       writer.setMergeScheduler(merger);
@@ -118,11 +118,9 @@
 
       assertEquals(expectedDocCount, writer.docCount());
 
-      if (!autoCommit) {
-        writer.close();
-        writer = new IndexWriter(directory, autoCommit, ANALYZER, false);
-        writer.setMaxBufferedDocs(2);
-      }
+      writer.close();
+      writer = new IndexWriter(directory, ANALYZER, false, IndexWriter.MaxFieldLength.UNLIMITED);
+      writer.setMaxBufferedDocs(2);
 
       IndexReader reader = IndexReader.open(directory, true);
       assertTrue(reader.isOptimized());
@@ -138,10 +136,8 @@
   */
   public void testThreadedOptimize() throws Exception {
     Directory directory = new MockRAMDirectory();
-    runTest(directory, false, new SerialMergeScheduler());
-    runTest(directory, true, new SerialMergeScheduler());
-    runTest(directory, false, new ConcurrentMergeScheduler());
-    runTest(directory, true, new ConcurrentMergeScheduler());
+    runTest(directory, new SerialMergeScheduler());
+    runTest(directory, new ConcurrentMergeScheduler());
     directory.close();
 
     String tempDir = System.getProperty("tempDir");
@@ -150,10 +146,8 @@
 
     String dirName = tempDir + "/luceneTestThreadedOptimize";
     directory = FSDirectory.open(new File(dirName));
-    runTest(directory, false, new SerialMergeScheduler());
-    runTest(directory, true, new SerialMergeScheduler());
-    runTest(directory, false, new ConcurrentMergeScheduler());
-    runTest(directory, true, new ConcurrentMergeScheduler());
+    runTest(directory, new SerialMergeScheduler());
+    runTest(directory, new ConcurrentMergeScheduler());
     directory.close();
     _TestUtil.rmDir(dirName);
   }

Modified: lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/search/payloads/PayloadHelper.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/search/payloads/PayloadHelper.java?rev=823320&r1=823319&r2=823320&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/search/payloads/PayloadHelper.java (original)
+++ lucene/java/branches/lucene_2_9_back_compat_tests/src/test/org/apache/lucene/search/payloads/PayloadHelper.java Thu Oct  8 20:56:40 2009
@@ -103,7 +103,7 @@
     RAMDirectory directory = new RAMDirectory();
     PayloadAnalyzer analyzer = new PayloadAnalyzer();
     IndexWriter writer
-            = new IndexWriter(directory, analyzer, true);
+            = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setSimilarity(similarity);
     //writer.infoStream = System.out;
     for (int i = 0; i < numDocs; i++) {



Mime
View raw message