lucene-java-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From mikemcc...@apache.org
Subject svn commit: r922583 [3/8] - in /lucene/java/trunk: ./ contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/ contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ contrib/ant/src/java/org/apache/lucene/ant/ contrib/benchmar...
Date Sat, 13 Mar 2010 15:32:53 GMT
Added: lucene/java/trunk/src/java/org/apache/lucene/index/IndexWriterConfig.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/IndexWriterConfig.java?rev=922583&view=auto
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/IndexWriterConfig.java (added)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/IndexWriterConfig.java Sat Mar 13 15:32:48 2010
@@ -0,0 +1,518 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.index.DocumentsWriter.IndexingChain;
+import org.apache.lucene.index.IndexWriter.IndexReaderWarmer;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.util.Version;
+
+/**
+ * Holds all the configuration of {@link IndexWriter}. This object is only used
+ * while constructing a new IndexWriter. These settings cannot be changed
+ * afterwards; to change them, a new IndexWriter must be instantiated.
+ * <p>
+ * All setter methods return {@link IndexWriterConfig} to allow chaining
+ * settings conveniently. Thus someone can do:
+ * 
+ * <pre>
+ * IndexWriterConfig conf = new IndexWriterConfig(analyzer);
+ * conf.setter1().setter2();
+ * </pre>
+ * 
+ * @since 3.1
+ */
+public final class IndexWriterConfig implements Cloneable {
+
+  public static final int UNLIMITED_FIELD_LENGTH = Integer.MAX_VALUE;
+
+  /**
+   * Specifies the open mode for {@link IndexWriter}:
+   * <ul>
+   * <li>{@link #CREATE} - creates a new index or overwrites an existing one.</li>
+   * <li>{@link #CREATE_OR_APPEND} - creates a new index if one does not exist,
+   * otherwise it opens the index and documents will be appended.</li>
+   * <li>{@link #APPEND} - opens an existing index.</li>
+   * </ul>
+   */
+  public static enum OpenMode { CREATE, APPEND, CREATE_OR_APPEND }
+  
+  /** Default value is 128. Change using {@link #setTermIndexInterval(int)}. */
+  public static final int DEFAULT_TERM_INDEX_INTERVAL = 128;
+
+  /** Denotes a flush trigger is disabled. */
+  public final static int DISABLE_AUTO_FLUSH = -1;
+
+  /** Disabled by default (because IndexWriter flushes by RAM usage by default). */
+  public final static int DEFAULT_MAX_BUFFERED_DELETE_TERMS = DISABLE_AUTO_FLUSH;
+
+  /** Disabled by default (because IndexWriter flushes by RAM usage by default). */
+  public final static int DEFAULT_MAX_BUFFERED_DOCS = DISABLE_AUTO_FLUSH;
+
+  /**
+   * Default value is 16 MB (which means flush when buffered docs consume
+   * approximately 16 MB RAM).
+   */
+  public final static double DEFAULT_RAM_BUFFER_SIZE_MB = 16.0;
+
+  /**
+   * Default value for the write lock timeout (1,000 ms).
+   * 
+   * @see #setDefaultWriteLockTimeout(long)
+   */
+  public static long WRITE_LOCK_TIMEOUT = 1000;
+
+  /**
+   * Sets the default (for any instance) maximum time to wait for a write lock
+   * (in milliseconds).
+   */
+  public static void setDefaultWriteLockTimeout(long writeLockTimeout) {
+    WRITE_LOCK_TIMEOUT = writeLockTimeout;
+  }
+
+  /**
+   * Returns the default write lock timeout for newly instantiated
+   * IndexWriterConfigs.
+   * 
+   * @see #setDefaultWriteLockTimeout(long)
+   */
+  public static long getDefaultWriteLockTimeout() {
+    return WRITE_LOCK_TIMEOUT;
+  }
+
+  private Analyzer analyzer;
+  private IndexDeletionPolicy delPolicy;
+  private IndexCommit commit;
+  private OpenMode openMode;
+  private int maxFieldLength;
+  private Similarity similarity;
+  private int termIndexInterval;
+  private MergeScheduler mergeScheduler;
+  private long writeLockTimeout;
+  private int maxBufferedDeleteTerms;
+  private double ramBufferSizeMB;
+  private int maxBufferedDocs;
+  private IndexingChain indexingChain;
+  private IndexReaderWarmer mergedSegmentWarmer;
+  
+  // required for clone
+  private Version matchVersion;
+
+  /**
+   * Creates a new config with defaults that match the specified
+   * {@link Version} as well as the default {@link Analyzer}. {@link Version} is
+   * a placeholder for future changes. The default settings are relevant to 3.1
+   * and before. In the future, if different settings will apply to different
+   * versions, they will be documented here.
+   */
+  public IndexWriterConfig(Version matchVersion, Analyzer analyzer) {
+    this.matchVersion = matchVersion;
+    this.analyzer = analyzer;
+    delPolicy = new KeepOnlyLastCommitDeletionPolicy();
+    commit = null;
+    openMode = OpenMode.CREATE_OR_APPEND;
+    maxFieldLength = UNLIMITED_FIELD_LENGTH;
+    similarity = Similarity.getDefault();
+    termIndexInterval = DEFAULT_TERM_INDEX_INTERVAL;
+    mergeScheduler = new ConcurrentMergeScheduler();
+    writeLockTimeout = WRITE_LOCK_TIMEOUT;
+    maxBufferedDeleteTerms = DEFAULT_MAX_BUFFERED_DELETE_TERMS;
+    ramBufferSizeMB = DEFAULT_RAM_BUFFER_SIZE_MB;
+    maxBufferedDocs = DEFAULT_MAX_BUFFERED_DOCS;
+    indexingChain = DocumentsWriter.defaultIndexingChain;
+    mergedSegmentWarmer = null;
+  }
+  
+  @Override
+  public Object clone() {
+    // Shallow clone is the only thing that's possible, since parameters like
+    // analyzer, index commit etc. do not implement Cloneable.
+    try {
+      return super.clone();
+    } catch (CloneNotSupportedException e) {
+      // should not happen
+      throw new RuntimeException(e);
+    }
+  }
+
+  /** Returns the default analyzer to use for indexing documents. */
+  public Analyzer getAnalyzer() {
+    return analyzer;
+  }
+
+  /** Specifies {@link OpenMode} of that index. */
+  public IndexWriterConfig setOpenMode(OpenMode openMode) {
+    this.openMode = openMode;
+    return this;
+  }
+  
+  /** Returns the {@link OpenMode} set by {@link #setOpenMode(OpenMode)}. */
+  public OpenMode getOpenMode() {
+    return openMode;
+  }
+
+  /**
+   * Expert: allows an optional {@link IndexDeletionPolicy} implementation to be
+   * specified. You can use this to control when prior commits are deleted from
+   * the index. The default policy is {@link KeepOnlyLastCommitDeletionPolicy}
+   * which removes all prior commits as soon as a new commit is done (this
+   * matches behavior before 2.2). Creating your own policy can allow you to
+   * explicitly keep previous "point in time" commits alive in the index for
+   * some time, to allow readers to refresh to the new commit without having the
+   * old commit deleted out from under them. This is necessary on filesystems
+   * like NFS that do not support "delete on last close" semantics, which
+   * Lucene's "point in time" search normally relies on.
+   * <p>
+   * <b>NOTE:</b> the deletion policy cannot be null. If <code>null</code> is
+   * passed, the deletion policy will be set to the default.
+   */
+  public IndexWriterConfig setIndexDeletionPolicy(IndexDeletionPolicy delPolicy) {
+    this.delPolicy = delPolicy == null ? new KeepOnlyLastCommitDeletionPolicy() : delPolicy;
+    return this;
+  }
+
+  /**
+   * Returns the {@link IndexDeletionPolicy} specified in
+   * {@link #setIndexDeletionPolicy(IndexDeletionPolicy)} or the default
+   * {@link KeepOnlyLastCommitDeletionPolicy}.
+   */
+  public IndexDeletionPolicy getIndexDeletionPolicy() {
+    return delPolicy;
+  }
+
+  /**
+   * The maximum number of terms that will be indexed for a single field in a
+   * document. This limits the amount of memory required for indexing, so that
+   * collections with very large files will not crash the indexing process by
+   * running out of memory. This setting refers to the number of running terms,
+   * not to the number of different terms.
+   * <p>
+   * <b>NOTE:</b> this silently truncates large documents, excluding from the
+   * index all terms that occur further in the document. If you know your source
+   * documents are large, be sure to set this value high enough to accommodate
+   * the expected size. If you set it to {@link #UNLIMITED_FIELD_LENGTH}, then
+   * the only limit is your memory, but you should anticipate an
+   * OutOfMemoryError.
+   * <p>
+   * By default it is set to {@link #UNLIMITED_FIELD_LENGTH}.
+   */
+  public IndexWriterConfig setMaxFieldLength(int maxFieldLength) {
+    this.maxFieldLength = maxFieldLength;
+    return this;
+  }
+
+  /**
+   * Returns the maximum number of terms that will be indexed for a single field
+   * in a document.
+   * 
+   * @see #setMaxFieldLength(int)
+   */
+  public int getMaxFieldLength() {
+    return maxFieldLength;
+  }
+
+  /**
+   * Expert: allows to open a certain commit point. The default is null which
+   * opens the latest commit point.
+   */
+  public IndexWriterConfig setIndexCommit(IndexCommit commit) {
+    this.commit = commit;
+    return this;
+  }
+
+  /**
+   * Returns the {@link IndexCommit} as specified in
+   * {@link #setIndexCommit(IndexCommit)} or the default, <code>null</code>
+   * which specifies to open the latest index commit point.
+   */
+  public IndexCommit getIndexCommit() {
+    return commit;
+  }
+
+  /**
+   * Expert: set the {@link Similarity} implementation used by this IndexWriter.
+   * <p>
+   * <b>NOTE:</b> the similarity cannot be null. If <code>null</code> is passed,
+   * the similarity will be set to the default.
+   * 
+   * @see Similarity#setDefault(Similarity)
+   */
+  public IndexWriterConfig setSimilarity(Similarity similarity) {
+    this.similarity = similarity == null ? Similarity.getDefault() : similarity;
+    return this;
+  }
+
+  /**
+   * Expert: returns the {@link Similarity} implementation used by this
+   * IndexWriter. This defaults to the current value of
+   * {@link Similarity#getDefault()}.
+   */
+  public Similarity getSimilarity() {
+    return similarity;
+  }
+  
+  /**
+   * Expert: set the interval between indexed terms. Large values cause less
+   * memory to be used by IndexReader, but slow random-access to terms. Small
+   * values cause more memory to be used by an IndexReader, and speed
+   * random-access to terms.
+   * <p>
+   * This parameter determines the amount of computation required per query
+   * term, regardless of the number of documents that contain that term. In
+   * particular, it is the maximum number of other terms that must be scanned
+   * before a term is located and its frequency and position information may be
+   * processed. In a large index with user-entered query terms, query processing
+   * time is likely to be dominated not by term lookup but rather by the
+   * processing of frequency and positional data. In a small index or when many
+   * uncommon query terms are generated (e.g., by wildcard queries) term lookup
+   * may become a dominant cost.
+   * <p>
+   * In particular, <code>numUniqueTerms/interval</code> terms are read into
+   * memory by an IndexReader, and, on average, <code>interval/2</code> terms
+   * must be scanned for each random term access.
+   * 
+   * @see #DEFAULT_TERM_INDEX_INTERVAL
+   */
+  public IndexWriterConfig setTermIndexInterval(int interval) {
+    this.termIndexInterval = interval;
+    return this;
+  }
+
+  /**
+   * Returns the interval between indexed terms.
+   * 
+   * @see #setTermIndexInterval(int)
+   */
+  public int getTermIndexInterval() {
+    return termIndexInterval;
+  }
+
+  /**
+   * Expert: sets the merge scheduler used by this writer. The default is
+   * {@link ConcurrentMergeScheduler}.
+   * <p>
+   * <b>NOTE:</b> the merge scheduler cannot be null. If <code>null</code> is
+   * passed, the merge scheduler will be set to the default.
+   */
+  public IndexWriterConfig setMergeScheduler(MergeScheduler mergeScheduler) {
+    this.mergeScheduler = mergeScheduler == null ? new ConcurrentMergeScheduler() : mergeScheduler;
+    return this;
+  }
+
+  /**
+   * Returns the {@link MergeScheduler} that was set by
+   * {@link #setMergeScheduler(MergeScheduler)}
+   */
+  public MergeScheduler getMergeScheduler() {
+    return mergeScheduler;
+  }
+
+  /**
+   * Sets the maximum time to wait for a write lock (in milliseconds) for this
+   * instance. You can change the default value for all instances by calling
+   * {@link #setDefaultWriteLockTimeout(long)}.
+   */
+  public IndexWriterConfig setWriteLockTimeout(long writeLockTimeout) {
+    this.writeLockTimeout = writeLockTimeout;
+    return this;
+  }
+  
+  /**
+   * Returns allowed timeout when acquiring the write lock.
+   * 
+   * @see #setWriteLockTimeout(long)
+   */
+  public long getWriteLockTimeout() {
+    return writeLockTimeout;
+  }
+
+  /**
+   * Determines the minimal number of delete terms required before the buffered
+   * in-memory delete terms are applied and flushed. If there are documents
+   * buffered in memory at the time, they are merged and a new segment is
+   * created.
+   *
+   * <p>Disabled by default (writer flushes by RAM usage).
+   * 
+   * @throws IllegalArgumentException if maxBufferedDeleteTerms
+   * is enabled but smaller than 1
+   * @see #setRAMBufferSizeMB
+   */
+  public IndexWriterConfig setMaxBufferedDeleteTerms(int maxBufferedDeleteTerms) {
+    if (maxBufferedDeleteTerms != DISABLE_AUTO_FLUSH
+        && maxBufferedDeleteTerms < 1)
+      throw new IllegalArgumentException(
+          "maxBufferedDeleteTerms must at least be 1 when enabled");
+    this.maxBufferedDeleteTerms = maxBufferedDeleteTerms;
+    return this;
+  }
+
+  /**
+   * Returns the number of buffered deleted terms that will trigger a flush if
+   * enabled.
+   * 
+   * @see #setMaxBufferedDeleteTerms(int)
+   */
+  public int getMaxBufferedDeleteTerms() {
+    return maxBufferedDeleteTerms;
+  }
+
+  /**
+   * Determines the amount of RAM that may be used for buffering added documents
+   * and deletions before they are flushed to the Directory. Generally for
+   * faster indexing performance it's best to flush by RAM usage instead of
+   * document count and use as large a RAM buffer as you can.
+   * 
+   * <p>
+   * When this is set, the writer will flush whenever buffered documents and
+   * deletions use this much RAM. Pass in {@link #DISABLE_AUTO_FLUSH} to prevent
+   * triggering a flush due to RAM usage. Note that if flushing by document
+   * count is also enabled, then the flush will be triggered by whichever comes
+   * first.
+   * 
+   * <p>
+   * <b>NOTE</b>: the account of RAM usage for pending deletions is only
+   * approximate. Specifically, if you delete by Query, Lucene currently has no
+   * way to measure the RAM usage of individual Queries so the accounting will
+   * under-estimate and you should compensate by either calling commit()
+   * periodically yourself, or by using {@link #setMaxBufferedDeleteTerms(int)}
+   * to flush by count instead of RAM usage (each buffered delete Query counts 
+   * as one).
+   * 
+   * <p>
+   * <b>NOTE</b>: because IndexWriter uses <code>int</code>s when managing its
+   * internal storage, the absolute maximum value for this setting is somewhat
+   * less than 2048 MB. The precise limit depends on various factors, such as
+   * how large your documents are, how many fields have norms, etc., so it's
+   * best to set this value comfortably under 2048.
+   * 
+   * <p>
+   * The default value is {@link #DEFAULT_RAM_BUFFER_SIZE_MB}.
+   * 
+   * @throws IllegalArgumentException
+   *           if ramBufferSize is enabled but non-positive, or it disables
+   *           ramBufferSize when maxBufferedDocs is already disabled
+   */
+  public IndexWriterConfig setRAMBufferSizeMB(double ramBufferSizeMB) {
+    if (ramBufferSizeMB > 2048.0) {
+      throw new IllegalArgumentException("ramBufferSize " + ramBufferSizeMB
+          + " is too large; should be comfortably less than 2048");
+    }
+    if (ramBufferSizeMB != DISABLE_AUTO_FLUSH && ramBufferSizeMB <= 0.0)
+      throw new IllegalArgumentException(
+          "ramBufferSize should be > 0.0 MB when enabled");
+    if (ramBufferSizeMB == DISABLE_AUTO_FLUSH && maxBufferedDocs == DISABLE_AUTO_FLUSH)
+      throw new IllegalArgumentException(
+          "at least one of ramBufferSize and maxBufferedDocs must be enabled");
+    this.ramBufferSizeMB = ramBufferSizeMB;
+    return this;
+  }
+
+  /** Returns the value set by {@link #setRAMBufferSizeMB(double)} if enabled. */
+  public double getRAMBufferSizeMB() {
+    return ramBufferSizeMB;
+  }
+
+  /**
+   * Determines the minimal number of documents required before the buffered
+   * in-memory documents are flushed as a new Segment. Large values generally
+   * give faster indexing.
+   * 
+   * <p>
+   * When this is set, the writer will flush every maxBufferedDocs added
+   * documents. Pass in {@link #DISABLE_AUTO_FLUSH} to prevent triggering a
+   * flush due to number of buffered documents. Note that if flushing by RAM
+   * usage is also enabled, then the flush will be triggered by whichever comes
+   * first.
+   * 
+   * <p>
+   * Disabled by default (writer flushes by RAM usage).
+   * 
+   * @see #setRAMBufferSizeMB(double)
+   * 
+   * @throws IllegalArgumentException
+   *           if maxBufferedDocs is enabled but smaller than 2, or it disables
+   *           maxBufferedDocs when ramBufferSize is already disabled
+   */
+  public IndexWriterConfig setMaxBufferedDocs(int maxBufferedDocs) {
+    if (maxBufferedDocs != DISABLE_AUTO_FLUSH && maxBufferedDocs < 2)
+      throw new IllegalArgumentException(
+          "maxBufferedDocs must at least be 2 when enabled");
+    if (maxBufferedDocs == DISABLE_AUTO_FLUSH
+        && ramBufferSizeMB == DISABLE_AUTO_FLUSH)
+      throw new IllegalArgumentException(
+          "at least one of ramBufferSize and maxBufferedDocs must be enabled");
+    this.maxBufferedDocs = maxBufferedDocs;
+    return this;
+  }
+
+  /**
+   * Returns the number of buffered added documents that will trigger a flush if
+   * enabled.
+   * 
+   * @see #setMaxBufferedDocs(int)
+   */
+  public int getMaxBufferedDocs() {
+    return maxBufferedDocs;
+  }
+
+  /** Set the merged segment warmer. See {@link IndexReaderWarmer}. */
+  public IndexWriterConfig setMergedSegmentWarmer(IndexReaderWarmer mergeSegmentWarmer) {
+    this.mergedSegmentWarmer = mergeSegmentWarmer;
+    return this;
+  }
+
+  /** Returns the current merged segment warmer. See {@link IndexReaderWarmer}. */
+  public IndexReaderWarmer getMergedSegmentWarmer() {
+    return mergedSegmentWarmer;
+  }
+
+
+  /** Expert: sets the {@link DocConsumer} chain to be used to process documents. */
+  IndexWriterConfig setIndexingChain(IndexingChain indexingChain) {
+    this.indexingChain = indexingChain == null ? DocumentsWriter.defaultIndexingChain : indexingChain;
+    return this;
+  }
+  
+  /** Returns the indexing chain set on {@link #setIndexingChain(IndexingChain)}. */
+  IndexingChain getIndexingChain() {
+    return indexingChain;
+  }
+  
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("matchVersion=").append(matchVersion).append("\n");
+    sb.append("analyzer=").append(analyzer.getClass().getName()).append("\n");
+    sb.append("delPolicy=").append(delPolicy.getClass().getName()).append("\n");
+    sb.append("commit=").append(commit == null ? "null" : commit.getClass().getName()).append("\n");
+    sb.append("openMode=").append(openMode).append("\n");
+    sb.append("maxFieldLength=").append(maxFieldLength).append("\n");
+    sb.append("similarity=").append(similarity.getClass().getName()).append("\n");
+    sb.append("termIndexInterval=").append(termIndexInterval).append("\n");
+    sb.append("mergeScheduler=").append(mergeScheduler.getClass().getName()).append("\n");
+    sb.append("default WRITE_LOCK_TIMEOUT=").append(WRITE_LOCK_TIMEOUT).append("\n");
+    sb.append("writeLockTimeout=").append(writeLockTimeout).append("\n");
+    sb.append("maxBufferedDeleteTerms=").append(maxBufferedDeleteTerms).append("\n");
+    sb.append("ramBufferSizeMB=").append(ramBufferSizeMB).append("\n");
+    sb.append("maxBufferedDocs=").append(maxBufferedDocs).append("\n");
+    sb.append("mergedSegmentWarmer=").append(mergedSegmentWarmer).append("\n");
+    return sb.toString();
+  }
+}

Propchange: lucene/java/trunk/src/java/org/apache/lucene/index/IndexWriterConfig.java
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: lucene/java/trunk/src/java/org/apache/lucene/index/SegmentMerger.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/SegmentMerger.java?rev=922583&r1=922582&r2=922583&view=diff
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/SegmentMerger.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/SegmentMerger.java Sat Mar 13 15:32:48 2010
@@ -48,7 +48,7 @@ final class SegmentMerger {
   
   private Directory directory;
   private String segment;
-  private int termIndexInterval = IndexWriter.DEFAULT_TERM_INDEX_INTERVAL;
+  private int termIndexInterval = IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL;
 
   private List<IndexReader> readers = new ArrayList<IndexReader>();
   private FieldInfos fieldInfos;
@@ -96,7 +96,7 @@ final class SegmentMerger {
         }
       };
     }
-    termIndexInterval = writer.getTermIndexInterval();
+    termIndexInterval = writer.getConfig().getTermIndexInterval();
   }
   
   boolean hasProx() {

Modified: lucene/java/trunk/src/test/org/apache/lucene/TestDemo.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/TestDemo.java?rev=922583&r1=922582&r2=922583&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/TestDemo.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/TestDemo.java Sat Mar 13 15:32:48 2010
@@ -24,6 +24,7 @@ import org.apache.lucene.analysis.standa
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.queryParser.ParseException;
 import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.search.IndexSearcher;
@@ -49,8 +50,9 @@ public class TestDemo extends LuceneTest
     Directory directory = new RAMDirectory();
     // To store an index on disk, use this instead:
     //Directory directory = FSDirectory.open("/tmp/testindex");
-    IndexWriter iwriter = new IndexWriter(directory, analyzer, true,
-                                          new IndexWriter.MaxFieldLength(25000));
+    IndexWriter iwriter = new IndexWriter(directory, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, analyzer).setMaxFieldLength(25000));
+    
     Document doc = new Document();
     String text = "This is the text to be indexed.";
     doc.add(new Field("fieldname", text, Field.Store.YES,

Modified: lucene/java/trunk/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/TestMergeSchedulerExternal.java?rev=922583&r1=922582&r2=922583&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/TestMergeSchedulerExternal.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/TestMergeSchedulerExternal.java Sat Mar 13 15:32:48 2010
@@ -18,11 +18,12 @@ package org.apache.lucene;
  */
 import java.io.IOException;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.MergePolicy;
 import org.apache.lucene.index.ConcurrentMergeScheduler;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 
@@ -86,15 +87,14 @@ public class TestMergeSchedulerExternal 
     Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
     doc.add(idField);
     
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
-    MyMergeScheduler ms = new MyMergeScheduler();
-    writer.setMergeScheduler(ms);
-    writer.setMaxBufferedDocs(2);
-    writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
+    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setMergeScheduler(new MyMergeScheduler())
+        .setMaxBufferedDocs(2).setRAMBufferSizeMB(
+            IndexWriterConfig.DISABLE_AUTO_FLUSH));
     for(int i=0;i<20;i++)
       writer.addDocument(doc);
 
-    ms.sync();
+    ((MyMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
     writer.close();
     
     assertTrue(mergeThreadCreated);

Modified: lucene/java/trunk/src/test/org/apache/lucene/TestSearch.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/TestSearch.java?rev=922583&r1=922582&r2=922583&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/TestSearch.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/TestSearch.java Sat Mar 13 15:32:48 2010
@@ -70,14 +70,14 @@ public class TestSearch extends LuceneTe
 
 
     private void doTestSearch(PrintWriter out, boolean useCompoundFile)
-    throws Exception
-    {
+    throws Exception {
       Directory directory = new RAMDirectory();
       Analyzer analyzer = new SimpleAnalyzer(TEST_VERSION_CURRENT);
-      IndexWriter writer = new IndexWriter(directory, analyzer, true, 
-                                           IndexWriter.MaxFieldLength.LIMITED);
-
-      writer.setUseCompoundFile(useCompoundFile);
+      IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, analyzer));
+      LogMergePolicy lmp = (LogMergePolicy) writer.getMergePolicy();
+      lmp.setUseCompoundFile(useCompoundFile);
+      lmp.setUseCompoundDocStore(useCompoundFile);
 
       String[] docs = {
         "a b c d e",

Modified: lucene/java/trunk/src/test/org/apache/lucene/TestSearchForDuplicates.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/TestSearchForDuplicates.java?rev=922583&r1=922582&r2=922583&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/TestSearchForDuplicates.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/TestSearchForDuplicates.java Sat Mar 13 15:32:48 2010
@@ -78,10 +78,11 @@ public class TestSearchForDuplicates ext
   private void doTest(PrintWriter out, boolean useCompoundFiles) throws Exception {
       Directory directory = new RAMDirectory();
       Analyzer analyzer = new SimpleAnalyzer(TEST_VERSION_CURRENT);
-      IndexWriter writer = new IndexWriter(directory, analyzer, true,
-                                           IndexWriter.MaxFieldLength.LIMITED);
-
-      writer.setUseCompoundFile(useCompoundFiles);
+      IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, analyzer));
+      LogMergePolicy lmp = (LogMergePolicy) writer.getMergePolicy();
+      lmp.setUseCompoundFile(useCompoundFiles);
+      lmp.setUseCompoundDocStore(useCompoundFiles);
 
       final int MAX_DOCS = 225;
 

Modified: lucene/java/trunk/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java?rev=922583&r1=922582&r2=922583&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java Sat Mar 13 15:32:48 2010
@@ -31,6 +31,7 @@ import org.apache.lucene.store.IndexInpu
 import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.index.IndexCommit;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.TestIndexWriter;
@@ -67,9 +68,10 @@ public class TestSnapshotDeletionPolicy 
     Directory dir = new MockRAMDirectory();
 
     SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
-    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
-    // Force frequent flushes
-    writer.setMaxBufferedDocs(2);
+    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, 
+        new StandardAnalyzer(TEST_VERSION_CURRENT)).setIndexDeletionPolicy(dp)
+        .setMaxBufferedDocs(2));
     Document doc = new Document();
     doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     for(int i=0;i<7;i++) {
@@ -83,7 +85,8 @@ public class TestSnapshotDeletionPolicy 
     writer.close();
     copyFiles(dir, cp);
     
-    writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
+    writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
+        new StandardAnalyzer(TEST_VERSION_CURRENT)).setIndexDeletionPolicy(dp));
     copyFiles(dir, cp);
     for(int i=0;i<7;i++) {
       writer.addDocument(doc);
@@ -95,7 +98,8 @@ public class TestSnapshotDeletionPolicy 
     writer.close();
     copyFiles(dir, cp);
     dp.release();
-    writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
+    writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
+        new StandardAnalyzer(TEST_VERSION_CURRENT)).setIndexDeletionPolicy(dp));
     writer.close();
     try {
       copyFiles(dir, cp);
@@ -111,10 +115,10 @@ public class TestSnapshotDeletionPolicy 
     final long stopTime = System.currentTimeMillis() + 1000;
 
     SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
-    final IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
-
-    // Force frequent flushes
-    writer.setMaxBufferedDocs(2);
+    final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, 
+        new StandardAnalyzer(TEST_VERSION_CURRENT)).setIndexDeletionPolicy(dp)
+        .setMaxBufferedDocs(2));
 
     final Thread t = new Thread() {
         @Override

Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java?rev=922583&r1=922582&r2=922583&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java Sat Mar 13 15:32:48 2010
@@ -27,6 +27,7 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.document.Field.TermVector;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermPositions;
 import org.apache.lucene.store.Directory;
@@ -37,7 +38,8 @@ public class TestCachingTokenFilter exte
   
   public void testCaching() throws IOException {
     Directory dir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)));
     Document doc = new Document();
     TokenStream stream = new TokenStream() {
       private int index = 0;

Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java?rev=922583&r1=922582&r2=922583&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java Sat Mar 13 15:32:48 2010
@@ -24,6 +24,7 @@ import org.apache.lucene.document.Docume
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermDocs;
 import org.apache.lucene.queryParser.QueryParser;
@@ -41,9 +42,9 @@ public class TestKeywordAnalyzer extends
   protected void setUp() throws Exception {
     super.setUp();
     directory = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(directory,
-                                         new SimpleAnalyzer(TEST_VERSION_CURRENT),
-                                         true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, new SimpleAnalyzer(
+        TEST_VERSION_CURRENT)));
 
     Document doc = new Document();
     doc.add(new Field("partnum", "Q36", Field.Store.YES, Field.Index.NOT_ANALYZED));
@@ -70,7 +71,7 @@ public class TestKeywordAnalyzer extends
 
   public void testMutipleDocument() throws Exception {
     RAMDirectory dir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(dir,new KeywordAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new KeywordAnalyzer()));
     Document doc = new Document();
     doc.add(new Field("partnum", "Q36", Field.Store.YES, Field.Index.ANALYZED));
     writer.addDocument(doc);

Modified: lucene/java/trunk/src/test/org/apache/lucene/collation/CollationTestBase.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/collation/CollationTestBase.java?rev=922583&r1=922582&r2=922583&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/collation/CollationTestBase.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/collation/CollationTestBase.java Sat Mar 13 15:32:48 2010
@@ -23,6 +23,7 @@ import org.apache.lucene.analysis.PerFie
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.search.IndexSearcher;
@@ -69,8 +70,8 @@ public class CollationTestBase extends L
                                             String firstEnd, String secondBeg,
                                             String secondEnd) throws Exception {
     RAMDirectory ramDir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter
-      (ramDir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, analyzer));
     Document doc = new Document();
     doc.add(new Field("content", "\u0633\u0627\u0628", 
                       Field.Store.YES, Field.Index.ANALYZED));
@@ -101,8 +102,8 @@ public class CollationTestBase extends L
                                             String firstEnd, String secondBeg,
                                             String secondEnd) throws Exception {
     RAMDirectory ramDir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter
-      (ramDir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, analyzer));
     Document doc = new Document();
 
     // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
@@ -125,13 +126,12 @@ public class CollationTestBase extends L
     searcher.close();
   }
 
-  public void testFarsiTermRangeQuery
-    (Analyzer analyzer, String firstBeg, String firstEnd, 
-     String secondBeg, String secondEnd) throws Exception {
+  public void testFarsiTermRangeQuery(Analyzer analyzer, String firstBeg,
+      String firstEnd, String secondBeg, String secondEnd) throws Exception {
 
     RAMDirectory farsiIndex = new RAMDirectory();
-    IndexWriter writer = new IndexWriter
-      (farsiIndex, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(farsiIndex, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, analyzer));
     Document doc = new Document();
     doc.add(new Field("content", "\u0633\u0627\u0628", 
                       Field.Store.YES, Field.Index.ANALYZED));
@@ -178,8 +178,8 @@ public class CollationTestBase extends L
     analyzer.addAnalyzer("France", franceAnalyzer);
     analyzer.addAnalyzer("Sweden", swedenAnalyzer);
     analyzer.addAnalyzer("Denmark", denmarkAnalyzer);
-    IndexWriter writer = new IndexWriter 
-      (indexStore, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, analyzer));
 
     // document data:
     // the tracer field is used to determine which document was hit

Modified: lucene/java/trunk/src/test/org/apache/lucene/document/TestBinaryDocument.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/document/TestBinaryDocument.java?rev=922583&r1=922582&r2=922583&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/document/TestBinaryDocument.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/document/TestBinaryDocument.java Sat Mar 13 15:32:48 2010
@@ -5,6 +5,7 @@ import org.apache.lucene.util.LuceneTest
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.store.MockRAMDirectory;
 
 /**
@@ -27,8 +28,7 @@ import org.apache.lucene.store.MockRAMDi
 /**
  * Tests {@link Document} class.
  */
-public class TestBinaryDocument extends LuceneTestCase
-{
+public class TestBinaryDocument extends LuceneTestCase {
 
   String binaryValStored = "this text will be stored as a byte array in the index";
   String binaryValCompressed = "this text will be also stored and compressed as a byte array in the index";
@@ -58,7 +58,8 @@ public class TestBinaryDocument extends 
     
     /** add the doc to a ram index */
     MockRAMDirectory dir = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, new StandardAnalyzer(TEST_VERSION_CURRENT)));
     writer.addDocument(doc);
     writer.close();
     
@@ -83,9 +84,7 @@ public class TestBinaryDocument extends 
     dir.close();
   }
   
-  public void testCompressionTools()
-    throws Exception
-  {
+  public void testCompressionTools() throws Exception {
     Fieldable binaryFldCompressed = new Field("binaryCompressed", CompressionTools.compress(binaryValCompressed.getBytes()));
     Fieldable stringFldCompressed = new Field("stringCompressed", CompressionTools.compressString(binaryValCompressed));
     
@@ -96,7 +95,8 @@ public class TestBinaryDocument extends 
     
     /** add the doc to a ram index */
     MockRAMDirectory dir = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, new StandardAnalyzer(TEST_VERSION_CURRENT)));
     writer.addDocument(doc);
     writer.close();
     

Modified: lucene/java/trunk/src/test/org/apache/lucene/document/TestDocument.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/document/TestDocument.java?rev=922583&r1=922582&r2=922583&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/document/TestDocument.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/document/TestDocument.java Sat Mar 13 15:32:48 2010
@@ -2,6 +2,7 @@ package org.apache.lucene.document;
 
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
@@ -151,10 +152,11 @@ public class TestDocument extends Lucene
      *
      * @throws Exception on error
      */
-    public void testGetValuesForIndexedDocument() throws Exception
-    {
+    public void testGetValuesForIndexedDocument() throws Exception {
         RAMDirectory dir = new RAMDirectory();
-        IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, new StandardAnalyzer(
+        TEST_VERSION_CURRENT)));
         writer.addDocument(makeDocumentWithFields());
         writer.close();
 
@@ -225,7 +227,9 @@ public class TestDocument extends Lucene
       doc.add(new Field("keyword", "test", Field.Store.YES, Field.Index.NOT_ANALYZED));
 
       RAMDirectory dir = new RAMDirectory();
-      IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+      IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, new StandardAnalyzer(
+        TEST_VERSION_CURRENT)));
       writer.addDocument(doc);
       field.setValue("id2");
       writer.addDocument(doc);

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/DocHelper.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/DocHelper.java?rev=922583&r1=922582&r2=922583&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/DocHelper.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/DocHelper.java Sat Mar 13 15:32:48 2010
@@ -232,10 +232,9 @@ class DocHelper {
    * @param doc
    * @throws IOException
    */ 
-  public static SegmentInfo writeDoc(Directory dir, Analyzer analyzer, Similarity similarity, Document doc) throws IOException
-  {
-    IndexWriter writer = new IndexWriter(dir, analyzer, IndexWriter.MaxFieldLength.LIMITED);
-    writer.setSimilarity(similarity);
+  public static SegmentInfo writeDoc(Directory dir, Analyzer analyzer, Similarity similarity, Document doc) throws IOException {
+    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, analyzer).setSimilarity(similarity));
     //writer.setUseCompoundFile(false);
     writer.addDocument(doc);
     writer.commit();

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java?rev=922583&r1=922582&r2=922583&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java Sat Mar 13 15:32:48 2010
@@ -23,6 +23,7 @@ import org.apache.lucene.util.LuceneTest
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.store.MockRAMDirectory;
@@ -39,27 +40,30 @@ public class TestAddIndexesNoOptimize ex
 
     IndexWriter writer = null;
 
-    writer = newWriter(dir, true);
+    writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
+        new WhitespaceAnalyzer(TEST_VERSION_CURRENT))
+        .setOpenMode(OpenMode.CREATE));
     // add 100 documents
     addDocs(writer, 100);
     assertEquals(100, writer.maxDoc());
     writer.close();
 
-    writer = newWriter(aux, true);
-    writer.setUseCompoundFile(false); // use one without a compound file
+    writer = newWriter(aux, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.CREATE));
+    ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(false); // use one without a compound file
+    ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundDocStore(false); // use one without a compound file
     // add 40 documents in separate files
     addDocs(writer, 40);
     assertEquals(40, writer.maxDoc());
     writer.close();
 
-    writer = newWriter(aux2, true);
+    writer = newWriter(aux2, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.CREATE));
     // add 40 documents in compound files
     addDocs2(writer, 50);
     assertEquals(50, writer.maxDoc());
     writer.close();
 
     // test doc count before segments are merged
-    writer = newWriter(dir, false);
+    writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.APPEND));
     assertEquals(100, writer.maxDoc());
     writer.addIndexesNoOptimize(new Directory[] { aux, aux2 });
     assertEquals(190, writer.maxDoc());
@@ -73,14 +77,14 @@ public class TestAddIndexesNoOptimize ex
 
     // now add another set in.
     Directory aux3 = new RAMDirectory();
-    writer = newWriter(aux3, true);
+    writer = newWriter(aux3, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
     // add 40 documents
     addDocs(writer, 40);
     assertEquals(40, writer.maxDoc());
     writer.close();
 
     // test doc count before segments are merged/index is optimized
-    writer = newWriter(dir, false);
+    writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.APPEND));
     assertEquals(190, writer.maxDoc());
     writer.addIndexesNoOptimize(new Directory[] { aux3 });
     assertEquals(230, writer.maxDoc());
@@ -94,7 +98,7 @@ public class TestAddIndexesNoOptimize ex
     verifyTermDocs(dir, new Term("content", "bbb"), 50);
 
     // now optimize it.
-    writer = newWriter(dir, false);
+    writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.APPEND));
     writer.optimize();
     writer.close();
 
@@ -107,11 +111,11 @@ public class TestAddIndexesNoOptimize ex
 
     // now add a single document
     Directory aux4 = new RAMDirectory();
-    writer = newWriter(aux4, true);
+    writer = newWriter(aux4, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
     addDocs2(writer, 1);
     writer.close();
 
-    writer = newWriter(dir, false);
+    writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.APPEND));
     assertEquals(230, writer.maxDoc());
     writer.addIndexesNoOptimize(new Directory[] { aux4 });
     assertEquals(231, writer.maxDoc());
@@ -129,7 +133,7 @@ public class TestAddIndexesNoOptimize ex
     Directory aux = new RAMDirectory();
 
     setUpDirs(dir, aux);
-    IndexWriter writer = newWriter(dir, false);
+    IndexWriter writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.APPEND));
     writer.addIndexesNoOptimize(new Directory[] {aux});
 
     // Adds 10 docs, then replaces them with another 10
@@ -166,7 +170,7 @@ public class TestAddIndexesNoOptimize ex
     Directory aux = new RAMDirectory();
 
     setUpDirs(dir, aux);
-    IndexWriter writer = newWriter(dir, false);
+    IndexWriter writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.APPEND));
 
     // Adds 10 docs, then replaces them with another 10
     // docs, so 10 pending deletes:
@@ -205,7 +209,7 @@ public class TestAddIndexesNoOptimize ex
     Directory aux = new RAMDirectory();
 
     setUpDirs(dir, aux);
-    IndexWriter writer = newWriter(dir, false);
+    IndexWriter writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.APPEND));
 
     // Adds 10 docs, then replaces them with another 10
     // docs, so 10 pending deletes:
@@ -246,25 +250,25 @@ public class TestAddIndexesNoOptimize ex
 
     IndexWriter writer = null;
 
-    writer = newWriter(dir, true);
+    writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
     // add 100 documents
     addDocs(writer, 100);
     assertEquals(100, writer.maxDoc());
     writer.close();
 
-    writer = newWriter(aux, true);
-    writer.setUseCompoundFile(false); // use one without a compound file
-    writer.setMaxBufferedDocs(1000);
+    writer = newWriter(aux, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(1000));
+    ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(false); // use one without a compound file
+    ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundDocStore(false); // use one without a compound file
     // add 140 documents in separate files
     addDocs(writer, 40);
     writer.close();
-    writer = newWriter(aux, true);
-    writer.setUseCompoundFile(false); // use one without a compound file
-    writer.setMaxBufferedDocs(1000);
+    writer = newWriter(aux, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(1000));
+    ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(false); // use one without a compound file
+    ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundDocStore(false); // use one without a compound file
     addDocs(writer, 100);
     writer.close();
 
-    writer = newWriter(dir, false);
+    writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.APPEND));
     try {
       // cannot add self
       writer.addIndexesNoOptimize(new Directory[] { aux, dir });
@@ -290,9 +294,10 @@ public class TestAddIndexesNoOptimize ex
 
     setUpDirs(dir, aux);
 
-    IndexWriter writer = newWriter(dir, false);
-    writer.setMaxBufferedDocs(10);
-    writer.setMergeFactor(4);
+    IndexWriter writer = newWriter(dir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT))
+        .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
+    ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(4);
     addDocs(writer, 10);
 
     writer.addIndexesNoOptimize(new Directory[] { aux });
@@ -314,9 +319,8 @@ public class TestAddIndexesNoOptimize ex
 
     setUpDirs(dir, aux);
 
-    IndexWriter writer = newWriter(dir, false);
-    writer.setMaxBufferedDocs(9);
-    writer.setMergeFactor(4);
+    IndexWriter writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(9));
+    ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(4);
     addDocs(writer, 2);
 
     writer.addIndexesNoOptimize(new Directory[] { aux });
@@ -338,9 +342,10 @@ public class TestAddIndexesNoOptimize ex
 
     setUpDirs(dir, aux);
 
-    IndexWriter writer = newWriter(dir, false);
-    writer.setMaxBufferedDocs(10);
-    writer.setMergeFactor(4);
+    IndexWriter writer = newWriter(dir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT))
+        .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
+    ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(4);
 
     writer.addIndexesNoOptimize(new Directory[] { aux, new RAMDirectory(aux) });
     assertEquals(1060, writer.maxDoc());
@@ -367,9 +372,10 @@ public class TestAddIndexesNoOptimize ex
     assertEquals(10, reader.numDocs());
     reader.close();
 
-    IndexWriter writer = newWriter(dir, false);
-    writer.setMaxBufferedDocs(4);
-    writer.setMergeFactor(4);
+    IndexWriter writer = newWriter(dir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT))
+        .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(4));
+    ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(4);
 
     writer.addIndexesNoOptimize(new Directory[] { aux, new RAMDirectory(aux) });
     assertEquals(1020, writer.maxDoc());
@@ -390,9 +396,10 @@ public class TestAddIndexesNoOptimize ex
 
     setUpDirs(dir, aux);
 
-    IndexWriter writer = newWriter(aux2, true);
-    writer.setMaxBufferedDocs(100);
-    writer.setMergeFactor(10);
+    IndexWriter writer = newWriter(aux2, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT))
+        .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(100));
+    ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10);
     writer.addIndexesNoOptimize(new Directory[] { aux });
     assertEquals(30, writer.maxDoc());
     assertEquals(3, writer.getSegmentCount());
@@ -412,9 +419,9 @@ public class TestAddIndexesNoOptimize ex
     assertEquals(22, reader.numDocs());
     reader.close();
 
-    writer = newWriter(dir, false);
-    writer.setMaxBufferedDocs(6);
-    writer.setMergeFactor(4);
+    writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT))
+        .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(6));
+    ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(4);
 
     writer.addIndexesNoOptimize(new Directory[] { aux, aux2 });
     assertEquals(1025, writer.maxDoc());
@@ -425,9 +432,9 @@ public class TestAddIndexesNoOptimize ex
     verifyNumDocs(dir, 1025);
   }
 
-  private IndexWriter newWriter(Directory dir, boolean create)
+  private IndexWriter newWriter(Directory dir, IndexWriterConfig conf)
       throws IOException {
-    final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), create, IndexWriter.MaxFieldLength.UNLIMITED);
+    final IndexWriter writer = new IndexWriter(dir, conf);
     writer.setMergePolicy(new LogDocMergePolicy(writer));
     return writer;
   }
@@ -471,26 +478,25 @@ public class TestAddIndexesNoOptimize ex
   private void setUpDirs(Directory dir, Directory aux) throws IOException {
     IndexWriter writer = null;
 
-    writer = newWriter(dir, true);
-    writer.setMaxBufferedDocs(1000);
+    writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(1000));
     // add 1000 documents in 1 segment
     addDocs(writer, 1000);
     assertEquals(1000, writer.maxDoc());
     assertEquals(1, writer.getSegmentCount());
     writer.close();
 
-    writer = newWriter(aux, true);
-    writer.setUseCompoundFile(false); // use one without a compound file
-    writer.setMaxBufferedDocs(100);
-    writer.setMergeFactor(10);
+    writer = newWriter(aux, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(100));
+    ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(false); // use one without a compound file
+    ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundDocStore(false); // use one without a compound file
+    ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10);
     // add 30 documents in 3 segments
     for (int i = 0; i < 3; i++) {
       addDocs(writer, 10);
       writer.close();
-      writer = newWriter(aux, false);
-      writer.setUseCompoundFile(false); // use one without a compound file
-      writer.setMaxBufferedDocs(100);
-      writer.setMergeFactor(10);
+      writer = newWriter(aux, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(100));
+      ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(false); // use one without a compound file
+      ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundDocStore(false); // use one without a compound file
+      ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10);
     }
     assertEquals(30, writer.maxDoc());
     assertEquals(3, writer.getSegmentCount());
@@ -501,18 +507,19 @@ public class TestAddIndexesNoOptimize ex
   public void testHangOnClose() throws IOException {
 
     Directory dir = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
-    writer.setMergePolicy(new LogByteSizeMergePolicy(writer));
-    writer.setMaxBufferedDocs(5);
-    writer.setUseCompoundFile(false);
-    writer.setMergeFactor(100);
+    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setMaxBufferedDocs(5));
+    LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy(writer);
+    lmp.setUseCompoundFile(false);
+    lmp.setUseCompoundDocStore(false);
+    lmp.setMergeFactor(100);
+    writer.setMergePolicy(lmp);
 
     Document doc = new Document();
     doc.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
                       Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     for(int i=0;i<60;i++)
       writer.addDocument(doc);
-    writer.setMaxBufferedDocs(200);
+
     Document doc2 = new Document();
     doc2.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
                       Field.Index.NO));
@@ -527,13 +534,13 @@ public class TestAddIndexesNoOptimize ex
     writer.close();
 
     Directory dir2 = new MockRAMDirectory();
-    writer = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
-    LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy(writer);
+    writer = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setMergeScheduler(new SerialMergeScheduler()));
+    lmp = new LogByteSizeMergePolicy(writer);
     lmp.setMinMergeMB(0.0001);
+    lmp.setUseCompoundFile(false);
+    lmp.setUseCompoundDocStore(false);
+    lmp.setMergeFactor(4);
     writer.setMergePolicy(lmp);
-    writer.setMergeFactor(4);
-    writer.setUseCompoundFile(false);
-    writer.setMergeScheduler(new SerialMergeScheduler());
     writer.addIndexesNoOptimize(new Directory[] {dir});
     writer.close();
     dir.close();
@@ -544,14 +551,16 @@ public class TestAddIndexesNoOptimize ex
   // is respected when copying tail segments
   public void testTargetCFS() throws IOException {
     Directory dir = new RAMDirectory();
-    IndexWriter writer = newWriter(dir, true);
-    writer.setUseCompoundFile(false);
+    IndexWriter writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
+    ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(false);
+    ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundDocStore(false);
     addDocs(writer, 1);
     writer.close();
 
     Directory other = new RAMDirectory();
-    writer = newWriter(other, true);
-    writer.setUseCompoundFile(true);
+    writer = newWriter(other, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
+    ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(true);
+    ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundDocStore(true);
     writer.addIndexesNoOptimize(new Directory[] {dir});
     assertTrue(writer.newestSegment().getUseCompoundFile());
     writer.close();

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestAtomicUpdate.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestAtomicUpdate.java?rev=922583&r1=922582&r2=922583&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestAtomicUpdate.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestAtomicUpdate.java Sat Mar 13 15:32:48 2010
@@ -19,20 +19,20 @@ package org.apache.lucene.index;
 import org.apache.lucene.util.*;
 import org.apache.lucene.store.*;
 import org.apache.lucene.document.*;
-import org.apache.lucene.analysis.*;
+import org.apache.lucene.analysis.SimpleAnalyzer;
 
 import java.util.Random;
 import java.io.File;
 import java.io.IOException;
 
 public class TestAtomicUpdate extends LuceneTestCase {
-  private static final Analyzer ANALYZER = new SimpleAnalyzer(TEST_VERSION_CURRENT);
-  private Random RANDOM;
+  
+  private static final class MockIndexWriter extends IndexWriter {
 
-  public class MockIndexWriter extends IndexWriter {
+    static Random RANDOM;
 
-    public MockIndexWriter(Directory dir, Analyzer a, boolean create, IndexWriter.MaxFieldLength mfl) throws IOException {
-      super(dir, a, create, mfl);
+    public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
+      super(dir, conf);
     }
 
     @Override
@@ -126,9 +126,10 @@ public class TestAtomicUpdate extends Lu
 
     TimedThread[] threads = new TimedThread[4];
 
-    IndexWriter writer = new MockIndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);
-    writer.setMaxBufferedDocs(7);
-    writer.setMergeFactor(3);
+    IndexWriter writer = new MockIndexWriter(directory, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT))
+        .setMaxBufferedDocs(7));
+    ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(3);
 
     // Establish a base index of 100 docs:
     for(int i=0;i<100;i++) {
@@ -183,7 +184,7 @@ public class TestAtomicUpdate extends Lu
     FSDirectory.
   */
   public void testAtomicUpdates() throws Exception {
-    RANDOM = newRandom();
+    MockIndexWriter.RANDOM = newRandom();
     Directory directory;
 
     // First in a RAM directory:

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java?rev=922583&r1=922582&r2=922583&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java Sat Mar 13 15:32:48 2010
@@ -38,6 +38,7 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.document.Fieldable;
 import org.apache.lucene.document.FieldSelector;
 import org.apache.lucene.document.FieldSelectorResult;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
@@ -52,8 +53,7 @@ import org.apache.lucene.util._TestUtil;
   against it, and add documents to it.
 */
 
-public class TestBackwardsCompatibility extends LuceneTestCase
-{
+public class TestBackwardsCompatibility extends LuceneTestCase {
 
   // Uncomment these cases & run them on an older Lucene
   // version, to generate an index to test backwards
@@ -215,7 +215,8 @@ public class TestBackwardsCompatibility 
         hasTested29++;
       }
 
-      IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+      IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
+          TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
       w.optimize();
       w.close();
 
@@ -355,7 +356,7 @@ public class TestBackwardsCompatibility 
     Directory dir = FSDirectory.open(new File(dirName));
 
     // open writer
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.APPEND));
 
     // add 10 docs
     for(int i=0;i<10;i++) {
@@ -399,7 +400,7 @@ public class TestBackwardsCompatibility 
     searcher.close();
 
     // optimize
-    writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
+    writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.APPEND));
     writer.optimize();
     writer.close();
 
@@ -449,7 +450,7 @@ public class TestBackwardsCompatibility 
     searcher.close();
 
     // optimize
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.APPEND));
     writer.optimize();
     writer.close();
 
@@ -471,9 +472,9 @@ public class TestBackwardsCompatibility 
     dirName = fullDir(dirName);
 
     Directory dir = FSDirectory.open(new File(dirName));
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
-    writer.setUseCompoundFile(doCFS);
-    writer.setMaxBufferedDocs(10);
+    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setMaxBufferedDocs(10));
+    ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(doCFS);
+    ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundDocStore(doCFS);
     
     for(int i=0;i<35;i++) {
       addDoc(writer, i);
@@ -482,9 +483,9 @@ public class TestBackwardsCompatibility 
     writer.close();
 
     // open fresh writer so we get no prx file in the added segment
-    writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
-    writer.setUseCompoundFile(doCFS);
-    writer.setMaxBufferedDocs(10);
+    writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setMaxBufferedDocs(10));
+    ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(doCFS);
+    ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundDocStore(doCFS);
     addNoProxDoc(writer);
     writer.close();
 
@@ -509,8 +510,7 @@ public class TestBackwardsCompatibility 
     try {
       Directory dir = FSDirectory.open(new File(fullDir(outputDir)));
 
-      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
-      writer.setRAMBufferSizeMB(16.0);
+      IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
       for(int i=0;i<35;i++) {
         addDoc(writer, i);
       }

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestCheckIndex.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestCheckIndex.java?rev=922583&r1=922582&r2=922583&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestCheckIndex.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestCheckIndex.java Sat Mar 13 15:32:48 2010
@@ -34,9 +34,7 @@ public class TestCheckIndex extends Luce
 
   public void testDeletedDocs() throws IOException {
     MockRAMDirectory dir = new MockRAMDirectory();
-    IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, 
-                                          IndexWriter.MaxFieldLength.LIMITED);      
-    writer.setMaxBufferedDocs(2);
+    IndexWriter writer  = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setMaxBufferedDocs(2));
     Document doc = new Document();
     doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     for(int i=0;i<19;i++) {

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java?rev=922583&r1=922582&r2=922583&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java Sat Mar 13 15:32:48 2010
@@ -17,20 +17,18 @@ package org.apache.lucene.index;
  * limitations under the License.
  */
 
-import org.apache.lucene.analysis.SimpleAnalyzer;
-import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.analysis.SimpleAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 
 import org.apache.lucene.util.LuceneTestCase;
 import java.io.IOException;
 
 public class TestConcurrentMergeScheduler extends LuceneTestCase {
   
-  private static final Analyzer ANALYZER = new SimpleAnalyzer(TEST_VERSION_CURRENT);
-
   private static class FailOnlyOnFlush extends MockRAMDirectory.Failure {
     boolean doFail;
     boolean hitExc;
@@ -68,10 +66,7 @@ public class TestConcurrentMergeSchedule
     FailOnlyOnFlush failure = new FailOnlyOnFlush();
     directory.failOn(failure);
 
-    IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);
-    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
-    writer.setMergeScheduler(cms);
-    writer.setMaxBufferedDocs(2);
+    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)).setMaxBufferedDocs(2));
     Document doc = new Document();
     Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
     doc.add(idField);
@@ -115,9 +110,7 @@ public class TestConcurrentMergeSchedule
 
     RAMDirectory directory = new MockRAMDirectory();
 
-    IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);
-    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
-    writer.setMergeScheduler(cms);
+    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)));
 
     LogDocMergePolicy mp = new LogDocMergePolicy(writer);
     writer.setMergePolicy(mp);
@@ -157,12 +150,11 @@ public class TestConcurrentMergeSchedule
 
     RAMDirectory directory = new MockRAMDirectory();
 
-    IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT))
+        .setMaxBufferedDocs(2));
 
     for(int iter=0;iter<7;iter++) {
-      ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
-      writer.setMergeScheduler(cms);
-      writer.setMaxBufferedDocs(2);
 
       for(int j=0;j<21;j++) {
         Document doc = new Document();
@@ -174,7 +166,9 @@ public class TestConcurrentMergeSchedule
       TestIndexWriter.assertNoUnreferencedFiles(directory, "testNoExtraFiles");
 
       // Reopen
-      writer = new IndexWriter(directory, ANALYZER, false, IndexWriter.MaxFieldLength.UNLIMITED);
+      writer = new IndexWriter(directory, new IndexWriterConfig(
+          TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT))
+          .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(2));
     }
 
     writer.close();
@@ -189,13 +183,10 @@ public class TestConcurrentMergeSchedule
     Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
     doc.add(idField);
 
-    IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)).setMaxBufferedDocs(2));
+    ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(100);
 
     for(int iter=0;iter<10;iter++) {
-      ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
-      writer.setMergeScheduler(cms);
-      writer.setMaxBufferedDocs(2);
-      writer.setMergeFactor(100);
 
       for(int j=0;j<201;j++) {
         idField.setValue(Integer.toString(iter*201+j));
@@ -210,7 +201,7 @@ public class TestConcurrentMergeSchedule
 
       // Force a bunch of merge threads to kick off so we
       // stress out aborting them on close:
-      writer.setMergeFactor(3);
+      ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(3);
       writer.addDocument(doc);
       writer.commit();
 
@@ -221,7 +212,8 @@ public class TestConcurrentMergeSchedule
       reader.close();
 
       // Reopen
-      writer = new IndexWriter(directory, ANALYZER, false, IndexWriter.MaxFieldLength.UNLIMITED);
+      writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.APPEND));
+      ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(100);
     }
     writer.close();
 

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestCrash.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestCrash.java?rev=922583&r1=922582&r2=922583&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestCrash.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestCrash.java Sat Mar 13 15:32:48 2010
@@ -20,9 +20,9 @@ package org.apache.lucene.index;
 import java.io.IOException;
 
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.store.NoLockFactory;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 
@@ -35,10 +35,8 @@ public class TestCrash extends LuceneTes
   private IndexWriter initIndex(MockRAMDirectory dir) throws IOException {
     dir.setLockFactory(NoLockFactory.getNoLockFactory());
 
-    IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
-    //writer.setMaxBufferedDocs(2);
-    writer.setMaxBufferedDocs(10);
-    ((ConcurrentMergeScheduler) writer.getMergeScheduler()).setSuppressExceptions();
+    IndexWriter writer  = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setMaxBufferedDocs(10));
+    ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).setSuppressExceptions();
 
     Document doc = new Document();
     doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
@@ -51,7 +49,7 @@ public class TestCrash extends LuceneTes
 
   private void crash(final IndexWriter writer) throws IOException {
     final MockRAMDirectory dir = (MockRAMDirectory) writer.getDirectory();
-    ConcurrentMergeScheduler cms = (ConcurrentMergeScheduler) writer.getMergeScheduler();
+    ConcurrentMergeScheduler cms = (ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler();
     dir.crash();
     cms.sync();
     dir.clearCrash();



Mime
View raw message