lucene-java-commits mailing list archives

From: uschind...@apache.org
Subject: svn commit: r905044 [1/4] - in /lucene/java/trunk: ./ src/java/org/apache/lucene/analysis/ src/test/org/apache/lucene/ src/test/org/apache/lucene/analysis/ src/test/org/apache/lucene/collation/ src/test/org/apache/lucene/index/ src/test/org/apache/luce...
Date: Sun, 31 Jan 2010 14:05:44 GMT
Author: uschindler
Date: Sun Jan 31 14:05:39 2010
New Revision: 905044

URL: http://svn.apache.org/viewvc?rev=905044&view=rev
Log:
LUCENE-2240, LUCENE-2241: SimpleAnalyzer and WhitespaceAnalyzer now have Version ctors. This commit also fixes all core tests to no longer use the deprecated ctors.
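
For reference, a minimal usage sketch (not part of this commit; the class name VersionCtorExample is invented) showing the new Version ctors next to the now-deprecated no-arg ctors, which keep the pre-3.1 behavior by defaulting to Version.LUCENE_30:

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.util.Version;

public class VersionCtorExample {
  public static void main(String[] args) {
    // New in this commit: Version-taking ctors on both analyzers.
    Analyzer simple = new SimpleAnalyzer(Version.LUCENE_31);          // selects the codepoint-based CharTokenizer API
    Analyzer ws     = new WhitespaceAnalyzer(Version.LUCENE_CURRENT); // always matches the latest behavior

    // Still compiles but deprecated; equivalent to passing Version.LUCENE_30,
    // i.e. the old char-based tokenization.
    @SuppressWarnings("deprecation")
    Analyzer legacy = new SimpleAnalyzer();

    System.out.println(simple + " / " + ws + " / " + legacy);
  }
}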

Modified:
    lucene/java/trunk/CHANGES.txt
    lucene/java/trunk/src/java/org/apache/lucene/analysis/SimpleAnalyzer.java
    lucene/java/trunk/src/java/org/apache/lucene/analysis/StopAnalyzer.java
    lucene/java/trunk/src/java/org/apache/lucene/analysis/WhitespaceAnalyzer.java
    lucene/java/trunk/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
    lucene/java/trunk/src/test/org/apache/lucene/TestSearch.java
    lucene/java/trunk/src/test/org/apache/lucene/TestSearchForDuplicates.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestAnalyzers.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCharTokenizers.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordMarkerTokenFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestLengthFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestPorterStemFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/collation/CollationTestBase.java
    lucene/java/trunk/src/test/org/apache/lucene/index/DocHelper.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestAtomicUpdate.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestCheckIndex.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestCrash.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestDoc.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestDocumentWriter.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestFieldsReader.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestFilterIndexReader.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReaderClone.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReaderReopen.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterReader.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestLazyBug.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestLazyProxSkipping.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestParallelTermEnum.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestPayloads.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestSegmentTermEnum.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing2.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestThreadedOptimize.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestTransactionRollback.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestTransactions.java
    lucene/java/trunk/src/test/org/apache/lucene/queryParser/TestQueryParser.java
    lucene/java/trunk/src/test/org/apache/lucene/search/BaseTestRangeFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/search/QueryUtils.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestBoolean2.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestBooleanPrefixQuery.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestBooleanQuery.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestBooleanScorer.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestDateFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestDateSort.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestDocBoost.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestDocIdSet.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestElevationComparator.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestExplanations.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestFieldCache.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestFilteredQuery.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestFilteredSearch.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestFuzzyQuery.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestNot.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestPhraseQuery.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestPositionIncrement.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestPrefixFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestPrefixQuery.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestQueryTermVector.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestScorerPerf.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestSetNorm.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestSimilarity.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestSort.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestSpanQueryFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestTermRangeFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestTermRangeQuery.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestTermScorer.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestTermVectors.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestThreadSafe.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestWildcard.java
    lucene/java/trunk/src/test/org/apache/lucene/search/payloads/PayloadHelper.java
    lucene/java/trunk/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
    lucene/java/trunk/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java
    lucene/java/trunk/src/test/org/apache/lucene/search/spans/TestBasics.java
    lucene/java/trunk/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java
    lucene/java/trunk/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
    lucene/java/trunk/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java
    lucene/java/trunk/src/test/org/apache/lucene/search/spans/TestSpans.java
    lucene/java/trunk/src/test/org/apache/lucene/store/TestBufferedIndexInput.java
    lucene/java/trunk/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java
    lucene/java/trunk/src/test/org/apache/lucene/store/TestLockFactory.java
    lucene/java/trunk/src/test/org/apache/lucene/store/TestRAMDirectory.java
    lucene/java/trunk/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java

Modified: lucene/java/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/java/trunk/CHANGES.txt?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/CHANGES.txt (original)
+++ lucene/java/trunk/CHANGES.txt Sun Jan 31 14:05:39 2010
@@ -61,6 +61,9 @@
   members. These were converted to private and unused protected
   constructors removed.  (Steven Rowe via Robert Muir)
 
+* LUCENE-2240: SimpleAnalyzer and WhitespaceAnalyzer now have
+  Version ctors.  (Simon Willnauer via Uwe Schindler)
+
 Bug fixes
 
 * LUCENE-2092: BooleanQuery was ignoring disableCoord in its hashCode
@@ -128,10 +131,11 @@
 * LUCENE-2198: Support protected words in stemming TokenFilters using a
   new KeywordAttribute.  (Simon Willnauer via Uwe Schindler)
   
-* LUCENE-2183: Added Unicode 4 support to CharTokenizer and its subclasses.
-  CharTokenizer now has new int-API which is conditionally preferred to
-  the old char-API depending on the provided Version. Version < 3.1 will
-  use the char-API. (Simon Willnauer via Uwe Schindler)
+* LUCENE-2183, LUCENE-2240, LUCENE-2241: Added Unicode 4 support
+  to CharTokenizer and its subclasses. CharTokenizer now has new
+  int-API which is conditionally preferred to the old char-API depending
+  on the provided Version. Version < 3.1 will use the char-API.
+  (Simon Willnauer via Uwe Schindler)
 
 Optimizations
 

Modified: lucene/java/trunk/src/java/org/apache/lucene/analysis/SimpleAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/analysis/SimpleAnalyzer.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/analysis/SimpleAnalyzer.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/analysis/SimpleAnalyzer.java Sun Jan 31 14:05:39 2010
@@ -19,14 +19,42 @@
 
 import java.io.Reader;
 
-/** An {@link Analyzer} that filters {@link LetterTokenizer} 
- *  with {@link LowerCaseFilter} */
+import org.apache.lucene.util.Version;
 
+/** An {@link Analyzer} that filters {@link LetterTokenizer} 
+ *  with {@link LowerCaseFilter} 
+ * <p>
+ * <a name="version">You must specify the required {@link Version} compatibility
+ * when creating {@link CharTokenizer}:
+ * <ul>
+ * <li>As of 3.1, {@link LowerCaseTokenizer} uses an int based API to normalize and
+ * detect token codepoints. See {@link CharTokenizer#isTokenChar(int)} and
+ * {@link CharTokenizer#normalize(int)} for details.</li>
+ * </ul>
+ * <p>
+ **/
 public final class SimpleAnalyzer extends ReusableAnalyzerBase {
 
+  private final Version matchVersion;
+  
+  /**
+   * Creates a new {@link SimpleAnalyzer}
+   * @param matchVersion Lucene version to match See {@link <a href="#version">above</a>}
+   */
+  public SimpleAnalyzer(Version matchVersion) {
+    this.matchVersion = matchVersion;
+  }
+  
+  /**
+   * Creates a new {@link SimpleAnalyzer}
+   * @deprecated use {@link #SimpleAnalyzer(Version)} instead 
+   */
+  @Deprecated  public SimpleAnalyzer() {
+    this(Version.LUCENE_30);
+  }
   @Override
   protected TokenStreamComponents createComponents(final String fieldName,
       final Reader reader) {
-    return new TokenStreamComponents(new LowerCaseTokenizer(reader));
+    return new TokenStreamComponents(new LowerCaseTokenizer(matchVersion, reader));
   }
 }
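
The javadoc above points at CharTokenizer's codepoint-based API that the matchVersion argument selects. A minimal illustrative subclass (the class name and its letter-or-digit rule are invented for this sketch, not part of the commit):

import java.io.Reader;
import org.apache.lucene.analysis.CharTokenizer;
import org.apache.lucene.util.Version;

public final class LetterOrDigitTokenizer extends CharTokenizer {
  public LetterOrDigitTokenizer(Version matchVersion, Reader in) {
    super(matchVersion, in);  // matchVersion >= LUCENE_31 enables the int-based callbacks below
  }

  @Override
  protected boolean isTokenChar(int c) {   // called per Unicode codepoint, not per char
    return Character.isLetterOrDigit(c);
  }

  @Override
  protected int normalize(int c) {         // also codepoint-based
    return Character.toLowerCase(c);
  }
}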

Modified: lucene/java/trunk/src/java/org/apache/lucene/analysis/StopAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/analysis/StopAnalyzer.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/analysis/StopAnalyzer.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/analysis/StopAnalyzer.java Sun Jan 31 14:05:39 2010
@@ -99,7 +99,7 @@
   @Override
   protected TokenStreamComponents createComponents(String fieldName,
       Reader reader) {
-    final Tokenizer source = new LowerCaseTokenizer(reader);
+    final Tokenizer source = new LowerCaseTokenizer(matchVersion, reader);
     return new TokenStreamComponents(source, new StopFilter(matchVersion,
           source, stopwords));
   }

Modified: lucene/java/trunk/src/java/org/apache/lucene/analysis/WhitespaceAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/analysis/WhitespaceAnalyzer.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/analysis/WhitespaceAnalyzer.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/analysis/WhitespaceAnalyzer.java Sun Jan 31 14:05:39 2010
@@ -19,13 +19,44 @@
 
 import java.io.Reader;
 
-/** An Analyzer that uses {@link WhitespaceTokenizer}. */
+import org.apache.lucene.util.Version;
 
+/**
+ * An Analyzer that uses {@link WhitespaceTokenizer}.
+ * <p>
+ * <a name="version">You must specify the required {@link Version} compatibility
+ * when creating {@link CharTokenizer}:
+ * <ul>
+ * <li>As of 3.1, {@link WhitespaceTokenizer} uses an int based API to normalize and
+ * detect token codepoints. See {@link CharTokenizer#isTokenChar(int)} and
+ * {@link CharTokenizer#normalize(int)} for details.</li>
+ * </ul>
+ * <p>
+ **/
 public final class WhitespaceAnalyzer extends ReusableAnalyzerBase {
-
+  
+  private final Version matchVersion;
+  
+  /**
+   * Creates a new {@link WhitespaceAnalyzer}
+   * @param matchVersion Lucene version to match See {@link <a href="#version">above</a>}
+   */
+  public WhitespaceAnalyzer(Version matchVersion) {
+    this.matchVersion = matchVersion;
+  }
+  
+  /**
+   * Creates a new {@link WhitespaceAnalyzer}
+   * @deprecated use {@link #WhitespaceAnalyzer(Version)} instead 
+   */
+  @Deprecated
+  public WhitespaceAnalyzer() {
+    this(Version.LUCENE_30);
+  }
+  
   @Override
   protected TokenStreamComponents createComponents(final String fieldName,
       final Reader reader) {
-    return new TokenStreamComponents(new WhitespaceTokenizer(reader));
+    return new TokenStreamComponents(new WhitespaceTokenizer(matchVersion, reader));
   }
 }
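
Most of the test changes below simply swap the deprecated no-arg ctor for the Version ctor when constructing an IndexWriter. A condensed sketch of that pattern (class name and field values invented for illustration):

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class WhitespaceAnalyzerIndexingSketch {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();
    // Version ctor instead of the deprecated new WhitespaceAnalyzer()
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
                                         true, IndexWriter.MaxFieldLength.LIMITED);
    Document doc = new Document();
    doc.add(new Field("contents", "hello whitespace analyzer", Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);
    writer.close();
    dir.close();
  }
}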

Modified: lucene/java/trunk/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/TestMergeSchedulerExternal.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/TestMergeSchedulerExternal.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/TestMergeSchedulerExternal.java Sun Jan 31 14:05:39 2010
@@ -18,6 +18,7 @@
  */
 import java.io.IOException;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.store.Directory;
@@ -95,7 +96,7 @@
     Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
     doc.add(idField);
     
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     MyMergeScheduler ms = new MyMergeScheduler();
     writer.setMergeScheduler(ms);
     writer.setMaxBufferedDocs(2);

Modified: lucene/java/trunk/src/test/org/apache/lucene/TestSearch.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/TestSearch.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/TestSearch.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/TestSearch.java Sun Jan 31 14:05:39 2010
@@ -74,7 +74,7 @@
     throws Exception
     {
       Directory directory = new RAMDirectory();
-      Analyzer analyzer = new SimpleAnalyzer();
+      Analyzer analyzer = new SimpleAnalyzer(Version.LUCENE_CURRENT);
       IndexWriter writer = new IndexWriter(directory, analyzer, true, 
                                            IndexWriter.MaxFieldLength.LIMITED);
 

Modified: lucene/java/trunk/src/test/org/apache/lucene/TestSearchForDuplicates.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/TestSearchForDuplicates.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/TestSearchForDuplicates.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/TestSearchForDuplicates.java Sun Jan 31 14:05:39 2010
@@ -79,7 +79,7 @@
 
   private void doTest(PrintWriter out, boolean useCompoundFiles) throws Exception {
       Directory directory = new RAMDirectory();
-      Analyzer analyzer = new SimpleAnalyzer();
+      Analyzer analyzer = new SimpleAnalyzer(Version.LUCENE_CURRENT);
       IndexWriter writer = new IndexWriter(directory, analyzer, true,
                                            IndexWriter.MaxFieldLength.LIMITED);
 

Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java Sun Jan 31 14:05:39 2010
@@ -18,6 +18,7 @@
  */
 
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.util.Version;
 
 import java.io.StringReader;
 import java.util.List;
@@ -28,7 +29,7 @@
 
   // testLain1Accents() is a copy of TestLatin1AccentFilter.testU().
   public void testLatin1Accents() throws Exception {
-    TokenStream stream = new WhitespaceTokenizer(new StringReader
+    TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader
       ("Des mot clés À LA CHAÎNE À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï IJ Ð Ñ"
       +" Ò Ó Ô Õ Ö Ø Œ Þ Ù Ú Û Ü Ý Ÿ à á â ã ä å æ ç è é ê ë ì í î ï ij"
       +" ð ñ ò ó ô õ ö ø œ ß þ ù ú û ü ý ÿ fi fl"));
@@ -1889,7 +1890,7 @@
       expectedOutputTokens.add(expected.toString());
     }
 
-    TokenStream stream = new WhitespaceTokenizer(new StringReader(inputText.toString()));
+    TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(inputText.toString()));
     ASCIIFoldingFilter filter = new ASCIIFoldingFilter(stream);
     TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
     Iterator<String> expectedIter = expectedOutputTokens.iterator();

Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestAnalyzers.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestAnalyzers.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestAnalyzers.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestAnalyzers.java Sun Jan 31 14:05:39 2010
@@ -35,7 +35,7 @@
    }
 
   public void testSimple() throws Exception {
-    Analyzer a = new SimpleAnalyzer();
+    Analyzer a = new SimpleAnalyzer(Version.LUCENE_CURRENT);
     assertAnalyzesTo(a, "foo bar FOO BAR", 
                      new String[] { "foo", "bar", "foo", "bar" });
     assertAnalyzesTo(a, "foo      bar .  FOO <> BAR", 
@@ -55,7 +55,7 @@
   }
 
   public void testNull() throws Exception {
-    Analyzer a = new WhitespaceAnalyzer();
+    Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
     assertAnalyzesTo(a, "foo bar FOO BAR", 
                      new String[] { "foo", "bar", "FOO", "BAR" });
     assertAnalyzesTo(a, "foo      bar .  FOO <> BAR", 
@@ -97,11 +97,11 @@
   public void testPayloadCopy() throws IOException {
     String s = "how now brown cow";
     TokenStream ts;
-    ts = new WhitespaceTokenizer(new StringReader(s));
+    ts = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(s));
     ts = new PayloadSetter(ts);
     verifyPayload(ts);
 
-    ts = new WhitespaceTokenizer(new StringReader(s));
+    ts = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(s));
     ts = new PayloadSetter(ts);
     verifyPayload(ts);
   }
@@ -127,7 +127,7 @@
   
     @Override
     public TokenStream tokenStream(String field, Reader reader) {
-      return new WhitespaceAnalyzer().tokenStream(field, reader);
+      return new WhitespaceAnalyzer(Version.LUCENE_CURRENT).tokenStream(field, reader);
     }
   }
 
@@ -145,7 +145,7 @@
     @Override
     public TokenStream tokenStream(String fieldName, Reader reader) {
       return new LowerCaseFilter(Version.LUCENE_CURRENT,
-          new WhitespaceTokenizer(reader));
+          new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader));
     }
     
   }
@@ -192,8 +192,8 @@
   public void testLowerCaseFilterLowSurrogateLeftover() throws IOException {
     // test if the limit of the termbuffer is correctly used with supplementary
     // chars
-    WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(new StringReader(
-        "BogustermBogusterm\udc16"));
+    WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT, 
+        new StringReader("BogustermBogusterm\udc16"));
     LowerCaseFilter filter = new LowerCaseFilter(Version.LUCENE_CURRENT,
         tokenizer);
     assertTokenStreamContents(filter, new String[] {"bogustermbogusterm\udc16"});

Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java Sun Jan 31 14:05:39 2010
@@ -31,13 +31,14 @@
 import org.apache.lucene.index.TermPositions;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.Version;
 
 public class TestCachingTokenFilter extends BaseTokenStreamTestCase {
   private String[] tokens = new String[] {"term1", "term2", "term3", "term2"};
   
   public void testCaching() throws IOException {
     Directory dir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
     Document doc = new Document();
     TokenStream stream = new TokenStream() {
       private int index = 0;

Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCharTokenizers.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCharTokenizers.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCharTokenizers.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCharTokenizers.java Sun Jan 31 14:05:39 2010
@@ -100,7 +100,7 @@
 
   public void testLowerCaseTokenizer() throws IOException {
     StringReader reader = new StringReader("Tokenizer \ud801\udc1ctest");
-    LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(Version.LUCENE_31,
+    LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(Version.LUCENE_CURRENT,
         reader);
     assertTokenStreamContents(tokenizer, new String[] { "tokenizer",
         "\ud801\udc44test" });
@@ -115,7 +115,7 @@
 
   public void testWhitespaceTokenizer() throws IOException {
     StringReader reader = new StringReader("Tokenizer \ud801\udc1ctest");
-    WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_31,
+    WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT,
         reader);
     assertTokenStreamContents(tokenizer, new String[] { "Tokenizer",
         "\ud801\udc1ctest" });

Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java Sun Jan 31 14:05:39 2010
@@ -18,12 +18,13 @@
  */
 
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.util.Version;
 
 import java.io.StringReader;
 
 public class TestISOLatin1AccentFilter extends BaseTokenStreamTestCase {
   public void testU() throws Exception {
-    TokenStream stream = new WhitespaceTokenizer(new StringReader("Des mot clés À LA CHAÎNE À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï IJ Ð Ñ Ò Ó Ô Õ Ö Ø Œ Þ Ù Ú Û Ü Ý Ÿ à á â ã ä å æ ç è é ê ë ì í î ï ij ð ñ ò ó ô õ ö ø œ ß þ ù ú û ü ý ÿ fi fl"));
+    TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("Des mot clés À LA CHAÎNE À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï IJ Ð Ñ Ò Ó Ô Õ Ö Ø Œ Þ Ù Ú Û Ü Ý Ÿ à á â ã ä å æ ç è é ê ë ì í î ï ij ð ñ ò ó ô õ ö ø œ ß þ ù ú û ü ý ÿ fi fl"));
     ISOLatin1AccentFilter filter = new ISOLatin1AccentFilter(stream);
     TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
     assertTermEquals("Des", filter, termAtt);

Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java Sun Jan 31 14:05:39 2010
@@ -43,7 +43,7 @@
     super.setUp();
     directory = new RAMDirectory();
     IndexWriter writer = new IndexWriter(directory,
-                                         new SimpleAnalyzer(),
+                                         new SimpleAnalyzer(Version.LUCENE_CURRENT),
                                          true, IndexWriter.MaxFieldLength.LIMITED);
 
     Document doc = new Document();
@@ -57,7 +57,7 @@
   }
 
   public void testPerFieldAnalyzer() throws Exception {
-    PerFieldAnalyzerWrapper analyzer = new PerFieldAnalyzerWrapper(new SimpleAnalyzer());
+    PerFieldAnalyzerWrapper analyzer = new PerFieldAnalyzerWrapper(new SimpleAnalyzer(Version.LUCENE_CURRENT));
     analyzer.addAnalyzer("partnum", new KeywordAnalyzer());
 
     QueryParser queryParser = new QueryParser(Version.LUCENE_CURRENT, "description", analyzer);

Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordMarkerTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordMarkerTokenFilter.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordMarkerTokenFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordMarkerTokenFilter.java Sun Jan 31 14:05:39 2010
@@ -39,16 +39,16 @@
     String[] output = new String[] { "the", "quick", "brown", "LuceneFox",
         "jumps" };
     assertTokenStreamContents(new LowerCaseFilterMock(
-        new KeywordMarkerTokenFilter(new WhitespaceTokenizer(new StringReader(
+        new KeywordMarkerTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
             "The quIck browN LuceneFox Jumps")), set)), output);
     Set<String> jdkSet = new HashSet<String>();
     jdkSet.add("LuceneFox");
     assertTokenStreamContents(new LowerCaseFilterMock(
-        new KeywordMarkerTokenFilter(new WhitespaceTokenizer(new StringReader(
+        new KeywordMarkerTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
             "The quIck browN LuceneFox Jumps")), jdkSet)), output);
     Set<?> set2 = set;
     assertTokenStreamContents(new LowerCaseFilterMock(
-        new KeywordMarkerTokenFilter(new WhitespaceTokenizer(new StringReader(
+        new KeywordMarkerTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
             "The quIck browN LuceneFox Jumps")), set2)), output);
   }
 

Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestLengthFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestLengthFilter.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestLengthFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestLengthFilter.java Sun Jan 31 14:05:39 2010
@@ -18,13 +18,14 @@
  */
 
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.util.Version;
 
 import java.io.StringReader;
 
 public class TestLengthFilter extends BaseTokenStreamTestCase {
   
   public void testFilter() throws Exception {
-    TokenStream stream = new WhitespaceTokenizer(
+    TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, 
         new StringReader("short toolong evenmuchlongertext a ab toolong foo"));
     LengthFilter filter = new LengthFilter(stream, 2, 6);
     TermAttribute termAtt = filter.getAttribute(TermAttribute.class);

Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java Sun Jan 31 14:05:39 2010
@@ -19,6 +19,8 @@
 
 import java.io.StringReader;
 
+import org.apache.lucene.util.Version;
+
 public class TestMappingCharFilter extends BaseTokenStreamTestCase {
 
   NormalizeCharMap normMap;
@@ -58,55 +60,55 @@
 
   public void testNothingChange() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "x" ) );
-    TokenStream ts = new WhitespaceTokenizer( cs );
+    TokenStream ts = new WhitespaceTokenizer(Version.LUCENE_CURRENT, cs );
     assertTokenStreamContents(ts, new String[]{"x"}, new int[]{0}, new int[]{1});
   }
 
   public void test1to1() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "h" ) );
-    TokenStream ts = new WhitespaceTokenizer( cs );
+    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
     assertTokenStreamContents(ts, new String[]{"i"}, new int[]{0}, new int[]{1});
   }
 
   public void test1to2() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "j" ) );
-    TokenStream ts = new WhitespaceTokenizer( cs );
+    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
     assertTokenStreamContents(ts, new String[]{"jj"}, new int[]{0}, new int[]{1});
   }
 
   public void test1to3() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "k" ) );
-    TokenStream ts = new WhitespaceTokenizer( cs );
+    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
     assertTokenStreamContents(ts, new String[]{"kkk"}, new int[]{0}, new int[]{1});
   }
 
   public void test2to4() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "ll" ) );
-    TokenStream ts = new WhitespaceTokenizer( cs );
+    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
     assertTokenStreamContents(ts, new String[]{"llll"}, new int[]{0}, new int[]{2});
   }
 
   public void test2to1() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "aa" ) );
-    TokenStream ts = new WhitespaceTokenizer( cs );
+    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
     assertTokenStreamContents(ts, new String[]{"a"}, new int[]{0}, new int[]{2});
   }
 
   public void test3to1() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "bbb" ) );
-    TokenStream ts = new WhitespaceTokenizer( cs );
+    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
     assertTokenStreamContents(ts, new String[]{"b"}, new int[]{0}, new int[]{3});
   }
 
   public void test4to2() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "cccc" ) );
-    TokenStream ts = new WhitespaceTokenizer( cs );
+    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
     assertTokenStreamContents(ts, new String[]{"cc"}, new int[]{0}, new int[]{4});
   }
 
   public void test5to0() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "empty" ) );
-    TokenStream ts = new WhitespaceTokenizer( cs );
+    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
     assertTokenStreamContents(ts, new String[0]);
   }
 
@@ -130,7 +132,7 @@
   //
   public void testTokenStream() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, CharReader.get( new StringReader( "h i j k ll cccc bbb aa" ) ) );
-    TokenStream ts = new WhitespaceTokenizer( cs );
+    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
     assertTokenStreamContents(ts,
       new String[]{"i","i","jj","kkk","llll","cc","b","a"},
       new int[]{0,2,4,6,8,11,16,20},
@@ -151,7 +153,7 @@
   public void testChained() throws Exception {
     CharStream cs = new MappingCharFilter( normMap,
         new MappingCharFilter( normMap, CharReader.get( new StringReader( "aaaa ll h" ) ) ) );
-    TokenStream ts = new WhitespaceTokenizer( cs );
+    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
     assertTokenStreamContents(ts,
       new String[]{"a","llllllll","i"},
       new int[]{0,5,8},

Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java Sun Jan 31 14:05:39 2010
@@ -3,6 +3,7 @@
 import java.io.StringReader;
 
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.util.Version;
 
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -25,8 +26,8 @@
   public void testPerField() throws Exception {
     String text = "Qwerty";
     PerFieldAnalyzerWrapper analyzer =
-              new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer());
-    analyzer.addAnalyzer("special", new SimpleAnalyzer());
+              new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+    analyzer.addAnalyzer("special", new SimpleAnalyzer(Version.LUCENE_CURRENT));
 
     TokenStream tokenStream = analyzer.tokenStream("field",
                                             new StringReader(text));

Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestPorterStemFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestPorterStemFilter.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestPorterStemFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestPorterStemFilter.java Sun Jan 31 14:05:39 2010
@@ -62,7 +62,7 @@
   public void testWithKeywordAttribute() throws IOException {
     CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
     set.add("yourselves");
-    Tokenizer tokenizer = new WhitespaceTokenizer(new StringReader("yourselves yours"));
+    Tokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("yourselves yours"));
     TokenStream filter = new PorterStemFilter(new KeywordMarkerTokenFilter(tokenizer, set));   
     assertTokenStreamContents(filter, new String[] {"yourselves", "your"});
   }

Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopFilter.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopFilter.java Sun Jan 31 14:05:39 2010
@@ -38,7 +38,7 @@
   public void testExactCase() throws IOException {
     StringReader reader = new StringReader("Now is The Time");
     Set<String> stopWords = new HashSet<String>(Arrays.asList("is", "the", "Time"));
-    TokenStream stream = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(reader), stopWords, false);
+    TokenStream stream = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopWords, false);
     final TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
     assertTrue(stream.incrementToken());
     assertEquals("Now", termAtt.term());
@@ -50,7 +50,7 @@
   public void testIgnoreCase() throws IOException {
     StringReader reader = new StringReader("Now is The Time");
     Set<Object> stopWords = new HashSet<Object>(Arrays.asList( "is", "the", "Time" ));
-    TokenStream stream = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(reader), stopWords, true);
+    TokenStream stream = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopWords, true);
     final TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
     assertTrue(stream.incrementToken());
     assertEquals("Now", termAtt.term());
@@ -61,7 +61,7 @@
     StringReader reader = new StringReader("Now is The Time");
     String[] stopWords = new String[] { "is", "the", "Time" };
     Set<Object> stopSet = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords);
-    TokenStream stream = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(reader), stopSet);
+    TokenStream stream = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopSet);
     final TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
     assertTrue(stream.incrementToken());
     assertEquals("Now", termAtt.term());
@@ -87,11 +87,11 @@
     Set<Object> stopSet = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords);
     // with increments
     StringReader reader = new StringReader(sb.toString());
-    StopFilter stpf = new StopFilter(Version.LUCENE_24, new WhitespaceTokenizer(reader), stopSet);
+    StopFilter stpf = new StopFilter(Version.LUCENE_24, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopSet);
     doTestStopPositons(stpf,true);
     // without increments
     reader = new StringReader(sb.toString());
-    stpf = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(reader), stopSet);
+    stpf = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopSet);
     doTestStopPositons(stpf,false);
     // with increments, concatenating two stop filters
     ArrayList<String> a0 = new ArrayList<String>();
@@ -110,7 +110,7 @@
     Set<Object> stopSet0 = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords0);
     Set<Object> stopSet1 = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords1);
     reader = new StringReader(sb.toString());
-    StopFilter stpf0 = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(reader), stopSet0); // first part of the set
+    StopFilter stpf0 = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopSet0); // first part of the set
     stpf0.setEnablePositionIncrements(true);
     StopFilter stpf01 = new StopFilter(Version.LUCENE_CURRENT, stpf0, stopSet1); // two stop filters concatenated!
     doTestStopPositons(stpf01,true);

Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java Sun Jan 31 14:05:39 2010
@@ -76,7 +76,7 @@
 
   
   public void testGeneral() throws IOException {
-    final TeeSinkTokenFilter source = new TeeSinkTokenFilter(new WhitespaceTokenizer(new StringReader(buffer1.toString())));
+    final TeeSinkTokenFilter source = new TeeSinkTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer1.toString())));
     final TokenStream sink1 = source.newSinkTokenStream();
     final TokenStream sink2 = source.newSinkTokenStream(theFilter);
     
@@ -90,7 +90,7 @@
   }
 
   public void testMultipleSources() throws Exception {
-    final TeeSinkTokenFilter tee1 = new TeeSinkTokenFilter(new WhitespaceTokenizer(new StringReader(buffer1.toString())));
+    final TeeSinkTokenFilter tee1 = new TeeSinkTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer1.toString())));
     final TeeSinkTokenFilter.SinkTokenStream dogDetector = tee1.newSinkTokenStream(dogFilter);
     final TeeSinkTokenFilter.SinkTokenStream theDetector = tee1.newSinkTokenStream(theFilter);
     final TokenStream source1 = new CachingTokenFilter(tee1);
@@ -99,7 +99,7 @@
     dogDetector.addAttribute(CheckClearAttributesAttribute.class);
     theDetector.addAttribute(CheckClearAttributesAttribute.class);
 
-    final TeeSinkTokenFilter tee2 = new TeeSinkTokenFilter(new WhitespaceTokenizer(new StringReader(buffer2.toString())));
+    final TeeSinkTokenFilter tee2 = new TeeSinkTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer2.toString())));
     tee2.addSinkTokenStream(dogDetector);
     tee2.addSinkTokenStream(theDetector);
     final TokenStream source2 = tee2;

Modified: lucene/java/trunk/src/test/org/apache/lucene/collation/CollationTestBase.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/collation/CollationTestBase.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/collation/CollationTestBase.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/collation/CollationTestBase.java Sun Jan 31 14:05:39 2010
@@ -179,7 +179,7 @@
                                    String usResult) throws Exception {
     RAMDirectory indexStore = new RAMDirectory();
     PerFieldAnalyzerWrapper analyzer
-      = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer());
+      = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
     analyzer.addAnalyzer("US", usAnalyzer);
     analyzer.addAnalyzer("France", franceAnalyzer);
     analyzer.addAnalyzer("Sweden", swedenAnalyzer);

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/DocHelper.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/DocHelper.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/DocHelper.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/DocHelper.java Sun Jan 31 14:05:39 2010
@@ -29,6 +29,7 @@
 import org.apache.lucene.document.Fieldable;
 import org.apache.lucene.search.Similarity;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.Version;
 
 class DocHelper {
   public static final String FIELD_1_TEXT = "field one text";
@@ -218,7 +219,7 @@
    */ 
   public static SegmentInfo writeDoc(Directory dir, Document doc) throws IOException
   {
-    return writeDoc(dir, new WhitespaceAnalyzer(), Similarity.getDefault(), doc);
+    return writeDoc(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), Similarity.getDefault(), doc);
   }
 
   /**

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java Sun Jan 31 14:05:39 2010
@@ -20,6 +20,7 @@
 import java.io.IOException;
 
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
 
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.document.Document;
@@ -428,7 +429,7 @@
 
   private IndexWriter newWriter(Directory dir, boolean create)
       throws IOException {
-    final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), create, IndexWriter.MaxFieldLength.UNLIMITED);
+    final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), create, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setMergePolicy(new LogDocMergePolicy(writer));
     return writer;
   }
@@ -502,7 +503,7 @@
   public void testHangOnClose() throws IOException {
 
     Directory dir = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMergePolicy(new LogByteSizeMergePolicy(writer));
     writer.setMaxBufferedDocs(5);
     writer.setUseCompoundFile(false);
@@ -528,7 +529,7 @@
     writer.close();
 
     Directory dir2 = new MockRAMDirectory();
-    writer = new IndexWriter(dir2, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    writer = new IndexWriter(dir2, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy(writer);
     lmp.setMinMergeMB(0.0001);
     writer.setMergePolicy(lmp);

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestAtomicUpdate.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestAtomicUpdate.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestAtomicUpdate.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestAtomicUpdate.java Sun Jan 31 14:05:39 2010
@@ -26,7 +26,7 @@
 import java.io.IOException;
 
 public class TestAtomicUpdate extends LuceneTestCase {
-  private static final Analyzer ANALYZER = new SimpleAnalyzer();
+  private static final Analyzer ANALYZER = new SimpleAnalyzer(Version.LUCENE_CURRENT);
   private Random RANDOM;
 
   public class MockIndexWriter extends IndexWriter {

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java Sun Jan 31 14:05:39 2010
@@ -45,6 +45,7 @@
 import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.util.ReaderUtil;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
 import org.apache.lucene.util._TestUtil;
 
 /*
@@ -217,7 +218,7 @@
         hasTested29++;
       }
 
-      IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+      IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
       w.optimize();
       w.close();
 
@@ -272,7 +273,7 @@
   }
 
   public void searchIndex(String dirName, String oldName) throws IOException {
-    //QueryParser parser = new QueryParser("contents", new WhitespaceAnalyzer());
+    //QueryParser parser = new QueryParser("contents", new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
     //Query query = parser.parse("handle:1");
 
     dirName = fullDir(dirName);
@@ -357,7 +358,7 @@
     Directory dir = FSDirectory.open(new File(dirName));
 
     // open writer
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
 
     // add 10 docs
     for(int i=0;i<10;i++) {
@@ -401,7 +402,7 @@
     searcher.close();
 
     // optimize
-    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.UNLIMITED);
+    writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.optimize();
     writer.close();
 
@@ -451,7 +452,7 @@
     searcher.close();
 
     // optimize
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.optimize();
     writer.close();
 
@@ -473,7 +474,7 @@
     dirName = fullDir(dirName);
 
     Directory dir = FSDirectory.open(new File(dirName));
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setUseCompoundFile(doCFS);
     writer.setMaxBufferedDocs(10);
     
@@ -484,7 +485,7 @@
     writer.close();
 
     // open fresh writer so we get no prx file in the added segment
-    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+    writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
     writer.setUseCompoundFile(doCFS);
     writer.setMaxBufferedDocs(10);
     addNoProxDoc(writer);
@@ -511,7 +512,7 @@
     try {
       Directory dir = FSDirectory.open(new File(fullDir(outputDir)));
 
-      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setRAMBufferSizeMB(16.0);
       for(int i=0;i<35;i++) {
         addDoc(writer, i);

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestCheckIndex.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestCheckIndex.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestCheckIndex.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestCheckIndex.java Sun Jan 31 14:05:39 2010
@@ -24,6 +24,7 @@
 import java.util.ArrayList;
 
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
 import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.document.Document;
@@ -34,7 +35,7 @@
 
   public void testDeletedDocs() throws IOException {
     MockRAMDirectory dir = new MockRAMDirectory();
-    IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), true, 
+    IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, 
                                           IndexWriter.MaxFieldLength.LIMITED);      
     writer.setMaxBufferedDocs(2);
     Document doc = new Document();

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java Sun Jan 31 14:05:39 2010
@@ -25,12 +25,13 @@
 import org.apache.lucene.document.Field;
 
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
 
 import java.io.IOException;
 
 public class TestConcurrentMergeScheduler extends LuceneTestCase {
   
-  private static final Analyzer ANALYZER = new SimpleAnalyzer();
+  private static final Analyzer ANALYZER = new SimpleAnalyzer(Version.LUCENE_CURRENT);
 
   private static class FailOnlyOnFlush extends MockRAMDirectory.Failure {
     boolean doFail;

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestCrash.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestCrash.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestCrash.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestCrash.java Sun Jan 31 14:05:39 2010
@@ -20,6 +20,7 @@
 import java.io.IOException;
 
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.store.NoLockFactory;
@@ -35,7 +36,7 @@
   private IndexWriter initIndex(MockRAMDirectory dir) throws IOException {
     dir.setLockFactory(NoLockFactory.getNoLockFactory());
 
-    IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
     //writer.setMaxBufferedDocs(2);
     writer.setMaxBufferedDocs(10);
     ((ConcurrentMergeScheduler) writer.getMergeScheduler()).setSuppressExceptions();

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java Sun Jan 31 14:05:39 2010
@@ -34,6 +34,7 @@
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
 
 /*
   Verify we can read the pre-2.1 file format, do searches
@@ -201,7 +202,7 @@
 
     Directory dir = new RAMDirectory();
     ExpirationTimeDeletionPolicy policy = new ExpirationTimeDeletionPolicy(dir, SECONDS);
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setUseCompoundFile(useCompoundFile);
     writer.close();
 
@@ -210,7 +211,7 @@
       // Record last time when writer performed deletes of
       // past commits
       lastDeleteTime = System.currentTimeMillis();
-      writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+      writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setUseCompoundFile(useCompoundFile);
       for(int j=0;j<17;j++) {
         addDoc(writer);
@@ -271,7 +272,7 @@
       Directory dir = new RAMDirectory();
       policy.dir = dir;
 
-      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(10);
       writer.setUseCompoundFile(useCompoundFile);
       writer.setMergeScheduler(new SerialMergeScheduler());
@@ -280,7 +281,7 @@
       }
       writer.close();
 
-      writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+      writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setUseCompoundFile(useCompoundFile);
       writer.optimize();
       writer.close();
@@ -318,7 +319,7 @@
           // Open & close a writer and assert that it
           // actually removed something:
           int preCount = dir.listAll().length;
-          writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.LIMITED);
+          writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, policy, IndexWriter.MaxFieldLength.LIMITED);
           writer.close();
           int postCount = dir.listAll().length;
           assertTrue(postCount < preCount);
@@ -340,7 +341,7 @@
     Directory dir = new MockRAMDirectory();
     policy.dir = dir;
 
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), policy, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMaxBufferedDocs(2);
     for(int i=0;i<10;i++) {
       addDoc(writer);
@@ -359,7 +360,7 @@
     assertTrue(lastCommit != null);
 
     // Now add 1 doc and optimize
-    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), policy, IndexWriter.MaxFieldLength.LIMITED);
+    writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED);
     addDoc(writer);
     assertEquals(11, writer.numDocs());
     writer.optimize();
@@ -368,7 +369,7 @@
     assertEquals(7, IndexReader.listCommits(dir).size());
 
     // Now open writer on the commit just before optimize:
-    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), policy, IndexWriter.MaxFieldLength.LIMITED, lastCommit);
+    writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED, lastCommit);
     assertEquals(10, writer.numDocs());
 
     // Should undo our rollback:
@@ -380,7 +381,7 @@
     assertEquals(11, r.numDocs());
     r.close();
 
-    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), policy, IndexWriter.MaxFieldLength.LIMITED, lastCommit);
+    writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED, lastCommit);
     assertEquals(10, writer.numDocs());
     // Commits the rollback:
     writer.close();
@@ -396,7 +397,7 @@
     r.close();
 
     // Reoptimize
-    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), policy, IndexWriter.MaxFieldLength.LIMITED);
+    writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED);
     writer.optimize();
     writer.close();
 
@@ -407,7 +408,7 @@
 
     // Now open writer on the commit just before optimize,
     // but this time keeping only the last commit:
-    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), new KeepOnlyLastCommitDeletionPolicy(), IndexWriter.MaxFieldLength.LIMITED, lastCommit);
+    writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), new KeepOnlyLastCommitDeletionPolicy(), IndexWriter.MaxFieldLength.LIMITED, lastCommit);
     assertEquals(10, writer.numDocs());
     
     // Reader still sees optimized index, because writer
@@ -443,7 +444,7 @@
 
       Directory dir = new RAMDirectory();
 
-      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(10);
       writer.setUseCompoundFile(useCompoundFile);
       for(int i=0;i<107;i++) {
@@ -451,7 +452,7 @@
       }
       writer.close();
 
-      writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+      writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setUseCompoundFile(useCompoundFile);
       writer.optimize();
       writer.close();
@@ -486,7 +487,7 @@
       KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);
 
       for(int j=0;j<N+1;j++) {
-        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
         writer.setMaxBufferedDocs(10);
         writer.setUseCompoundFile(useCompoundFile);
         for(int i=0;i<17;i++) {
@@ -541,14 +542,14 @@
       KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);
 
       Directory dir = new RAMDirectory();
-      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setUseCompoundFile(useCompoundFile);
       writer.close();
       Term searchTerm = new Term("content", "aaa");        
       Query query = new TermQuery(searchTerm);
 
       for(int i=0;i<N+1;i++) {
-        writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+        writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
         writer.setUseCompoundFile(useCompoundFile);
         for(int j=0;j<17;j++) {
           addDoc(writer);
@@ -565,7 +566,7 @@
         reader.close();
         searcher.close();
       }
-      writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+      writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setUseCompoundFile(useCompoundFile);
       writer.optimize();
       // this is a commit
@@ -636,7 +637,7 @@
       KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);
 
       Directory dir = new RAMDirectory();
-      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(10);
       writer.setUseCompoundFile(useCompoundFile);
       writer.close();
@@ -645,7 +646,7 @@
 
       for(int i=0;i<N+1;i++) {
 
-        writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+        writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
         writer.setMaxBufferedDocs(10);
         writer.setUseCompoundFile(useCompoundFile);
         for(int j=0;j<17;j++) {
@@ -663,7 +664,7 @@
         reader.close();
         searcher.close();
 
-        writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+        writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
         // This will not commit: there are no changes
         // pending because we opened for "create":
         writer.close();

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestDoc.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestDoc.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestDoc.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestDoc.java Sun Jan 31 14:05:39 2010
@@ -35,6 +35,7 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
 
 
 /** JUnit adaptation of an older test case DocTest. */
@@ -109,7 +110,7 @@
       PrintWriter out = new PrintWriter(sw, true);
 
       Directory directory = FSDirectory.open(indexDir);
-      IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+      IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
 
       SegmentInfo si1 = indexDoc(writer, "test.txt");
       printSegment(out, si1);
@@ -137,7 +138,7 @@
       out = new PrintWriter(sw, true);
 
       directory = FSDirectory.open(indexDir);
-      writer = new IndexWriter(directory, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+      writer = new IndexWriter(directory, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
 
       si1 = indexDoc(writer, "test.txt");
       printSegment(out, si1);

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestDocumentWriter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestDocumentWriter.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestDocumentWriter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestDocumentWriter.java Sun Jan 31 14:05:39 2010
@@ -39,6 +39,7 @@
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.AttributeSource;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
 import org.apache.lucene.util._TestUtil;
 
 public class TestDocumentWriter extends LuceneTestCase {
@@ -61,7 +62,7 @@
   public void testAddDocument() throws Exception {
     Document testDoc = new Document();
     DocHelper.setupDoc(testDoc);
-    Analyzer analyzer = new WhitespaceAnalyzer();
+    Analyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
     IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
     writer.addDocument(testDoc);
     writer.commit();
@@ -110,7 +111,7 @@
     Analyzer analyzer = new Analyzer() {
       @Override
       public TokenStream tokenStream(String fieldName, Reader reader) {
-        return new WhitespaceTokenizer(reader);
+        return new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader);
       }
 
       @Override
@@ -143,7 +144,7 @@
     Analyzer analyzer = new Analyzer() {
       @Override
       public TokenStream tokenStream(String fieldName, Reader reader) {
-        return new TokenFilter(new WhitespaceTokenizer(reader)) {
+        return new TokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader)) {
           boolean first=true;
           AttributeSource.State state;
 
@@ -207,7 +208,7 @@
 
 
   public void testPreAnalyzedField() throws IOException {
-    IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     Document doc = new Document();
     
     doc.add(new Field("preanalyzed", new TokenStream() {
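
(For readers skimming this hunk: the anonymous Analyzers in TestDocumentWriter now pass the match Version to the tokenizer as well. A minimal standalone sketch of that pattern follows; the class and method names are illustrative only, but the constructor signature is the one exercised by the updated test.)

    import java.io.Reader;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.WhitespaceTokenizer;
    import org.apache.lucene.util.Version;

    // Sketch only: an Analyzer whose tokenizer is told which Version's
    // behavior to match, mirroring the anonymous Analyzers in this test.
    public class VersionAwareAnalyzerSketch {
      public static Analyzer newAnalyzer() {
        return new Analyzer() {
          @Override
          public TokenStream tokenStream(String fieldName, Reader reader) {
            // The match Version is now passed ahead of the Reader.
            return new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader);
          }
        };
      }
    }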

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestFieldsReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestFieldsReader.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestFieldsReader.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestFieldsReader.java Sun Jan 31 14:05:39 2010
@@ -18,6 +18,7 @@
  */
 
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.document.*;
 import org.apache.lucene.store.FSDirectory;
@@ -50,7 +51,7 @@
     fieldInfos = new FieldInfos();
     DocHelper.setupDoc(testDoc);
     fieldInfos.add(testDoc);
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setUseCompoundFile(false);
     writer.addDocument(testDoc);
     writer.close();
@@ -211,7 +212,7 @@
     FSDirectory tmpDir = FSDirectory.open(file);
     assertTrue(tmpDir != null);
 
-    IndexWriter writer = new IndexWriter(tmpDir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(tmpDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setUseCompoundFile(false);
     writer.addDocument(testDoc);
     writer.close();
@@ -392,7 +393,7 @@
 
     try {
       Directory dir = new FaultyFSDirectory(indexDir);
-      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
       for(int i=0;i<2;i++)
         writer.addDocument(testDoc);
       writer.optimize();

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestFilterIndexReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestFilterIndexReader.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestFilterIndexReader.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestFilterIndexReader.java Sun Jan 31 14:05:39 2010
@@ -19,6 +19,8 @@
 
 
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
+
 import junit.framework.TestSuite;
 import junit.textui.TestRunner;
 
@@ -97,7 +99,7 @@
    */
   public void testFilterIndexReader() throws Exception {
     RAMDirectory directory = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true,
+    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
                                          IndexWriter.MaxFieldLength.LIMITED);
 
     Document d1 = new Document();

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java?rev=905044&r1=905043&r2=905044&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java Sun Jan 31 14:05:39 2010
@@ -18,6 +18,7 @@
  */
 
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
 
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.store.Directory;
@@ -40,7 +41,7 @@
 
     Directory dir = new RAMDirectory();
 
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMaxBufferedDocs(10);
     int i;
     for(i=0;i<35;i++) {
@@ -145,7 +146,7 @@
 
     // Open & close a writer: it should delete the above 4
     // files and nothing more:
-    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
+    writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
     writer.close();
 
     String[] files2 = dir.listAll();
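
(For quick reference, the change repeated throughout this patch is the move from the deprecated no-arg analyzer constructors to the Version constructors added by LUCENE-2240/LUCENE-2241. A minimal sketch in isolation; the directory, field name, and document contents are illustrative only, while the constructors are the ones used in the tests above.)

    import java.io.IOException;

    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.Version;

    public class VersionCtorSketch {
      public static void main(String[] args) throws IOException {
        Directory dir = new RAMDirectory();
        // Before (deprecated): new IndexWriter(dir, new WhitespaceAnalyzer(), true,
        //                                      IndexWriter.MaxFieldLength.LIMITED)
        // After: the analyzer is told which Version's behavior to match.
        IndexWriter writer = new IndexWriter(dir,
            new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
            IndexWriter.MaxFieldLength.LIMITED);
        Document doc = new Document();
        doc.add(new Field("content", "aaa bbb ccc", Field.Store.YES, Field.Index.ANALYZED));
        writer.addDocument(doc);
        writer.close();
        dir.close();
      }
    }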


