lucene-java-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From uschind...@apache.org
Subject svn commit: r827979 [1/3] - in /lucene/java/trunk: ./ contrib/analyzers/common/src/java/org/apache/lucene/analysis/ar/ contrib/analyzers/common/src/java/org/apache/lucene/analysis/br/ contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/ co...
Date Wed, 21 Oct 2009 12:12:14 GMT
Author: uschindler
Date: Wed Oct 21 12:12:11 2009
New Revision: 827979

URL: http://svn.apache.org/viewvc?rev=827979&view=rev
Log:
LUCENE-1987: Remove rest of analysis deprecations (StandardAnalyzer, StopAnalyzer)

Modified:
    lucene/java/trunk/CHANGES.txt
    lucene/java/trunk/common-build.xml
    lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ar/ArabicAnalyzer.java
    lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java
    lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java
    lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java
    lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/de/GermanAnalyzer.java
    lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/el/GreekAnalyzer.java
    lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/fr/FrenchAnalyzer.java
    lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/nl/DutchAnalyzer.java
    lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java
    lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianAnalyzer.java
    lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapper.java
    lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/th/ThaiAnalyzer.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
    lucene/java/trunk/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java
    lucene/java/trunk/contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java
    lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/PerfRunData.java
    lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiQueryMaker.java
    lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/FileBasedQueryMaker.java
    lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ReutersQueryMaker.java
    lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleQueryMaker.java
    lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewAnalyzerTask.java
    lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SimpleQQParser.java
    lucene/java/trunk/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
    lucene/java/trunk/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java
    lucene/java/trunk/contrib/lucli/src/java/lucli/LuceneMethods.java
    lucene/java/trunk/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
    lucene/java/trunk/contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java
    lucene/java/trunk/contrib/misc/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java
    lucene/java/trunk/contrib/misc/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java
    lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java
    lucene/java/trunk/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java
    lucene/java/trunk/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java
    lucene/java/trunk/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java
    lucene/java/trunk/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java
    lucene/java/trunk/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java
    lucene/java/trunk/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQueryParserWrapper.java
    lucene/java/trunk/contrib/regex/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java
    lucene/java/trunk/contrib/snowball/src/java/org/apache/lucene/analysis/snowball/SnowballAnalyzer.java
    lucene/java/trunk/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java
    lucene/java/trunk/contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java
    lucene/java/trunk/contrib/xml-query-parser/src/demo/java/org/apache/lucene/xmlparser/webdemo/FormBasedXmlQueryDemo.java
    lucene/java/trunk/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java
    lucene/java/trunk/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestQueryTemplateManager.java
    lucene/java/trunk/src/java/org/apache/lucene/analysis/Analyzer.java
    lucene/java/trunk/src/java/org/apache/lucene/analysis/StopAnalyzer.java
    lucene/java/trunk/src/java/org/apache/lucene/analysis/StopFilter.java
    lucene/java/trunk/src/java/org/apache/lucene/analysis/standard/StandardAnalyzer.java
    lucene/java/trunk/src/java/org/apache/lucene/util/Version.java
    lucene/java/trunk/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestAnalyzers.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/document/TestBinaryDocument.java
    lucene/java/trunk/src/test/org/apache/lucene/document/TestDocument.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestDirectoryReader.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestDocumentWriter.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReaderReopen.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterMerging.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestNorms.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestOmitTf.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestParallelReader.java
    lucene/java/trunk/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java
    lucene/java/trunk/src/test/org/apache/lucene/queryParser/TestQueryParser.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestBooleanOr.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestCustomSearcherSort.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestFuzzyQuery.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestMultiSearcher.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestMultiSearcherRanking.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestPhraseQuery.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestPositionIncrement.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestSimpleExplanations.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestStressSort.java
    lucene/java/trunk/src/test/org/apache/lucene/search/function/FunctionTestSetup.java
    lucene/java/trunk/src/test/org/apache/lucene/search/spans/TestSpans.java
    lucene/java/trunk/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java
    lucene/java/trunk/src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java
    lucene/java/trunk/src/test/org/apache/lucene/store/TestWindowsMMap.java

Modified: lucene/java/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/java/trunk/CHANGES.txt?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/CHANGES.txt (original)
+++ lucene/java/trunk/CHANGES.txt Wed Oct 21 12:12:11 2009
@@ -77,8 +77,11 @@
 
 * LUCENE-1989: Generify CharArraySet. (Uwe Schindler)
 
-* LUCENE-1987: Rremove deprecations from analysis package and Token.
-  Un-deprecate some ctors of Token, as they are still useful.
+* LUCENE-1987: Un-deprecate some ctors of Token, as they will not
+  be removed in 3.0 and are still useful. Also add some missing
+  o.a.l.util.Version constants for enabling invalid acronym
+  settings in StandardAnalyzer to be compatible with the coming
+  Lucene 3.0. Remove the rest of deprecated analysis methods/classes.
   (Uwe Schindler)
 
 * LUCENE-1945: All public classes that have a close() method now

Modified: lucene/java/trunk/common-build.xml
URL: http://svn.apache.org/viewvc/lucene/java/trunk/common-build.xml?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/common-build.xml (original)
+++ lucene/java/trunk/common-build.xml Wed Oct 21 12:12:11 2009
@@ -42,7 +42,7 @@
   <property name="Name" value="Lucene"/>
   <property name="dev.version" value="3.0-dev"/>
   <property name="version" value="${dev.version}"/>
-  <property name="compatibility.tag" value="lucene_2_9_back_compat_tests_20091018"/>
+  <property name="compatibility.tag" value="lucene_2_9_back_compat_tests_20091021"/>
   <property name="spec.version" value="${version}"/>	
   <property name="year" value="2000-${current.year}"/>
   <property name="final.name" value="lucene-${name}-${version}"/>

Modified: lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ar/ArabicAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ar/ArabicAnalyzer.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ar/ArabicAnalyzer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ar/ArabicAnalyzer.java Wed Oct 21 12:12:11 2009
@@ -64,7 +64,7 @@
   /**
    * Contains the stopwords used with the StopFilter.
    */
-  private Set stoptable = new HashSet();
+  private final Set<?> stoptable;
   /**
    * The comment character in the stopwords file.  All lines prefixed with this will be ignored  
    */
@@ -119,15 +119,15 @@
   /**
    * Builds an analyzer with the given stop words.
    */
-  public ArabicAnalyzer( String[] stopwords ) {
+  public ArabicAnalyzer( String... stopwords ) {
     stoptable = StopFilter.makeStopSet( stopwords );
   }
 
   /**
    * Builds an analyzer with the given stop words.
    */
-  public ArabicAnalyzer( Hashtable stopwords ) {
-    stoptable = new HashSet(stopwords.keySet());
+  public ArabicAnalyzer( Hashtable<?,?> stopwords ) {
+    stoptable = new HashSet( stopwords.keySet() );
   }
 
   /**
@@ -149,7 +149,7 @@
     TokenStream result = new ArabicLetterTokenizer( reader );
     result = new LowerCaseFilter(result);
     // the order here is important: the stopword list is not normalized!
-    result = new StopFilter( result, stoptable );
+    result = new StopFilter(false, result, stoptable );
     result = new ArabicNormalizationFilter( result );
     result = new ArabicStemFilter( result );
 
@@ -177,7 +177,7 @@
       streams.source = new ArabicLetterTokenizer(reader);
       streams.result = new LowerCaseFilter(streams.source);
       // the order here is important: the stopword list is not normalized!
-      streams.result = new StopFilter(streams.result, stoptable);
+      streams.result = new StopFilter(false, streams.result, stoptable);
       streams.result = new ArabicNormalizationFilter(streams.result);
       streams.result = new ArabicStemFilter(streams.result);
       setPreviousTokenStream(streams);

Modified: lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java Wed Oct 21 12:12:11 2009
@@ -23,6 +23,7 @@
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
+import java.util.Collections;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.LowerCaseFilter;
@@ -71,12 +72,12 @@
 	/**
 	 * Contains the stopwords used with the {@link StopFilter}.
 	 */
-	private Set stoptable = new HashSet();
+	private Set stoptable = Collections.emptySet();
 	
 	/**
 	 * Contains words that should be indexed but not stemmed.
 	 */
-	private Set excltable = new HashSet();
+	private Set excltable = Collections.emptySet();
 
 	/**
 	 * Builds an analyzer with the default stop words ({@link #BRAZILIAN_STOP_WORDS}).
@@ -88,7 +89,7 @@
 	/**
 	 * Builds an analyzer with the given stop words.
 	 */
-	public BrazilianAnalyzer( String[] stopwords ) {
+	public BrazilianAnalyzer( String... stopwords ) {
 		stoptable = StopFilter.makeStopSet( stopwords );
 	}
 
@@ -109,7 +110,7 @@
 	/**
 	 * Builds an exclusionlist from an array of Strings.
 	 */
-	public void setStemExclusionTable( String[] exclusionlist ) {
+	public void setStemExclusionTable( String... exclusionlist ) {
 		excltable = StopFilter.makeStopSet( exclusionlist );
 		setPreviousTokenStream(null); // force a new stemmer to be created
 	}
@@ -139,7 +140,7 @@
 		TokenStream result = new StandardTokenizer( reader );
 		result = new LowerCaseFilter( result );
 		result = new StandardFilter( result );
-		result = new StopFilter( result, stoptable );
+		result = new StopFilter( false, result, stoptable );
 		result = new BrazilianStemFilter( result, excltable );
 		return result;
 	}
@@ -165,7 +166,7 @@
         streams.source = new StandardTokenizer(reader);
         streams.result = new LowerCaseFilter(streams.source);
         streams.result = new StandardFilter(streams.result);
-        streams.result = new StopFilter(streams.result, stoptable);
+        streams.result = new StopFilter(false, streams.result, stoptable);
         streams.result = new BrazilianStemFilter(streams.result, excltable);
         setPreviousTokenStream(streams);
       } else {

Modified: lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java Wed Oct 21 12:12:11 2009
@@ -55,7 +55,7 @@
   /**
    * stop word list
    */
-  private Set stopTable;
+  private final Set stopTable;
 
   //~ Constructors -----------------------------------------------------------
 
@@ -71,7 +71,7 @@
    *
    * @param stopWords stop word array
    */
-  public CJKAnalyzer(String[] stopWords) {
+  public CJKAnalyzer(String... stopWords) {
     stopTable = StopFilter.makeStopSet(stopWords);
   }
 
@@ -86,7 +86,7 @@
    *    {@link StopFilter}
    */
   public final TokenStream tokenStream(String fieldName, Reader reader) {
-    return new StopFilter(new CJKTokenizer(reader), stopTable);
+    return new StopFilter(false, new CJKTokenizer(reader), stopTable);
   }
   
   private class SavedStreams {
@@ -109,7 +109,7 @@
     if (streams == null) {
       streams = new SavedStreams();
       streams.source = new CJKTokenizer(reader);
-      streams.result = new StopFilter(streams.source, stopTable);
+      streams.result = new StopFilter(false, streams.source, stopTable);
       setPreviousTokenStream(streams);
     } else {
       streams.source.reset(reader);

Modified: lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java Wed Oct 21 12:12:11 2009
@@ -29,6 +29,7 @@
 import java.io.*;
 import java.util.HashSet;
 import java.util.Set;
+import java.util.Collections;
 
 /**
  * {@link Analyzer} for Czech language. 
@@ -79,7 +80,7 @@
 	/**
 	 * Builds an analyzer with the given stop words.
 	 */
-	public CzechAnalyzer( String[] stopwords ) {
+	public CzechAnalyzer( String... stopwords ) {
 		stoptable = StopFilter.makeStopSet( stopwords );
 	}
 
@@ -107,7 +108,7 @@
         }
         try {
             // clear any previous table (if present)
-            stoptable = new HashSet();
+            stoptable = Collections.emptySet();
 
             InputStreamReader isr;
             if (encoding == null)
@@ -115,16 +116,11 @@
             else
                 isr = new InputStreamReader(wordfile, encoding);
 
-            LineNumberReader lnr = new LineNumberReader(isr);
-            String word;
-            while ( ( word = lnr.readLine() ) != null ) {
-                stoptable.add(word);
-            }
-
+            stoptable = WordlistLoader.getWordSet(isr);
         } catch ( IOException e ) {
           // clear any previous table (if present)
           // TODO: throw IOException
-          stoptable = new HashSet();
+          stoptable = Collections.emptySet();
         }
     }
 
@@ -138,7 +134,7 @@
 		TokenStream result = new StandardTokenizer( reader );
 		result = new StandardFilter( result );
 		result = new LowerCaseFilter( result );
-		result = new StopFilter( result, stoptable );
+		result = new StopFilter(false, result, stoptable );
 		return result;
 	}
 	
@@ -162,7 +158,7 @@
         streams.source = new StandardTokenizer(reader);
         streams.result = new StandardFilter(streams.source);
         streams.result = new LowerCaseFilter(streams.result);
-        streams.result = new StopFilter(streams.result, stoptable);
+        streams.result = new StopFilter(false, streams.result, stoptable);
         setPreviousTokenStream(streams);
       } else {
         streams.source.reset(reader);

Modified: lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/de/GermanAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/de/GermanAnalyzer.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/de/GermanAnalyzer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/de/GermanAnalyzer.java Wed Oct 21 12:12:11 2009
@@ -86,7 +86,7 @@
   /**
    * Builds an analyzer with the given stop words.
    */
-  public GermanAnalyzer(String[] stopwords) {
+  public GermanAnalyzer(String... stopwords) {
     stopSet = StopFilter.makeStopSet(stopwords);
     setOverridesTokenStreamMethod(GermanAnalyzer.class);
   }
@@ -142,7 +142,7 @@
     TokenStream result = new StandardTokenizer(reader);
     result = new StandardFilter(result);
     result = new LowerCaseFilter(result);
-    result = new StopFilter(result, stopSet);
+    result = new StopFilter(false, result, stopSet);
     result = new GermanStemFilter(result, exclusionSet);
     return result;
   }
@@ -174,7 +174,7 @@
       streams.source = new StandardTokenizer(reader);
       streams.result = new StandardFilter(streams.source);
       streams.result = new LowerCaseFilter(streams.result);
-      streams.result = new StopFilter(streams.result, stopSet);
+      streams.result = new StopFilter(false, streams.result, stopSet);
       streams.result = new GermanStemFilter(streams.result, exclusionSet);
       setPreviousTokenStream(streams);
     } else {

Modified: lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/el/GreekAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/el/GreekAnalyzer.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/el/GreekAnalyzer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/el/GreekAnalyzer.java Wed Oct 21 12:12:11 2009
@@ -67,7 +67,7 @@
      * Builds an analyzer with the given stop words.
      * @param stopwords Array of stopwords to use.
      */
-    public GreekAnalyzer(String [] stopwords)
+    public GreekAnalyzer(String... stopwords)
     {
         super();
     	stopSet = StopFilter.makeStopSet(stopwords);
@@ -92,7 +92,7 @@
     {
     	TokenStream result = new StandardTokenizer(reader);
         result = new GreekLowerCaseFilter(result);
-        result = new StopFilter(result, stopSet);
+        result = new StopFilter(false, result, stopSet);
         return result;
     }
     
@@ -115,7 +115,7 @@
         streams = new SavedStreams();
         streams.source = new StandardTokenizer(reader);
         streams.result = new GreekLowerCaseFilter(streams.source);
-        streams.result = new StopFilter(streams.result, stopSet);
+        streams.result = new StopFilter(false, streams.result, stopSet);
         setPreviousTokenStream(streams);
       } else {
         streams.source.reset(reader);

Modified: lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/fr/FrenchAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/fr/FrenchAnalyzer.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/fr/FrenchAnalyzer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/fr/FrenchAnalyzer.java Wed Oct 21 12:12:11 2009
@@ -92,7 +92,7 @@
   /**
    * Builds an analyzer with the given stop words.
    */
-  public FrenchAnalyzer(String[] stopwords) {
+  public FrenchAnalyzer(String... stopwords) {
     stoptable = StopFilter.makeStopSet(stopwords);
   }
 
@@ -107,7 +107,7 @@
   /**
    * Builds an exclusionlist from an array of Strings.
    */
-  public void setStemExclusionTable(String[] exclusionlist) {
+  public void setStemExclusionTable(String... exclusionlist) {
     excltable = StopFilter.makeStopSet(exclusionlist);
     setPreviousTokenStream(null); // force a new stemmer to be created
   }
@@ -144,7 +144,7 @@
 
     TokenStream result = new StandardTokenizer(reader);
     result = new StandardFilter(result);
-    result = new StopFilter(result, stoptable);
+    result = new StopFilter(false, result, stoptable);
     result = new FrenchStemFilter(result, excltable);
     // Convert to lowercase after stemming!
     result = new LowerCaseFilter(result);
@@ -171,7 +171,7 @@
       streams = new SavedStreams();
       streams.source = new StandardTokenizer(reader);
       streams.result = new StandardFilter(streams.source);
-      streams.result = new StopFilter(streams.result, stoptable);
+      streams.result = new StopFilter(false, streams.result, stoptable);
       streams.result = new FrenchStemFilter(streams.result, excltable);
       // Convert to lowercase after stemming!
       streams.result = new LowerCaseFilter(streams.result);

Modified: lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/nl/DutchAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/nl/DutchAnalyzer.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/nl/DutchAnalyzer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/nl/DutchAnalyzer.java Wed Oct 21 12:12:11 2009
@@ -94,7 +94,7 @@
    *
    * @param stopwords
    */
-  public DutchAnalyzer(String[] stopwords) {
+  public DutchAnalyzer(String... stopwords) {
     setOverridesTokenStreamMethod(DutchAnalyzer.class);
     stoptable = StopFilter.makeStopSet(stopwords);
   }
@@ -129,7 +129,7 @@
    *
    * @param exclusionlist
    */
-  public void setStemExclusionTable(String[] exclusionlist) {
+  public void setStemExclusionTable(String... exclusionlist) {
     excltable = StopFilter.makeStopSet(exclusionlist);
     setPreviousTokenStream(null); // force a new stemmer to be created
   }
@@ -181,7 +181,7 @@
   public TokenStream tokenStream(String fieldName, Reader reader) {
     TokenStream result = new StandardTokenizer(reader);
     result = new StandardFilter(result);
-    result = new StopFilter(result, stoptable);
+    result = new StopFilter(false, result, stoptable);
     result = new DutchStemFilter(result, excltable, stemdict);
     return result;
   }
@@ -213,7 +213,7 @@
       streams = new SavedStreams();
       streams.source = new StandardTokenizer(reader);
       streams.result = new StandardFilter(streams.source);
-      streams.result = new StopFilter(streams.result, stoptable);
+      streams.result = new StopFilter(false, streams.result, stoptable);
       streams.result = new DutchStemFilter(streams.result, excltable, stemdict);
       setPreviousTokenStream(streams);
     } else {

Modified: lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java Wed Oct 21 12:12:11 2009
@@ -175,7 +175,7 @@
     }
     HashSet stopWords = (HashSet) stopWordsPerField.get(fieldName);
     if (stopWords != null) {
-      result = new StopFilter(result, stopWords);
+      result = new StopFilter(false, result, stopWords);
     }
     return result;
   }
@@ -217,7 +217,7 @@
       /* if there are any stopwords for the field, save the stopfilter */
       HashSet stopWords = (HashSet) stopWordsPerField.get(fieldName);
       if (stopWords != null)
-        streams.withStopFilter = new StopFilter(streams.wrapped, stopWords);
+        streams.withStopFilter = new StopFilter(false, streams.wrapped, stopWords);
       else
         streams.withStopFilter = streams.wrapped;
 
@@ -238,7 +238,7 @@
         streams.wrapped = result;
         HashSet stopWords = (HashSet) stopWordsPerField.get(fieldName);
         if (stopWords != null)
-          streams.withStopFilter = new StopFilter(streams.wrapped, stopWords);
+          streams.withStopFilter = new StopFilter(false, streams.wrapped, stopWords);
         else
           streams.withStopFilter = streams.wrapped;
       }

Modified: lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianAnalyzer.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianAnalyzer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianAnalyzer.java Wed Oct 21 12:12:11 2009
@@ -67,7 +67,7 @@
     /**
      * Builds an analyzer with the given stop words.
      */
-    public RussianAnalyzer(String[] stopwords)
+    public RussianAnalyzer(String... stopwords)
     {
     	super();
     	stopSet = StopFilter.makeStopSet(stopwords);
@@ -96,7 +96,7 @@
     {
         TokenStream result = new RussianLetterTokenizer(reader);
         result = new LowerCaseFilter(result);
-        result = new StopFilter(result, stopSet);
+        result = new StopFilter(false, result, stopSet);
         result = new RussianStemFilter(result);
         return result;
     }
@@ -122,7 +122,7 @@
       streams = new SavedStreams();
       streams.source = new RussianLetterTokenizer(reader);
       streams.result = new LowerCaseFilter(streams.source);
-      streams.result = new StopFilter(streams.result, stopSet);
+      streams.result = new StopFilter(false, streams.result, stopSet);
       streams.result = new RussianStemFilter(streams.result);
       setPreviousTokenStream(streams);
     } else {

Modified: lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapper.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapper.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapper.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapper.java Wed Oct 21 12:12:11 2009
@@ -23,6 +23,7 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.util.Version;
 
 /**
  * A ShingleAnalyzerWrapper wraps a {@link ShingleFilter} around another {@link Analyzer}.
@@ -50,14 +51,17 @@
   /**
    * Wraps {@link StandardAnalyzer}. 
    */
-  public ShingleAnalyzerWrapper() {
+  public ShingleAnalyzerWrapper(Version matchVersion) {
     super();
-    this.defaultAnalyzer = new StandardAnalyzer();
+    this.defaultAnalyzer = new StandardAnalyzer(matchVersion);
     setOverridesTokenStreamMethod(ShingleAnalyzerWrapper.class);
   }
 
-  public ShingleAnalyzerWrapper(int nGramSize) {
-    this();
+  /**
+   * Wraps {@link StandardAnalyzer}. 
+   */
+  public ShingleAnalyzerWrapper(Version matchVersion, int nGramSize) {
+    this(matchVersion);
     this.maxShingleSize = nGramSize;
   }
 

Modified: lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/th/ThaiAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/th/ThaiAnalyzer.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/th/ThaiAnalyzer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/th/ThaiAnalyzer.java Wed Oct 21 12:12:11 2009
@@ -40,7 +40,7 @@
 	  TokenStream ts = new StandardTokenizer(reader);
     ts = new StandardFilter(ts);
     ts = new ThaiWordFilter(ts);
-    ts = new StopFilter(ts, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
+    ts = new StopFilter(false, ts, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
     return ts;
   }
   
@@ -63,7 +63,7 @@
       streams.source = new StandardTokenizer(reader);
       streams.result = new StandardFilter(streams.source);
       streams.result = new ThaiWordFilter(streams.result);
-      streams.result = new StopFilter(streams.result, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
+      streams.result = new StopFilter(false, streams.result, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
       setPreviousTokenStream(streams);
     } else {
       streams.source.reset(reader);

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java Wed Oct 21 12:12:11 2009
@@ -216,6 +216,10 @@
    * subclass that acts just like whitespace analyzer for testing
    */
   private class ShingleWrapperSubclassAnalyzer extends ShingleAnalyzerWrapper {
+    public ShingleWrapperSubclassAnalyzer() {
+      super(org.apache.lucene.util.Version.LUCENE_CURRENT);
+    }
+  
     public TokenStream tokenStream(String fieldName, Reader reader) {
       return new WhitespaceTokenizer(reader);
     }  

Modified: lucene/java/trunk/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java (original)
+++ lucene/java/trunk/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java Wed Oct 21 12:12:11 2009
@@ -27,6 +27,7 @@
 import java.util.Properties;
 import java.util.Set;
 import java.util.Vector;
+import java.lang.reflect.Constructor;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.SimpleAnalyzer;
@@ -43,6 +44,7 @@
 import org.apache.lucene.search.Searcher;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.util.Version;
 import org.apache.tools.ant.BuildException;
 import org.apache.tools.ant.DynamicConfigurator;
 import org.apache.tools.ant.Project;
@@ -200,6 +202,17 @@
     handlerConfig = config;
   }
 
+  private static final Analyzer createAnalyzer(String className) throws Exception{
+    final Class<? extends Analyzer> clazz = Class.forName(className).asSubclass(Analyzer.class);
+    try {
+      // first try to use a ctor with version parameter (needed for many new Analyzers that have no default ctor anymore)
+      Constructor<? extends Analyzer> cnstr = clazz.getConstructor(Version.class);
+      return cnstr.newInstance(Version.LUCENE_CURRENT);
+    } catch (NoSuchMethodException nsme) {
+      // otherwise use default ctor
+      return clazz.newInstance();
+    }
+  }
 
   /**
    *  Begins the indexing
@@ -214,14 +227,9 @@
       Class clazz = Class.forName(handlerClassName);
       handler = (DocumentHandler) clazz.newInstance();
 
-      clazz = Class.forName(analyzerClassName);
-      analyzer = (Analyzer) clazz.newInstance();
-    } catch (ClassNotFoundException cnfe) {
-      throw new BuildException(cnfe);
-    } catch (InstantiationException ie) {
-      throw new BuildException(ie);
-    } catch (IllegalAccessException iae) {
-      throw new BuildException(iae);
+      analyzer = this.createAnalyzer(analyzerClassName);
+    } catch (Exception e) {
+      throw new BuildException(e);
     }
 
     log("Document handler = " + handler.getClass(), Project.MSG_VERBOSE);

Modified: lucene/java/trunk/contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java (original)
+++ lucene/java/trunk/contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java Wed Oct 21 12:12:11 2009
@@ -69,7 +69,7 @@
 
         dir = FSDirectory.open(indexDir);
         searcher = new IndexSearcher(dir, true);
-        analyzer = new StopAnalyzer();
+        analyzer = new StopAnalyzer(false);
     }
 
 

Modified: lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/PerfRunData.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/PerfRunData.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/PerfRunData.java (original)
+++ lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/PerfRunData.java Wed Oct 21 12:12:11 2009
@@ -30,6 +30,7 @@
 import org.apache.lucene.benchmark.byTask.tasks.SearchTask;
 import org.apache.lucene.benchmark.byTask.utils.Config;
 import org.apache.lucene.benchmark.byTask.utils.FileUtils;
+import org.apache.lucene.benchmark.byTask.tasks.NewAnalyzerTask;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.search.IndexSearcher;
@@ -76,8 +77,8 @@
   public PerfRunData (Config config) throws Exception {
     this.config = config;
     // analyzer (default is standard analyzer)
-    analyzer = (Analyzer) Class.forName(config.get("analyzer",
-        "org.apache.lucene.analysis.standard.StandardAnalyzer")).newInstance();
+    analyzer = NewAnalyzerTask.createAnalyzer(config.get("analyzer",
+        "org.apache.lucene.analysis.standard.StandardAnalyzer"));
     // doc maker
     docMaker = (DocMaker) Class.forName(config.get("doc.maker",
         "org.apache.lucene.benchmark.byTask.feeds.DocMaker")).newInstance();

Modified: lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiQueryMaker.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiQueryMaker.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiQueryMaker.java (original)
+++ lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiQueryMaker.java Wed Oct 21 12:12:11 2009
@@ -32,6 +32,7 @@
 import org.apache.lucene.search.spans.SpanNearQuery;
 import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.search.spans.SpanTermQuery;
+import org.apache.lucene.benchmark.byTask.tasks.NewAnalyzerTask;
 
 /**
  * A QueryMaker that uses common and uncommon actual Wikipedia queries for
@@ -122,8 +123,7 @@
 
   protected Query[] prepareQueries() throws Exception {
     // analyzer (default is standard analyzer)
-    Analyzer anlzr = (Analyzer) Class.forName(
-        config.get("analyzer", StandardAnalyzer.class.getName())).newInstance();
+    Analyzer anlzr = NewAnalyzerTask.createAnalyzer(config.get("analyzer", StandardAnalyzer.class.getName()));
 
     List queryList = new ArrayList(20);
     queryList.addAll(Arrays.asList(STANDARD_QUERIES));

Modified: lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/FileBasedQueryMaker.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/FileBasedQueryMaker.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/FileBasedQueryMaker.java (original)
+++ lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/FileBasedQueryMaker.java Wed Oct 21 12:12:11 2009
@@ -4,6 +4,7 @@
 import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.queryParser.ParseException;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.benchmark.byTask.tasks.NewAnalyzerTask;
 
 import java.io.*;
 import java.util.ArrayList;
@@ -44,8 +45,8 @@
 
   protected Query[] prepareQueries() throws Exception {
 
-    Analyzer anlzr = (Analyzer) Class.forName(config.get("analyzer",
-            "org.apache.lucene.analysis.standard.StandardAnalyzer")).newInstance();
+    Analyzer anlzr = NewAnalyzerTask.createAnalyzer(config.get("analyzer",
+            "org.apache.lucene.analysis.standard.StandardAnalyzer"));
     String defaultField = config.get("file.query.maker.default.field", DocMaker.BODY_FIELD);
     QueryParser qp = new QueryParser(defaultField, anlzr);
 

Modified: lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ReutersQueryMaker.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ReutersQueryMaker.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ReutersQueryMaker.java (original)
+++ lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ReutersQueryMaker.java Wed Oct 21 12:12:11 2009
@@ -26,6 +26,7 @@
 import org.apache.lucene.search.spans.SpanNearQuery;
 import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.search.spans.SpanTermQuery;
+import org.apache.lucene.benchmark.byTask.tasks.NewAnalyzerTask;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -102,8 +103,8 @@
   
   protected Query[] prepareQueries() throws Exception {
     // analyzer (default is standard analyzer)
-    Analyzer anlzr= (Analyzer) Class.forName(config.get("analyzer",
-    "org.apache.lucene.analysis.standard.StandardAnalyzer")).newInstance(); 
+    Analyzer anlzr= NewAnalyzerTask.createAnalyzer(config.get("analyzer",
+    "org.apache.lucene.analysis.standard.StandardAnalyzer")); 
     
     List queryList = new ArrayList(20);
     queryList.addAll(Arrays.asList(STANDARD_QUERIES));

Modified: lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleQueryMaker.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleQueryMaker.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleQueryMaker.java (original)
+++ lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleQueryMaker.java Wed Oct 21 12:12:11 2009
@@ -24,6 +24,7 @@
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.benchmark.byTask.tasks.NewAnalyzerTask;
 
 import java.util.ArrayList;
 
@@ -42,8 +43,8 @@
    */
   protected Query[] prepareQueries() throws Exception {
     // analyzer (default is standard analyzer)
-    Analyzer anlzr= (Analyzer) Class.forName(config.get("analyzer",
-        "org.apache.lucene.analysis.standard.StandardAnalyzer")).newInstance(); 
+    Analyzer anlzr= NewAnalyzerTask.createAnalyzer(config.get("analyzer",
+        "org.apache.lucene.analysis.standard.StandardAnalyzer")); 
     
     QueryParser qp = new QueryParser(DocMaker.BODY_FIELD,anlzr);
     ArrayList qq = new ArrayList();

Modified: lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewAnalyzerTask.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewAnalyzerTask.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewAnalyzerTask.java (original)
+++ lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewAnalyzerTask.java Wed Oct 21 12:12:11 2009
@@ -17,11 +17,13 @@
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.benchmark.byTask.PerfRunData;
+import org.apache.lucene.util.Version;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.StringTokenizer;
+import java.lang.reflect.Constructor;
 
 /**
  * Create a new {@link org.apache.lucene.analysis.Analyzer} and set it in the getRunData() for use by all future tasks.
@@ -35,6 +37,18 @@
     super(runData);
     analyzerClassNames = new ArrayList();
   }
+  
+  public static final Analyzer createAnalyzer(String className) throws Exception{
+    final Class<? extends Analyzer> clazz = Class.forName(className).asSubclass(Analyzer.class);
+    try {
+      // first try to use a ctor with version parameter (needed for many new Analyzers that have no default ctor anymore)
+      Constructor<? extends Analyzer> cnstr = clazz.getConstructor(Version.class);
+      return cnstr.newInstance(Version.LUCENE_CURRENT);
+    } catch (NoSuchMethodException nsme) {
+      // otherwise use default ctor
+      return clazz.newInstance();
+    }
+  }
 
   public int doLogic() throws IOException {
     String className = null;
@@ -52,7 +66,7 @@
       {
         className = "org.apache.lucene.analysis." + className;
       }
-      getRunData().setAnalyzer((Analyzer) Class.forName(className).newInstance());
+      getRunData().setAnalyzer(createAnalyzer(className));
       System.out.println("Changed Analyzer to: " + className);
     } catch (Exception e) {
       throw new RuntimeException("Error creating Analyzer: " + className, e);

Modified: lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SimpleQQParser.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SimpleQQParser.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SimpleQQParser.java (original)
+++ lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SimpleQQParser.java Wed Oct 21 12:12:11 2009
@@ -22,6 +22,7 @@
 import org.apache.lucene.queryParser.ParseException;
 import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.util.Version;
 
 /**
  * Simplistic quality query parser. A Lucene query is created by passing 
@@ -49,7 +50,7 @@
   public Query parse(QualityQuery qq) throws ParseException {
     QueryParser qp = (QueryParser) queryParser.get();
     if (qp==null) {
-      qp = new QueryParser(indexField, new StandardAnalyzer());
+      qp = new QueryParser(indexField, new StandardAnalyzer(Version.LUCENE_CURRENT));
       queryParser.set(qp);
     }
     return qp.parse(qq.getValue(qqName));

Modified: lucene/java/trunk/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (original)
+++ lucene/java/trunk/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java Wed Oct 21 12:12:11 2009
@@ -83,13 +83,16 @@
  *
  */
 public class HighlighterTest extends BaseTokenStreamTestCase implements Formatter {
+  // TODO: change to CURRENT, does not work because posIncr:
+  static final Version TEST_VERSION = Version.LUCENE_24;
+
   private IndexReader reader;
   static final String FIELD_NAME = "contents";
   private Query query;
   RAMDirectory ramDir;
   public IndexSearcher searcher = null;
   int numHighlights = 0;
-  Analyzer analyzer = new StandardAnalyzer();
+  final Analyzer analyzer = new StandardAnalyzer(TEST_VERSION);
   TopDocs hits;
 
   String[] texts = {
@@ -140,7 +143,7 @@
 
     String s1 = "I call our world Flatland, not because we call it so,";
 
-    QueryParser parser = new QueryParser(FIELD_NAME, new StandardAnalyzer(Version.LUCENE_CURRENT));
+    QueryParser parser = new QueryParser(FIELD_NAME, new StandardAnalyzer(TEST_VERSION));
 
     // Verify that a query against the default field results in text being
     // highlighted
@@ -172,7 +175,7 @@
    */
   private static String highlightField(Query query, String fieldName, String text)
       throws IOException, InvalidTokenOffsetsException {
-    TokenStream tokenStream = new StandardAnalyzer(Version.LUCENE_CURRENT).tokenStream(fieldName, new StringReader(text));
+    TokenStream tokenStream = new StandardAnalyzer(TEST_VERSION).tokenStream(fieldName, new StringReader(text));
     // Assuming "<B>", "</B>" used to highlight
     SimpleHTMLFormatter formatter = new SimpleHTMLFormatter();
     QueryScorer scorer = new QueryScorer(query, fieldName, FIELD_NAME);
@@ -542,7 +545,7 @@
         // Need to explicitly set the QueryParser property to use TermRangeQuery
         // rather
         // than RangeFilters
-        QueryParser parser = new QueryParser(FIELD_NAME, new StandardAnalyzer());
+        QueryParser parser = new QueryParser(FIELD_NAME, analyzer);
         parser.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
         query = parser.parse(queryString);
         doSearching(query);
@@ -693,7 +696,7 @@
         hg.setTextFragmenter(new NullFragmenter());
 
         String match = null;
-        match = hg.getBestFragment(new StandardAnalyzer(), "data", "help me [54-65]");
+        match = hg.getBestFragment(analyzer, "data", "help me [54-65]");
         assertEquals("<B>help</B> me [54-65]", match);
 
       }
@@ -1004,13 +1007,13 @@
           sb.append(stopWords.iterator().next());
         }
         SimpleHTMLFormatter fm = new SimpleHTMLFormatter();
-        Highlighter hg = getHighlighter(query, "data", new StandardAnalyzer(stopWords).tokenStream(
+        Highlighter hg = getHighlighter(query, "data", new StandardAnalyzer(TEST_VERSION, stopWords).tokenStream(
             "data", new StringReader(sb.toString())), fm);// new Highlighter(fm,
         // new
         // QueryTermScorer(query));
         hg.setTextFragmenter(new NullFragmenter());
         hg.setMaxDocBytesToAnalyze(100);
-        match = hg.getBestFragment(new StandardAnalyzer(stopWords), "data", sb.toString());
+        match = hg.getBestFragment(new StandardAnalyzer(TEST_VERSION, stopWords), "data", sb.toString());
         assertTrue("Matched text should be no more than 100 chars in length ", match.length() < hg
             .getMaxDocBytesToAnalyze());
 
@@ -1021,7 +1024,7 @@
         // + whitespace)
         sb.append(" ");
         sb.append(goodWord);
-        match = hg.getBestFragment(new StandardAnalyzer(stopWords), "data", sb.toString());
+        match = hg.getBestFragment(new StandardAnalyzer(TEST_VERSION, stopWords), "data", sb.toString());
         assertTrue("Matched text should be no more than 100 chars in length ", match.length() < hg
             .getMaxDocBytesToAnalyze());
       }
@@ -1041,11 +1044,11 @@
 
         String text = "this is a text with searchterm in it";
         SimpleHTMLFormatter fm = new SimpleHTMLFormatter();
-        Highlighter hg = getHighlighter(query, "text", new StandardAnalyzer(
+        Highlighter hg = getHighlighter(query, "text", new StandardAnalyzer(TEST_VERSION, 
             stopWords).tokenStream("text", new StringReader(text)), fm);
         hg.setTextFragmenter(new NullFragmenter());
         hg.setMaxDocCharsToAnalyze(36);
-        String match = hg.getBestFragment(new StandardAnalyzer(stopWords), "text", text);
+        String match = hg.getBestFragment(new StandardAnalyzer(TEST_VERSION, stopWords), "text", text);
         assertTrue(
             "Matched text should contain remainder of text after highlighted query ",
             match.endsWith("in it"));
@@ -1061,7 +1064,7 @@
         numHighlights = 0;
         // test to show how rewritten query can still be used
         searcher = new IndexSearcher(ramDir, true);
-        Analyzer analyzer = new StandardAnalyzer();
+        Analyzer analyzer = new StandardAnalyzer(TEST_VERSION);
 
         QueryParser parser = new QueryParser(FIELD_NAME, analyzer);
         Query query = parser.parse("JF? or Kenned*");
@@ -1074,7 +1077,7 @@
         // highlighted text
         // QueryHighlightExtractor highlighter = new
         // QueryHighlightExtractor(this,
-        // query, new StandardAnalyzer());
+        // query, new StandardAnalyzer(TEST_VERSION));
 
         int maxNumFragmentsRequired = 3;
 
@@ -1173,7 +1176,7 @@
   public void testMultiSearcher() throws Exception {
     // setup index 1
     RAMDirectory ramDir1 = new RAMDirectory();
-    IndexWriter writer1 = new IndexWriter(ramDir1, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer1 = new IndexWriter(ramDir1, new StandardAnalyzer(TEST_VERSION), true, IndexWriter.MaxFieldLength.UNLIMITED);
     Document d = new Document();
     Field f = new Field(FIELD_NAME, "multiOne", Field.Store.YES, Field.Index.ANALYZED);
     d.add(f);
@@ -1184,7 +1187,7 @@
 
     // setup index 2
     RAMDirectory ramDir2 = new RAMDirectory();
-    IndexWriter writer2 = new IndexWriter(ramDir2, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer2 = new IndexWriter(ramDir2, new StandardAnalyzer(TEST_VERSION), true, IndexWriter.MaxFieldLength.UNLIMITED);
     d = new Document();
     f = new Field(FIELD_NAME, "multiTwo", Field.Store.YES, Field.Index.ANALYZED);
     d.add(f);
@@ -1197,14 +1200,14 @@
     searchers[0] = new IndexSearcher(ramDir1, true);
     searchers[1] = new IndexSearcher(ramDir2, true);
     MultiSearcher multiSearcher = new MultiSearcher(searchers);
-    QueryParser parser = new QueryParser(FIELD_NAME, new StandardAnalyzer());
+    QueryParser parser = new QueryParser(FIELD_NAME, new StandardAnalyzer(TEST_VERSION));
     parser.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
     query = parser.parse("multi*");
     System.out.println("Searching for: " + query.toString(FIELD_NAME));
     // at this point the multisearcher calls combine(query[])
     hits = multiSearcher.search(query, null, 1000);
 
-    // query = QueryParser.parse("multi*", FIELD_NAME, new StandardAnalyzer());
+    // query = QueryParser.parse("multi*", FIELD_NAME, new StandardAnalyzer(TEST_VERSION));
     Query expandedQueries[] = new Query[2];
     expandedQueries[0] = query.rewrite(reader1);
     expandedQueries[1] = query.rewrite(reader2);
@@ -1527,7 +1530,7 @@
   }
 
   public void doSearching(String queryString) throws Exception {
-    QueryParser parser = new QueryParser(FIELD_NAME, new StandardAnalyzer());
+    QueryParser parser = new QueryParser(FIELD_NAME, analyzer);
     parser.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
     query = parser.parse(queryString);
     doSearching(query);
@@ -1564,7 +1567,7 @@
   protected void setUp() throws Exception {
     super.setUp();
     ramDir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(ramDir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer = new IndexWriter(ramDir, new StandardAnalyzer(TEST_VERSION), true, IndexWriter.MaxFieldLength.UNLIMITED);
     for (int i = 0; i < texts.length; i++) {
       addDoc(writer, texts[i]);
     }

Modified: lucene/java/trunk/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java (original)
+++ lucene/java/trunk/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java Wed Oct 21 12:12:11 2009
@@ -63,7 +63,7 @@
     RAMDirectory dir = new RAMDirectory();
 
     // create dir data
-    IndexWriter indexWriter = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter indexWriter = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
     for (int i = 0; i < 20; i++) {
       Document document = new Document();
       assembleDocument(document, i);
@@ -87,7 +87,7 @@
     InstantiatedIndex ii = new InstantiatedIndex();
 
     // create dir data
-    IndexWriter indexWriter = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter indexWriter = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
     for (int i = 0; i < 500; i++) {
       Document document = new Document();
       assembleDocument(document, i);
@@ -96,7 +96,7 @@
     indexWriter.close();
 
     // test ii writer
-    InstantiatedIndexWriter instantiatedIndexWriter = ii.indexWriterFactory(new StandardAnalyzer(), true);
+    InstantiatedIndexWriter instantiatedIndexWriter = ii.indexWriterFactory(new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true);
     for (int i = 0; i < 500; i++) {
       Document document = new Document();
       assembleDocument(document, i);

Modified: lucene/java/trunk/contrib/lucli/src/java/lucli/LuceneMethods.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/lucli/src/java/lucli/LuceneMethods.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/lucli/src/java/lucli/LuceneMethods.java (original)
+++ lucene/java/trunk/contrib/lucli/src/java/lucli/LuceneMethods.java Wed Oct 21 12:12:11 2009
@@ -57,6 +57,7 @@
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Searcher;
 import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.util.Version;
 
 /**
  * Various methods that interact with Lucene and provide info about the 
@@ -80,18 +81,18 @@
   }
 
     private Analyzer createAnalyzer() {
-        if (analyzerClassFQN == null) return new StandardAnalyzer();
+        if (analyzerClassFQN == null) return new StandardAnalyzer(Version.LUCENE_CURRENT);
         try {
             Class aClass = Class.forName(analyzerClassFQN);
             Object obj = aClass.newInstance();
             if (!(obj instanceof Analyzer)) {
                 message("Given class is not an Analyzer: " + analyzerClassFQN);
-                return new StandardAnalyzer();
+                return new StandardAnalyzer(Version.LUCENE_CURRENT);
             }
             return (Analyzer)obj;
         } catch (Exception e) {
             message("Unable to use Analyzer " + analyzerClassFQN);
-            return new StandardAnalyzer();
+            return new StandardAnalyzer(Version.LUCENE_CURRENT);
         }
     }
 

Modified: lucene/java/trunk/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java (original)
+++ lucene/java/trunk/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java Wed Oct 21 12:12:11 2009
@@ -277,8 +277,8 @@
     
     Analyzer[] analyzers = new Analyzer[] { 
         new SimpleAnalyzer(),
-        new StopAnalyzer(),
-        new StandardAnalyzer(),
+        new StopAnalyzer(true),
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
         PatternAnalyzer.DEFAULT_ANALYZER,
 //        new WhitespaceAnalyzer(),
 //        new PatternAnalyzer(PatternAnalyzer.NON_WORD_PATTERN, false, null),

Modified: lucene/java/trunk/contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java (original)
+++ lucene/java/trunk/contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java Wed Oct 21 12:12:11 2009
@@ -6,6 +6,7 @@
 import org.apache.lucene.document.Field;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.Version;
 
 import java.util.Collections;
 /*
@@ -29,7 +30,7 @@
   public void test() throws Exception {
 
     Directory dir = new RAMDirectory();
-    IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(Collections.EMPTY_SET), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(Version.LUCENE_CURRENT, Collections.emptySet()), true, IndexWriter.MaxFieldLength.UNLIMITED);
 
     Document doc;
 

Modified: lucene/java/trunk/contrib/misc/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/misc/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/misc/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java (original)
+++ lucene/java/trunk/contrib/misc/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java Wed Oct 21 12:12:11 2009
@@ -36,7 +36,7 @@
 
 public class TestComplexPhraseQuery extends TestCase {
 
-  Analyzer analyzer = new StandardAnalyzer();
+  Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
 
   DocData docsContent[] = { new DocData("john smith", "1"),
       new DocData("johathon smith", "2"),

Modified: lucene/java/trunk/contrib/misc/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/misc/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/misc/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java (original)
+++ lucene/java/trunk/contrib/misc/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java Wed Oct 21 12:12:11 2009
@@ -44,6 +44,7 @@
 import java.util.Calendar;
 import java.util.GregorianCalendar;
 import java.util.HashSet;
+import java.util.Collections;
 
 public class TestPrecedenceQueryParser extends LocalizedTestCase {
   
@@ -233,7 +234,7 @@
     assertQueryEquals("+title:(dog OR cat) -author:\"bob dole\"", null,
                       "+(title:dog title:cat) -author:\"bob dole\"");
     
-    PrecedenceQueryParser qp = new PrecedenceQueryParser("field", new StandardAnalyzer());
+    PrecedenceQueryParser qp = new PrecedenceQueryParser("field", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     // make sure OR is the default:
     assertEquals(PrecedenceQueryParser.OR_OPERATOR, qp.getDefaultOperator());
     qp.setDefaultOperator(PrecedenceQueryParser.AND_OPERATOR);
@@ -267,7 +268,7 @@
     assertQueryEquals("term 1.0 1 2", null, "term");
     assertQueryEquals("term term1 term2", null, "term term term");
 
-    Analyzer a = new StandardAnalyzer();
+    Analyzer a = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
     assertQueryEquals("3", a, "3");
     assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2");
     assertQueryEquals("term term1 term2", a, "term term1 term2");
@@ -505,7 +506,7 @@
 
   public void testBoost()
     throws Exception {
-    StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(new String[]{"on"});
+    StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT, Collections.singleton("on"));
     PrecedenceQueryParser qp = new PrecedenceQueryParser("field", oneStopAnalyzer);
     Query q = qp.parse("on^1.0");
     assertNotNull(q);
@@ -518,7 +519,7 @@
     q = qp.parse("\"on\"^1.0");
     assertNotNull(q);
 
-    q = getParser(new StandardAnalyzer()).parse("the^3");
+    q = getParser(new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT)).parse("the^3");
     assertNotNull(q);
   }
 

Modified: lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java (original)
+++ lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java Wed Oct 21 12:12:11 2009
@@ -49,6 +49,7 @@
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.util.PriorityQueue;
+import org.apache.lucene.util.Version;
 
 
 /**
@@ -162,7 +163,7 @@
      * Default analyzer to parse source doc with.
 	 * @see #getAnalyzer
      */
-    public static final Analyzer DEFAULT_ANALYZER = new StandardAnalyzer();
+    public static final Analyzer DEFAULT_ANALYZER = new StandardAnalyzer(Version.LUCENE_CURRENT);
 
     /**
      * Ignore terms with less than this frequency in the source doc.

Modified: lucene/java/trunk/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java (original)
+++ lucene/java/trunk/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java Wed Oct 21 12:12:11 2009
@@ -42,7 +42,7 @@
 	protected void setUp() throws Exception
 	{
 		directory = new RAMDirectory();
-		IndexWriter writer = new IndexWriter(directory, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+		IndexWriter writer = new IndexWriter(directory, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
 		
 		//Add series of docs with filterable fields : url, text and dates  flags
 		addDoc(writer, "http://lucene.apache.org", "lucene 1.4.3 available", "20040101");

Modified: lucene/java/trunk/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java (original)
+++ lucene/java/trunk/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java Wed Oct 21 12:12:11 2009
@@ -43,7 +43,7 @@
 
     protected void setUp() throws Exception {
 	directory = new RAMDirectory();
-	IndexWriter writer = new IndexWriter(directory, new StandardAnalyzer(),
+	IndexWriter writer = new IndexWriter(directory, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
 		true, MaxFieldLength.UNLIMITED);
 
 	// Add series of docs with specific information for MoreLikeThis

Modified: lucene/java/trunk/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java (original)
+++ lucene/java/trunk/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java Wed Oct 21 12:12:11 2009
@@ -83,7 +83,7 @@
     String[] fields = { "b", "t" };
     StandardQueryParser mfqp = new StandardQueryParser();
     mfqp.setMultiFields(fields);
-    mfqp.setAnalyzer(new StandardAnalyzer());
+    mfqp.setAnalyzer(new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
 
     Query q = mfqp.parse("one", null);
     assertEquals("b:one t:one", q.toString());
@@ -153,7 +153,7 @@
     StandardQueryParser mfqp = new StandardQueryParser();
     mfqp.setMultiFields(fields);
     mfqp.setFieldsBoost(boosts);
-    mfqp.setAnalyzer(new StandardAnalyzer());
+    mfqp.setAnalyzer(new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
 
     // Check for simple
     Query q = mfqp.parse("one", null);
@@ -181,24 +181,24 @@
   public void testStaticMethod1() throws QueryNodeException {
     String[] fields = { "b", "t" };
     String[] queries = { "one", "two" };
-    Query q = QueryParserUtil.parse(queries, fields, new StandardAnalyzer());
+    Query q = QueryParserUtil.parse(queries, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("b:one t:two", q.toString());
 
     String[] queries2 = { "+one", "+two" };
-    q = QueryParserUtil.parse(queries2, fields, new StandardAnalyzer());
+    q = QueryParserUtil.parse(queries2, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("(+b:one) (+t:two)", q.toString());
 
     String[] queries3 = { "one", "+two" };
-    q = QueryParserUtil.parse(queries3, fields, new StandardAnalyzer());
+    q = QueryParserUtil.parse(queries3, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("b:one (+t:two)", q.toString());
 
     String[] queries4 = { "one +more", "+two" };
-    q = QueryParserUtil.parse(queries4, fields, new StandardAnalyzer());
+    q = QueryParserUtil.parse(queries4, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("(b:one +b:more) (+t:two)", q.toString());
 
     String[] queries5 = { "blah" };
     try {
-      q = QueryParserUtil.parse(queries5, fields, new StandardAnalyzer());
+      q = QueryParserUtil.parse(queries5, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
       fail();
     } catch (IllegalArgumentException e) {
       // expected exception, array length differs
@@ -222,15 +222,15 @@
     BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
         BooleanClause.Occur.MUST_NOT };
     Query q = QueryParserUtil.parse("one", fields, flags,
-        new StandardAnalyzer());
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("+b:one -t:one", q.toString());
 
-    q = QueryParserUtil.parse("one two", fields, flags, new StandardAnalyzer());
+    q = QueryParserUtil.parse("one two", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
 
     try {
       BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
-      q = QueryParserUtil.parse("blah", fields, flags2, new StandardAnalyzer());
+      q = QueryParserUtil.parse("blah", fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
       fail();
     } catch (IllegalArgumentException e) {
       // expected exception, array length differs
@@ -243,19 +243,19 @@
         BooleanClause.Occur.MUST_NOT };
     StandardQueryParser parser = new StandardQueryParser();
     parser.setMultiFields(fields);
-    parser.setAnalyzer(new StandardAnalyzer());
+    parser.setAnalyzer(new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
 
     Query q = QueryParserUtil.parse("one", fields, flags,
-        new StandardAnalyzer());// , fields, flags, new
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));// , fields, flags, new
     // StandardAnalyzer());
     assertEquals("+b:one -t:one", q.toString());
 
-    q = QueryParserUtil.parse("one two", fields, flags, new StandardAnalyzer());
+    q = QueryParserUtil.parse("one two", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
 
     try {
       BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
-      q = QueryParserUtil.parse("blah", fields, flags2, new StandardAnalyzer());
+      q = QueryParserUtil.parse("blah", fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
       fail();
     } catch (IllegalArgumentException e) {
       // expected exception, array length differs
@@ -268,13 +268,13 @@
     BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
         BooleanClause.Occur.MUST_NOT, BooleanClause.Occur.SHOULD };
     Query q = QueryParserUtil.parse(queries, fields, flags,
-        new StandardAnalyzer());
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("+f1:one -f2:two f3:three", q.toString());
 
     try {
       BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
       q = QueryParserUtil
-          .parse(queries, fields, flags2, new StandardAnalyzer());
+          .parse(queries, fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
       fail();
     } catch (IllegalArgumentException e) {
       // expected exception, array length differs
@@ -287,13 +287,13 @@
     BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
         BooleanClause.Occur.MUST_NOT };
     Query q = QueryParserUtil.parse(queries, fields, flags,
-        new StandardAnalyzer());
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("+b:one -t:two", q.toString());
 
     try {
       BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
       q = QueryParserUtil
-          .parse(queries, fields, flags2, new StandardAnalyzer());
+          .parse(queries, fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
       fail();
     } catch (IllegalArgumentException e) {
       // expected exception, array length differs
@@ -319,7 +319,7 @@
   }
 
   public void testStopWordSearching() throws Exception {
-    Analyzer analyzer = new StandardAnalyzer();
+    Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
     Directory ramDir = new RAMDirectory();
     IndexWriter iw = new IndexWriter(ramDir, analyzer, true,
         IndexWriter.MaxFieldLength.LIMITED);
@@ -345,7 +345,7 @@
    * Return empty tokens for field "f1".
    */
   private static class AnalyzerReturningNull extends Analyzer {
-    StandardAnalyzer stdAnalyzer = new StandardAnalyzer();
+    StandardAnalyzer stdAnalyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
 
     public AnalyzerReturningNull() {
     }

Modified: lucene/java/trunk/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java (original)
+++ lucene/java/trunk/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java Wed Oct 21 12:12:11 2009
@@ -78,7 +78,7 @@
   public void testSimple() throws Exception {
     String[] fields = { "b", "t" };
     MultiFieldQueryParserWrapper mfqp = new MultiFieldQueryParserWrapper(
-        fields, new StandardAnalyzer());
+        fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
 
     Query q = mfqp.parse("one");
     assertEquals("b:one t:one", q.toString());
@@ -146,7 +146,7 @@
     boosts.put("t", Float.valueOf(10));
     String[] fields = { "b", "t" };
     MultiFieldQueryParserWrapper mfqp = new MultiFieldQueryParserWrapper(
-        fields, new StandardAnalyzer(), boosts);
+        fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), boosts);
 
     // Check for simple
     Query q = mfqp.parse("one");
@@ -175,28 +175,28 @@
     String[] fields = { "b", "t" };
     String[] queries = { "one", "two" };
     Query q = MultiFieldQueryParserWrapper.parse(queries, fields,
-        new StandardAnalyzer());
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("b:one t:two", q.toString());
 
     String[] queries2 = { "+one", "+two" };
     q = MultiFieldQueryParserWrapper.parse(queries2, fields,
-        new StandardAnalyzer());
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("(+b:one) (+t:two)", q.toString());
 
     String[] queries3 = { "one", "+two" };
     q = MultiFieldQueryParserWrapper.parse(queries3, fields,
-        new StandardAnalyzer());
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("b:one (+t:two)", q.toString());
 
     String[] queries4 = { "one +more", "+two" };
     q = MultiFieldQueryParserWrapper.parse(queries4, fields,
-        new StandardAnalyzer());
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("(b:one +b:more) (+t:two)", q.toString());
 
     String[] queries5 = { "blah" };
     try {
       q = MultiFieldQueryParserWrapper.parse(queries5, fields,
-          new StandardAnalyzer());
+          new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
       fail();
     } catch (IllegalArgumentException e) {
       // expected exception, array length differs
@@ -220,17 +220,17 @@
     BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
         BooleanClause.Occur.MUST_NOT };
     Query q = MultiFieldQueryParserWrapper.parse("one", fields, flags,
-        new StandardAnalyzer());
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("+b:one -t:one", q.toString());
 
     q = MultiFieldQueryParserWrapper.parse("one two", fields, flags,
-        new StandardAnalyzer());
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
 
     try {
       BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
       q = MultiFieldQueryParserWrapper.parse("blah", fields, flags2,
-          new StandardAnalyzer());
+          new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
       fail();
     } catch (IllegalArgumentException e) {
       // expected exception, array length differs
@@ -244,20 +244,20 @@
     BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
         BooleanClause.Occur.MUST_NOT };
     MultiFieldQueryParserWrapper parser = new MultiFieldQueryParserWrapper(
-        fields, new StandardAnalyzer());
+        fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
 
     Query q = MultiFieldQueryParserWrapper.parse("one", fields, flags,
-        new StandardAnalyzer());// , fields, flags, new StandardAnalyzer());
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));// , fields, flags, new StandardAnalyzer());
     assertEquals("+b:one -t:one", q.toString());
 
     q = MultiFieldQueryParserWrapper.parse("one two", fields, flags,
-        new StandardAnalyzer());
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
 
     try {
       BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
       q = MultiFieldQueryParserWrapper.parse("blah", fields, flags2,
-          new StandardAnalyzer());
+          new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
       fail();
     } catch (IllegalArgumentException e) {
       // expected exception, array length differs
@@ -270,13 +270,13 @@
     BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
         BooleanClause.Occur.MUST_NOT, BooleanClause.Occur.SHOULD };
     Query q = MultiFieldQueryParserWrapper.parse(queries, fields, flags,
-        new StandardAnalyzer());
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("+f1:one -f2:two f3:three", q.toString());
 
     try {
       BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
       q = MultiFieldQueryParserWrapper.parse(queries, fields, flags2,
-          new StandardAnalyzer());
+          new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
       fail();
     } catch (IllegalArgumentException e) {
       // expected exception, array length differs
@@ -289,13 +289,13 @@
     BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
         BooleanClause.Occur.MUST_NOT };
     Query q = MultiFieldQueryParserWrapper.parse(queries, fields, flags,
-        new StandardAnalyzer());
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("+b:one -t:two", q.toString());
 
     try {
       BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
       q = MultiFieldQueryParserWrapper.parse(queries, fields, flags2,
-          new StandardAnalyzer());
+          new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
       fail();
     } catch (IllegalArgumentException e) {
       // expected exception, array length differs
@@ -319,7 +319,7 @@
   }
 
   public void testStopWordSearching() throws Exception {
-    Analyzer analyzer = new StandardAnalyzer();
+    Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
     Directory ramDir = new RAMDirectory();
     IndexWriter iw = new IndexWriter(ramDir, analyzer, true,
         IndexWriter.MaxFieldLength.LIMITED);
@@ -343,7 +343,7 @@
    * Return empty tokens for field "f1".
    */
   private static class AnalyzerReturningNull extends Analyzer {
-    StandardAnalyzer stdAnalyzer = new StandardAnalyzer();
+    StandardAnalyzer stdAnalyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
 
     public AnalyzerReturningNull() {
     }

Modified: lucene/java/trunk/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java?rev=827979&r1=827978&r2=827979&view=diff
==============================================================================
--- lucene/java/trunk/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java (original)
+++ lucene/java/trunk/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java Wed Oct 21 12:12:11 2009
@@ -30,6 +30,7 @@
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
+import java.util.Collections;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.KeywordAnalyzer;
@@ -77,6 +78,7 @@
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.util.LocalizedTestCase;
+import org.apache.lucene.util.Version;
 
 /**
  * This test case is a copy of the core Lucene query parser test, it was adapted
@@ -410,7 +412,7 @@
     assertQueryEquals("term 1.0 1 2", null, "term");
     assertQueryEquals("term term1 term2", null, "term term term");
 
-    Analyzer a = new StandardAnalyzer();
+    Analyzer a = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
     assertQueryEquals("3", a, "3");
     assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2");
     assertQueryEquals("term term1 term2", a, "term term1 term2");
@@ -896,8 +898,7 @@
   }
 
   public void testBoost() throws Exception {
-    StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(
-        new String[] { "on" });
+    StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(Version.LUCENE_CURRENT, Collections.singleton("on"));
     StandardQueryParser qp = new StandardQueryParser();
     qp.setAnalyzer(oneStopAnalyzer);
 
@@ -913,7 +914,7 @@
     assertNotNull(q);
 
     StandardQueryParser qp2 = new StandardQueryParser();
-    qp2.setAnalyzer(new StandardAnalyzer());
+    qp2.setAnalyzer(new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
 
     q = qp2.parse("the^3", "field");
     // "the" is a stop word so the result is an empty query:
@@ -1069,7 +1070,7 @@
   public void testStopwords() throws Exception {
     StandardQueryParser qp = new StandardQueryParser();
     qp.setAnalyzer(
-        new StopAnalyzer(new String[] { "the", "foo" }));
+        new StopAnalyzer(StopFilter.makeStopSet("the", "foo" ), true));
 
     Query result = qp.parse("a:the OR a:foo", "a");
     assertNotNull("result is null and it shouldn't be", result);
@@ -1090,31 +1091,24 @@
   }
 
   public void testPositionIncrement() throws Exception {
-    boolean dflt = StopFilter.getEnablePositionIncrementsDefault();
-    StopFilter.setEnablePositionIncrementsDefault(true);
-    try {
-      StandardQueryParser qp = new StandardQueryParser();
-      qp.setAnalyzer(
-          new StopAnalyzer(new String[] { "the", "in", "are", "this" }));
-
-      qp.setEnablePositionIncrements(true);
+    StandardQueryParser qp = new StandardQueryParser();
+    qp.setAnalyzer(
+        new StopAnalyzer(StopFilter.makeStopSet("the", "in", "are", "this" ), true));
 
-      String qtxt = "\"the words in poisitions pos02578 are stopped in this phrasequery\"";
-      // 0 2 5 7 8
-      int expectedPositions[] = { 1, 3, 4, 6, 9 };
-      PhraseQuery pq = (PhraseQuery) qp.parse(qtxt, "a");
-      // System.out.println("Query text: "+qtxt);
-      // System.out.println("Result: "+pq);
-      Term t[] = pq.getTerms();
-      int pos[] = pq.getPositions();
-      for (int i = 0; i < t.length; i++) {
-        // System.out.println(i+". "+t[i]+"  pos: "+pos[i]);
-        assertEquals("term " + i + " = " + t[i] + " has wrong term-position!",
-            expectedPositions[i], pos[i]);
-      }
+    qp.setEnablePositionIncrements(true);
 
-    } finally {
-      StopFilter.setEnablePositionIncrementsDefault(dflt);
+    String qtxt = "\"the words in poisitions pos02578 are stopped in this phrasequery\"";
+    // 0 2 5 7 8
+    int expectedPositions[] = { 1, 3, 4, 6, 9 };
+    PhraseQuery pq = (PhraseQuery) qp.parse(qtxt, "a");
+    // System.out.println("Query text: "+qtxt);
+    // System.out.println("Result: "+pq);
+    Term t[] = pq.getTerms();
+    int pos[] = pq.getPositions();
+    for (int i = 0; i < t.length; i++) {
+      // System.out.println(i+". "+t[i]+"  pos: "+pos[i]);
+      assertEquals("term " + i + " = " + t[i] + " has wrong term-position!",
+          expectedPositions[i], pos[i]);
     }
   }
 



Mime
View raw message