lucene-java-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From rm...@apache.org
Subject svn commit: r929782 [3/3] - in /lucene/dev/trunk/solr: ./ src/java/org/apache/solr/analysis/ src/java/org/apache/solr/util/ src/test/org/apache/solr/analysis/ src/test/org/apache/solr/handler/ src/test/org/apache/solr/util/ src/test/test-files/solr/conf/
Date Thu, 01 Apr 2010 02:15:30 GMT
Added: lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestKeywordMarkerFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestKeywordMarkerFilterFactory.java?rev=929782&view=auto
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestKeywordMarkerFilterFactory.java (added)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestKeywordMarkerFilterFactory.java Thu Apr  1 02:15:27 2010
@@ -0,0 +1,65 @@
+package org.apache.solr.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+import java.io.StringReader;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.analysis.PorterStemFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.WhitespaceTokenizer;
+import org.apache.solr.common.ResourceLoader;
+import org.apache.solr.core.SolrResourceLoader;
+
+/**
+ * Simple tests to ensure the keyword marker filter factory is working.
+ */
+public class TestKeywordMarkerFilterFactory extends BaseTokenTestCase {
+  public void testKeywords() throws IOException {
+    Reader reader = new StringReader("dogs cats");
+    Tokenizer tokenizer = new WhitespaceTokenizer(DEFAULT_VERSION, reader);
+    KeywordMarkerFilterFactory factory = new KeywordMarkerFilterFactory();
+    Map<String,String> args = new HashMap<String,String>(DEFAULT_VERSION_PARAM);
+    ResourceLoader loader = new SolrResourceLoader(null, null);
+    args.put("protected", "protwords.txt");
+    factory.init(args);
+    factory.inform(loader);
+    
+    TokenStream ts = new PorterStemFilter(factory.create(tokenizer));
+    assertTokenStreamContents(ts, new String[] { "dog", "cats" });
+  }
+  
+  public void testKeywordsCaseInsensitive() throws IOException {
+    Reader reader = new StringReader("dogs cats Cats");
+    Tokenizer tokenizer = new WhitespaceTokenizer(DEFAULT_VERSION, reader);
+    KeywordMarkerFilterFactory factory = new KeywordMarkerFilterFactory();
+    Map<String,String> args = new HashMap<String,String>(DEFAULT_VERSION_PARAM);
+    ResourceLoader loader = new SolrResourceLoader(null, null);
+    args.put("protected", "protwords.txt");
+    args.put("ignoreCase", "true");
+    factory.init(args);
+    factory.inform(loader);
+    
+    TokenStream ts = new PorterStemFilter(factory.create(tokenizer));
+    assertTokenStreamContents(ts, new String[] { "dog", "cats", "Cats" });
+  }
+}

Propchange: lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestKeywordMarkerFilterFactory.java
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestMultiWordSynonyms.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestMultiWordSynonyms.java?rev=929782&r1=929781&r2=929782&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestMultiWordSynonyms.java (original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestMultiWordSynonyms.java Thu Apr  1 02:15:27 2010
@@ -20,7 +20,7 @@ public class TestMultiWordSynonyms exten
     SynonymMap synMap = new SynonymMap(true);
     SynonymFilterFactory.parseRules(rules, synMap, "=>", ",", true, null);
 
-    SynonymFilter ts = new SynonymFilter(new WhitespaceTokenizer(new StringReader("a e")), synMap);
+    SynonymFilter ts = new SynonymFilter(new WhitespaceTokenizer(DEFAULT_VERSION, new StringReader("a e")), synMap);
     // This fails because ["e","e"] is the value of the token stream
     assertTokenStreamContents(ts, new String[] { "a", "e" });
   }

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestNGramFilters.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestNGramFilters.java?rev=929782&r1=929781&r2=929782&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestNGramFilters.java (original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestNGramFilters.java Thu Apr  1 02:15:27 2010
@@ -64,7 +64,7 @@ public class TestNGramFilters extends Ba
     Map<String,String> args = new HashMap<String,String>();
     NGramFilterFactory factory = new NGramFilterFactory();
     factory.init(args);
-    TokenStream stream = factory.create(new WhitespaceTokenizer(reader));
+    TokenStream stream = factory.create(new WhitespaceTokenizer(DEFAULT_VERSION, reader));
     assertTokenStreamContents(stream, 
         new String[] { "t", "e", "s", "t", "te", "es", "st" });
   }
@@ -78,7 +78,7 @@ public class TestNGramFilters extends Ba
     args.put("maxGramSize", "3");
     NGramFilterFactory factory = new NGramFilterFactory();
     factory.init(args);
-    TokenStream stream = factory.create(new WhitespaceTokenizer(reader));
+    TokenStream stream = factory.create(new WhitespaceTokenizer(DEFAULT_VERSION, reader));
     assertTokenStreamContents(stream, 
         new String[] { "te", "es", "st", "tes", "est" });
   }
@@ -129,7 +129,7 @@ public class TestNGramFilters extends Ba
     Map<String,String> args = new HashMap<String,String>();
     EdgeNGramFilterFactory factory = new EdgeNGramFilterFactory();
     factory.init(args);
-    TokenStream stream = factory.create(new WhitespaceTokenizer(reader));
+    TokenStream stream = factory.create(new WhitespaceTokenizer(DEFAULT_VERSION, reader));
     assertTokenStreamContents(stream, 
         new String[] { "t" });
   }
@@ -143,7 +143,7 @@ public class TestNGramFilters extends Ba
     args.put("maxGramSize", "2");
     EdgeNGramFilterFactory factory = new EdgeNGramFilterFactory();
     factory.init(args);
-    TokenStream stream = factory.create(new WhitespaceTokenizer(reader));
+    TokenStream stream = factory.create(new WhitespaceTokenizer(DEFAULT_VERSION, reader));
     assertTokenStreamContents(stream, 
         new String[] { "t", "te" });
   }
@@ -156,7 +156,7 @@ public class TestNGramFilters extends Ba
     args.put("side", "back");
     EdgeNGramFilterFactory factory = new EdgeNGramFilterFactory();
     factory.init(args);
-    TokenStream stream = factory.create(new WhitespaceTokenizer(reader));
+    TokenStream stream = factory.create(new WhitespaceTokenizer(DEFAULT_VERSION, reader));
     assertTokenStreamContents(stream, 
         new String[] { "y" });
   }

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestPatternReplaceCharFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestPatternReplaceCharFilter.java?rev=929782&r1=929781&r2=929782&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestPatternReplaceCharFilter.java
(original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestPatternReplaceCharFilter.java
Thu Apr  1 02:15:27 2010
@@ -47,7 +47,7 @@ public class TestPatternReplaceCharFilte
     factory.init(args);
     CharStream cs = factory.create(
           CharReader.get( new StringReader( BLOCK ) ) );
-    TokenStream ts = new WhitespaceTokenizer( cs );
+    TokenStream ts = new WhitespaceTokenizer(DEFAULT_VERSION, cs );
     assertTokenStreamContents(ts,
         new String[] { "this", "is", "test." },
         new int[] { 0, 5, 8 },
@@ -64,7 +64,7 @@ public class TestPatternReplaceCharFilte
     factory.init(args);
     CharStream cs = factory.create(
           CharReader.get( new StringReader( BLOCK ) ) );
-    TokenStream ts = new WhitespaceTokenizer( cs );
+    TokenStream ts = new WhitespaceTokenizer(DEFAULT_VERSION, cs );
     assertFalse(ts.incrementToken());
   }
   
@@ -80,7 +80,7 @@ public class TestPatternReplaceCharFilte
     factory.init(args);
     CharStream cs = factory.create(
           CharReader.get( new StringReader( BLOCK ) ) );
-    TokenStream ts = new WhitespaceTokenizer( cs );
+    TokenStream ts = new WhitespaceTokenizer(DEFAULT_VERSION, cs );
     assertTokenStreamContents(ts,
         new String[] { "aa#bb#cc" },
         new int[] { 0 },
@@ -95,7 +95,7 @@ public class TestPatternReplaceCharFilte
     final String BLOCK = "aa bb cc dd";
     CharStream cs = new PatternReplaceCharFilter( pattern("(aa)\\s+(bb)\\s+(cc)"), "$1##$2###$3",
           CharReader.get( new StringReader( BLOCK ) ) );
-    TokenStream ts = new WhitespaceTokenizer( cs );
+    TokenStream ts = new WhitespaceTokenizer(DEFAULT_VERSION, cs );
     assertTokenStreamContents(ts,
         new String[] { "aa##bb###cc", "dd" },
         new int[] { 0, 9 },
@@ -109,7 +109,7 @@ public class TestPatternReplaceCharFilte
     final String BLOCK = " a  a";
     CharStream cs = new PatternReplaceCharFilter( pattern("a"), "aa",
           CharReader.get( new StringReader( BLOCK ) ) );
-    TokenStream ts = new WhitespaceTokenizer( cs );
+    TokenStream ts = new WhitespaceTokenizer(DEFAULT_VERSION, cs );
     assertTokenStreamContents(ts,
         new String[] { "aa", "aa" },
         new int[] { 1, 4 },
@@ -124,7 +124,7 @@ public class TestPatternReplaceCharFilte
     final String BLOCK = "aa  bb   cc dd";
     CharStream cs = new PatternReplaceCharFilter( pattern("(aa)\\s+(bb)\\s+(cc)"), "$1#$2",
           CharReader.get( new StringReader( BLOCK ) ) );
-    TokenStream ts = new WhitespaceTokenizer( cs );
+    TokenStream ts = new WhitespaceTokenizer(DEFAULT_VERSION, cs );
     assertTokenStreamContents(ts,
         new String[] { "aa#bb", "dd" },
         new int[] { 0, 12 },
@@ -139,7 +139,7 @@ public class TestPatternReplaceCharFilte
     final String BLOCK = "  aa bb cc --- aa bb aa   bb   cc";
     CharStream cs = new PatternReplaceCharFilter( pattern("(aa)\\s+(bb)\\s+(cc)"), "$1  $2
 $3",
           CharReader.get( new StringReader( BLOCK ) ) );
-    TokenStream ts = new WhitespaceTokenizer( cs );
+    TokenStream ts = new WhitespaceTokenizer(DEFAULT_VERSION, cs );
     assertTokenStreamContents(ts,
         new String[] { "aa", "bb", "cc", "---", "aa", "bb", "aa", "bb", "cc" },
         new int[] { 2, 6, 9, 11, 15, 18, 21, 25, 29 },
@@ -154,7 +154,7 @@ public class TestPatternReplaceCharFilte
     final String BLOCK = "  aa bb cc --- aa bb aa. bb aa   bb cc";
     CharStream cs = new PatternReplaceCharFilter( pattern("(aa)\\s+(bb)"), "$1##$2", ".",
           CharReader.get( new StringReader( BLOCK ) ) );
-    TokenStream ts = new WhitespaceTokenizer( cs );
+    TokenStream ts = new WhitespaceTokenizer(DEFAULT_VERSION, cs );
     assertTokenStreamContents(ts,
         new String[] { "aa##bb", "cc", "---", "aa##bb", "aa.", "bb", "aa##bb", "cc" },
         new int[] { 2, 8, 11, 15, 21, 25, 28, 36 },
@@ -171,7 +171,7 @@ public class TestPatternReplaceCharFilte
         CharReader.get( new StringReader( BLOCK ) ) );
     cs = new PatternReplaceCharFilter( pattern("bb"), "b", ".", cs );
     cs = new PatternReplaceCharFilter( pattern("ccc"), "c", ".", cs );
-    TokenStream ts = new WhitespaceTokenizer( cs );
+    TokenStream ts = new WhitespaceTokenizer(DEFAULT_VERSION, cs );
     assertTokenStreamContents(ts,
         new String[] { "aa", "b", "-", "c", ".", "---", "b", "aa", ".", "c", "c", "b" },
         new int[] { 1, 3, 6, 8, 12, 14, 18, 21, 23, 25, 29, 33 },

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestPatternReplaceFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestPatternReplaceFilter.java?rev=929782&r1=929781&r2=929782&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestPatternReplaceFilter.java
(original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestPatternReplaceFilter.java
Thu Apr  1 02:15:27 2010
@@ -31,7 +31,7 @@ public class TestPatternReplaceFilter ex
   public void testReplaceAll() throws Exception {
     String input = "aabfooaabfooabfoob ab caaaaaaaaab";
     TokenStream ts = new PatternReplaceFilter
-            (new WhitespaceTokenizer(new StringReader(input)),
+            (new WhitespaceTokenizer(DEFAULT_VERSION, new StringReader(input)),
                     Pattern.compile("a*b"),
                     "-", true);
     assertTokenStreamContents(ts, 
@@ -41,7 +41,7 @@ public class TestPatternReplaceFilter ex
   public void testReplaceFirst() throws Exception {
     String input = "aabfooaabfooabfoob ab caaaaaaaaab";
     TokenStream ts = new PatternReplaceFilter
-            (new WhitespaceTokenizer(new StringReader(input)),
+            (new WhitespaceTokenizer(DEFAULT_VERSION, new StringReader(input)),
                     Pattern.compile("a*b"),
                     "-", false);
     assertTokenStreamContents(ts, 
@@ -51,7 +51,7 @@ public class TestPatternReplaceFilter ex
   public void testStripFirst() throws Exception {
     String input = "aabfooaabfooabfoob ab caaaaaaaaab";
     TokenStream ts = new PatternReplaceFilter
-            (new WhitespaceTokenizer(new StringReader(input)),
+            (new WhitespaceTokenizer(DEFAULT_VERSION, new StringReader(input)),
                     Pattern.compile("a*b"),
                     null, false);
     assertTokenStreamContents(ts,
@@ -61,7 +61,7 @@ public class TestPatternReplaceFilter ex
   public void testStripAll() throws Exception {
     String input = "aabfooaabfooabfoob ab caaaaaaaaab";
     TokenStream ts = new PatternReplaceFilter
-            (new WhitespaceTokenizer(new StringReader(input)),
+            (new WhitespaceTokenizer(DEFAULT_VERSION, new StringReader(input)),
                     Pattern.compile("a*b"),
                     null, true);
     assertTokenStreamContents(ts,
@@ -71,7 +71,7 @@ public class TestPatternReplaceFilter ex
   public void testReplaceAllWithBackRef() throws Exception {
     String input = "aabfooaabfooabfoob ab caaaaaaaaab";
     TokenStream ts = new PatternReplaceFilter
-            (new WhitespaceTokenizer(new StringReader(input)),
+            (new WhitespaceTokenizer(DEFAULT_VERSION, new StringReader(input)),
                     Pattern.compile("(a*)b"),
                     "$1\\$", true);
     assertTokenStreamContents(ts,

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestPatternTokenizerFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestPatternTokenizerFactory.java?rev=929782&r1=929781&r2=929782&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestPatternTokenizerFactory.java
(original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestPatternTokenizerFactory.java
Thu Apr  1 02:15:27 2010
@@ -117,7 +117,7 @@ public class TestPatternTokenizerFactory
    */
   private static String tsToString(TokenStream in) throws IOException {
     StringBuilder out = new StringBuilder();
-    TermAttribute termAtt = (TermAttribute) in.addAttribute(TermAttribute.class);
+    TermAttribute termAtt = in.addAttribute(TermAttribute.class);
     // extra safety to enforce, that the state is not preserved and also
     // assign bogus values
     in.clearAttributes();

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestPersianNormalizationFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestPersianNormalizationFilterFactory.java?rev=929782&r1=929781&r2=929782&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestPersianNormalizationFilterFactory.java
(original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestPersianNormalizationFilterFactory.java
Thu Apr  1 02:15:27 2010
@@ -33,7 +33,7 @@ public class TestPersianNormalizationFil
    */
   public void testNormalization() throws Exception {
     Reader reader = new StringReader("های");
-    Tokenizer tokenizer = new WhitespaceTokenizer(reader);
+    Tokenizer tokenizer = new WhitespaceTokenizer(DEFAULT_VERSION, reader);
     PersianNormalizationFilterFactory factory = new PersianNormalizationFilterFactory();
     TokenStream stream = factory.create(tokenizer);
     assertTokenStreamContents(stream, new String[] { "هاي" });

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestPhoneticFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestPhoneticFilter.java?rev=929782&r1=929781&r2=929782&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestPhoneticFilter.java (original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestPhoneticFilter.java Thu Apr  1 02:15:27 2010
@@ -83,7 +83,7 @@ public class TestPhoneticFilter extends 
   
   static void assertAlgorithm(String algName, String inject, String input,
       String[] expected) throws Exception {
-    Tokenizer tokenizer = new WhitespaceTokenizer(
+    Tokenizer tokenizer = new WhitespaceTokenizer(DEFAULT_VERSION,
         new StringReader(input));
     Map<String,String> args = new HashMap<String,String>();
     args.put("encoder", algName);

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestPorterStemFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestPorterStemFilterFactory.java?rev=929782&r1=929781&r2=929782&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestPorterStemFilterFactory.java
(original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestPorterStemFilterFactory.java
Thu Apr  1 02:15:27 2010
@@ -33,7 +33,7 @@ public class TestPorterStemFilterFactory
    */
   public void testStemming() throws Exception {
     Reader reader = new StringReader("dogs");
-    Tokenizer tokenizer = new WhitespaceTokenizer(reader);
+    Tokenizer tokenizer = new WhitespaceTokenizer(DEFAULT_VERSION, reader);
     PorterStemFilterFactory factory = new PorterStemFilterFactory();
     TokenStream stream = factory.create(tokenizer);
     assertTokenStreamContents(stream, new String[] { "dog" });

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestRemoveDuplicatesTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestRemoveDuplicatesTokenFilter.java?rev=929782&r1=929781&r2=929782&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestRemoveDuplicatesTokenFilter.java
(original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestRemoveDuplicatesTokenFilter.java
Thu Apr  1 02:15:27 2010
@@ -17,7 +17,6 @@
 
 package org.apache.solr.analysis;
 
-import junit.framework.TestCase;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
@@ -45,9 +44,9 @@ public class TestRemoveDuplicatesTokenFi
     RemoveDuplicatesTokenFilterFactory factory = new RemoveDuplicatesTokenFilterFactory();
     final TokenStream ts = factory.create
       (new TokenStream() {
-          TermAttribute termAtt = (TermAttribute) addAttribute(TermAttribute.class);
-          OffsetAttribute offsetAtt = (OffsetAttribute) addAttribute(OffsetAttribute.class);
-          PositionIncrementAttribute posIncAtt = (PositionIncrementAttribute) addAttribute(PositionIncrementAttribute.class);
+          TermAttribute termAtt = addAttribute(TermAttribute.class);
+          OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
+          PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
           public boolean incrementToken() {
             if (toks.hasNext()) {
               clearAttributes();

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestReverseStringFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestReverseStringFilterFactory.java?rev=929782&r1=929781&r2=929782&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestReverseStringFilterFactory.java
(original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestReverseStringFilterFactory.java
Thu Apr  1 02:15:27 2010
@@ -33,7 +33,7 @@ public class TestReverseStringFilterFact
    */
   public void testReversing() throws Exception {
     Reader reader = new StringReader("simple test");
-    Tokenizer tokenizer = new WhitespaceTokenizer(reader);
+    Tokenizer tokenizer = new WhitespaceTokenizer(DEFAULT_VERSION, reader);
     ReverseStringFilterFactory factory = new ReverseStringFilterFactory();
     factory.init(DEFAULT_VERSION_PARAM);
     TokenStream stream = factory.create(tokenizer);

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java?rev=929782&r1=929781&r2=929782&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java
(original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java
Thu Apr  1 02:15:27 2010
@@ -58,7 +58,7 @@ public class TestReversedWildcardFilterF
     String text = "simple text";
     args.put("withOriginal", "true");
     factory.init(args);
-    TokenStream input = factory.create(new WhitespaceTokenizer(new StringReader(text)));
+    TokenStream input = factory.create(new WhitespaceTokenizer(DEFAULT_VERSION, new StringReader(text)));
     assertTokenStreamContents(input, 
         new String[] { "\u0001elpmis", "simple", "\u0001txet", "text" },
         new int[] { 1, 0, 1, 0 });
@@ -66,7 +66,7 @@ public class TestReversedWildcardFilterF
     // now without original tokens
     args.put("withOriginal", "false");
     factory.init(args);
-    input = factory.create(new WhitespaceTokenizer(new StringReader(text)));
+    input = factory.create(new WhitespaceTokenizer(DEFAULT_VERSION, new StringReader(text)));
     assertTokenStreamContents(input,
         new String[] { "\u0001elpmis", "\u0001txet" },
         new int[] { 1, 1 });

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestRussianFilters.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestRussianFilters.java?rev=929782&r1=929781&r2=929782&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestRussianFilters.java (original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestRussianFilters.java Thu Apr  1 02:15:27 2010
@@ -19,8 +19,6 @@ package org.apache.solr.analysis;
 
 import java.io.Reader;
 import java.io.StringReader;
-import java.util.HashMap;
-import java.util.Map;
 
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestShingleFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestShingleFilterFactory.java?rev=929782&r1=929781&r2=929782&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestShingleFilterFactory.java
(original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestShingleFilterFactory.java
Thu Apr  1 02:15:27 2010
@@ -37,7 +37,7 @@ public class TestShingleFilterFactory ex
     Map<String,String> args = new HashMap<String,String>();
     ShingleFilterFactory factory = new ShingleFilterFactory();
     factory.init(args);
-    TokenStream stream = factory.create(new WhitespaceTokenizer(reader));
+    TokenStream stream = factory.create(new WhitespaceTokenizer(DEFAULT_VERSION, reader));
     assertTokenStreamContents(stream, new String[] {"this", "this is", "is",
         "is a", "a", "a test", "test"});
   }
@@ -51,7 +51,7 @@ public class TestShingleFilterFactory ex
     args.put("outputUnigrams", "false");
     ShingleFilterFactory factory = new ShingleFilterFactory();
     factory.init(args);
-    TokenStream stream = factory.create(new WhitespaceTokenizer(reader));
+    TokenStream stream = factory.create(new WhitespaceTokenizer(DEFAULT_VERSION, reader));
     assertTokenStreamContents(stream,
         new String[] {"this is", "is a", "a test"});
   }
@@ -65,7 +65,7 @@ public class TestShingleFilterFactory ex
     args.put("maxShingleSize", "3");
     ShingleFilterFactory factory = new ShingleFilterFactory();
     factory.init(args);
-    TokenStream stream = factory.create(new WhitespaceTokenizer(reader));
+    TokenStream stream = factory.create(new WhitespaceTokenizer(DEFAULT_VERSION, reader));
     assertTokenStreamContents(stream, 
         new String[] {"this", "this is", "this is a", "is",
         "is a", "is a test", "a", "a test", "test"});

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestStandardFactories.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestStandardFactories.java?rev=929782&r1=929781&r2=929782&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestStandardFactories.java (original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestStandardFactories.java Thu Apr  1 02:15:27 2010
@@ -108,7 +108,7 @@ public class TestStandardFactories exten
    */
   public void testASCIIFolding() throws Exception {
     Reader reader = new StringReader("Česká");
-    Tokenizer tokenizer = new WhitespaceTokenizer(reader);
+    Tokenizer tokenizer = new WhitespaceTokenizer(DEFAULT_VERSION, reader);
     ASCIIFoldingFilterFactory factory = new ASCIIFoldingFilterFactory();
     factory.init(DEFAULT_VERSION_PARAM);
     TokenStream stream = factory.create(tokenizer);
@@ -121,7 +121,7 @@ public class TestStandardFactories exten
    */
   public void testISOLatin1Folding() throws Exception {
     Reader reader = new StringReader("Česká");
-    Tokenizer tokenizer = new WhitespaceTokenizer(reader);
+    Tokenizer tokenizer = new WhitespaceTokenizer(DEFAULT_VERSION, reader);
     ISOLatin1AccentFilterFactory factory = new ISOLatin1AccentFilterFactory();
     factory.init(DEFAULT_VERSION_PARAM);
     TokenStream stream = factory.create(tokenizer);

Added: lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestStemmerOverrideFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestStemmerOverrideFilterFactory.java?rev=929782&view=auto
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestStemmerOverrideFilterFactory.java (added)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestStemmerOverrideFilterFactory.java Thu Apr  1 02:15:27 2010
@@ -0,0 +1,66 @@
+package org.apache.solr.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+import java.io.StringReader;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.analysis.PorterStemFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.WhitespaceTokenizer;
+import org.apache.solr.common.ResourceLoader;
+import org.apache.solr.core.SolrResourceLoader;
+
+/**
+ * Simple tests to ensure the stemmer override filter factory is working.
+ */
+public class TestStemmerOverrideFilterFactory extends BaseTokenTestCase {
+  public void testKeywords() throws IOException {
+    // our stemdict stems dogs to 'cat'
+    Reader reader = new StringReader("testing dogs");
+    Tokenizer tokenizer = new WhitespaceTokenizer(DEFAULT_VERSION, reader);
+    StemmerOverrideFilterFactory factory = new StemmerOverrideFilterFactory();
+    Map<String,String> args = new HashMap<String,String>(DEFAULT_VERSION_PARAM);
+    ResourceLoader loader = new SolrResourceLoader(null, null);
+    args.put("dictionary", "stemdict.txt");
+    factory.init(args);
+    factory.inform(loader);
+    
+    TokenStream ts = new PorterStemFilter(factory.create(tokenizer));
+    assertTokenStreamContents(ts, new String[] { "test", "cat" });
+  }
+  
+  public void testKeywordsCaseInsensitive() throws IOException {
+    Reader reader = new StringReader("testing DoGs");
+    Tokenizer tokenizer = new WhitespaceTokenizer(DEFAULT_VERSION, reader);
+    StemmerOverrideFilterFactory factory = new StemmerOverrideFilterFactory();
+    Map<String,String> args = new HashMap<String,String>(DEFAULT_VERSION_PARAM);
+    ResourceLoader loader = new SolrResourceLoader(null, null);
+    args.put("dictionary", "stemdict.txt");
+    args.put("ignoreCase", "true");
+    factory.init(args);
+    factory.inform(loader);
+    
+    TokenStream ts = new PorterStemFilter(factory.create(tokenizer));
+    assertTokenStreamContents(ts, new String[] { "test", "cat" });
+  }
+}

Propchange: lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestStemmerOverrideFilterFactory.java
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestStopFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestStopFilterFactory.java?rev=929782&r1=929781&r2=929782&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestStopFilterFactory.java (original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestStopFilterFactory.java Thu Apr  1 02:15:27 2010
@@ -24,25 +24,22 @@ import java.util.Set;
 import java.util.Map;
 import java.util.HashMap;
 
-import junit.framework.TestCase;
-
-
 /**
  *
  *
  **/
-public class TestStopFilterFactory extends TestCase {
+public class TestStopFilterFactory extends BaseTokenTestCase {
 
   public void testInform() throws Exception {
     ResourceLoader loader = new SolrResourceLoader(null, null);
     assertTrue("loader is null and it shouldn't be", loader != null);
     StopFilterFactory factory = new StopFilterFactory();
-    Map<String, String> args = new HashMap<String, String>();
+    Map<String, String> args = new HashMap<String, String>(DEFAULT_VERSION_PARAM);
     args.put("words", "stop-1.txt");
     args.put("ignoreCase", "true");
     factory.init(args);
     factory.inform(loader);
-    Set words = factory.getStopWords();
+    Set<?> words = factory.getStopWords();
     assertTrue("words is null and it shouldn't be", words != null);
     assertTrue("words Size: " + words.size() + " is not: " + 2, words.size() == 2);
     assertTrue(factory.isIgnoreCase() + " does not equal: " + true, factory.isIgnoreCase()
== true);

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestSynonymFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestSynonymFilter.java?rev=929782&r1=929781&r2=929782&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestSynonymFilter.java (original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestSynonymFilter.java Thu Apr
 1 02:15:27 2010
@@ -47,14 +47,14 @@ public class TestSynonymFilter extends B
 
   static void assertTokenizesTo(SynonymMap dict, String input,
       String expected[]) throws IOException {
-    Tokenizer tokenizer = new WhitespaceTokenizer(new StringReader(input));
+    Tokenizer tokenizer = new WhitespaceTokenizer(DEFAULT_VERSION, new StringReader(input));
     SynonymFilter stream = new SynonymFilter(tokenizer, dict);
     assertTokenStreamContents(stream, expected);
   }
   
   static void assertTokenizesTo(SynonymMap dict, String input,
       String expected[], int posIncs[]) throws IOException {
-    Tokenizer tokenizer = new WhitespaceTokenizer(new StringReader(input));
+    Tokenizer tokenizer = new WhitespaceTokenizer(DEFAULT_VERSION, new StringReader(input));
     SynonymFilter stream = new SynonymFilter(tokenizer, dict);
     assertTokenStreamContents(stream, expected, posIncs);
   }
@@ -381,12 +381,12 @@ public class TestSynonymFilter extends B
   private static class IterTokenStream extends TokenStream {
     final Token tokens[];
     int index = 0;
-    TermAttribute termAtt = (TermAttribute) addAttribute(TermAttribute.class);
-    OffsetAttribute offsetAtt = (OffsetAttribute) addAttribute(OffsetAttribute.class);
-    PositionIncrementAttribute posIncAtt = (PositionIncrementAttribute) addAttribute(PositionIncrementAttribute.class);
-    FlagsAttribute flagsAtt = (FlagsAttribute) addAttribute(FlagsAttribute.class);
-    TypeAttribute typeAtt = (TypeAttribute) addAttribute(TypeAttribute.class);
-    PayloadAttribute payloadAtt = (PayloadAttribute) addAttribute(PayloadAttribute.class);
+    TermAttribute termAtt = addAttribute(TermAttribute.class);
+    OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
+    PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
+    FlagsAttribute flagsAtt = addAttribute(FlagsAttribute.class);
+    TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
+    PayloadAttribute payloadAtt = addAttribute(PayloadAttribute.class);
     
     public IterTokenStream(Token... tokens) {
       super();

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestThaiWordFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestThaiWordFilterFactory.java?rev=929782&r1=929781&r2=929782&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestThaiWordFilterFactory.java
(original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestThaiWordFilterFactory.java
Thu Apr  1 02:15:27 2010
@@ -33,7 +33,7 @@ public class TestThaiWordFilterFactory e
    */
   public void testWordBreak() throws Exception {
     Reader reader = new StringReader("การที่ได้ต้องแสดงว่างานดี");
-    Tokenizer tokenizer = new WhitespaceTokenizer(reader);
+    Tokenizer tokenizer = new WhitespaceTokenizer(DEFAULT_VERSION, reader);
     ThaiWordFilterFactory factory = new ThaiWordFilterFactory();
     TokenStream stream = factory.create(tokenizer);
     assertTokenStreamContents(stream, new String[] {"การ", "ที่",
"ได้",

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestTrimFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestTrimFilter.java?rev=929782&r1=929781&r2=929782&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestTrimFilter.java (original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestTrimFilter.java Thu Apr  1
02:15:27 2010
@@ -81,12 +81,12 @@ public class TestTrimFilter extends Base
   private static class IterTokenStream extends TokenStream {
     final Token tokens[];
     int index = 0;
-    TermAttribute termAtt = (TermAttribute) addAttribute(TermAttribute.class);
-    OffsetAttribute offsetAtt = (OffsetAttribute) addAttribute(OffsetAttribute.class);
-    PositionIncrementAttribute posIncAtt = (PositionIncrementAttribute) addAttribute(PositionIncrementAttribute.class);
-    FlagsAttribute flagsAtt = (FlagsAttribute) addAttribute(FlagsAttribute.class);
-    TypeAttribute typeAtt = (TypeAttribute) addAttribute(TypeAttribute.class);
-    PayloadAttribute payloadAtt = (PayloadAttribute) addAttribute(PayloadAttribute.class);
+    TermAttribute termAtt = addAttribute(TermAttribute.class);
+    OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
+    PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
+    FlagsAttribute flagsAtt = addAttribute(FlagsAttribute.class);
+    TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
+    PayloadAttribute payloadAtt = addAttribute(PayloadAttribute.class);
     
     public IterTokenStream(Token... tokens) {
       super();

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestTurkishLowerCaseFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestTurkishLowerCaseFilterFactory.java?rev=929782&r1=929781&r2=929782&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestTurkishLowerCaseFilterFactory.java
(original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestTurkishLowerCaseFilterFactory.java
Thu Apr  1 02:15:27 2010
@@ -33,7 +33,7 @@ public class TestTurkishLowerCaseFilterF
    */
   public void testCasing() throws Exception {
     Reader reader = new StringReader("AĞACI");
-    Tokenizer tokenizer = new WhitespaceTokenizer(reader);
+    Tokenizer tokenizer = new WhitespaceTokenizer(DEFAULT_VERSION, reader);
     TurkishLowerCaseFilterFactory factory = new TurkishLowerCaseFilterFactory();
     TokenStream stream = factory.create(tokenizer);
     assertTokenStreamContents(stream, new String[] { "ağacı" });

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestWordDelimiterFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestWordDelimiterFilter.java?rev=929782&r1=929781&r2=929782&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestWordDelimiterFilter.java (original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/analysis/TestWordDelimiterFilter.java Thu
Apr  1 02:15:27 2010
@@ -350,8 +350,8 @@ public class TestWordDelimiterFilter ext
     
     protected LargePosIncTokenFilter(TokenStream input) {
       super(input);
-      termAtt = (TermAttribute) addAttribute(TermAttribute.class);
-      posIncAtt = (PositionIncrementAttribute) addAttribute(PositionIncrementAttribute.class);
+      termAtt = addAttribute(TermAttribute.class);
+      posIncAtt = addAttribute(PositionIncrementAttribute.class);
     }
 
     @Override
@@ -368,13 +368,13 @@ public class TestWordDelimiterFilter ext
   
   @Test
   public void testPositionIncrements() throws Exception {
-    final CharArraySet protWords = new CharArraySet(new HashSet<String>(Arrays.asList("NUTCH")),
false);
+    final CharArraySet protWords = new CharArraySet(DEFAULT_VERSION, new HashSet<String>(Arrays.asList("NUTCH")),
false);
     
     /* analyzer that uses whitespace + wdf */
     Analyzer a = new Analyzer() {
       public TokenStream tokenStream(String field, Reader reader) {
         return new WordDelimiterFilter(
-            new WhitespaceTokenizer(reader),
+            new WhitespaceTokenizer(DEFAULT_VERSION, reader),
             1, 1, 0, 0, 1, 1, 0, 1, 1, protWords);
       }
     };
@@ -401,7 +401,7 @@ public class TestWordDelimiterFilter ext
       public TokenStream tokenStream(String field, Reader reader) {
         return new WordDelimiterFilter(
             new LargePosIncTokenFilter(
-            new WhitespaceTokenizer(reader)),
+            new WhitespaceTokenizer(DEFAULT_VERSION, reader)),
             1, 1, 0, 0, 1, 1, 0, 1, 1, protWords);
       }
     };

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/handler/DocumentAnalysisRequestHandlerTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/handler/DocumentAnalysisRequestHandlerTest.java?rev=929782&r1=929781&r2=929782&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/handler/DocumentAnalysisRequestHandlerTest.java
(original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/handler/DocumentAnalysisRequestHandlerTest.java
Thu Apr  1 02:15:27 2010
@@ -190,8 +190,8 @@ public class DocumentAnalysisRequestHand
     assertNotNull("Expecting the 'StopFilter' to be applied on the query for the 'text' field",
tokenList);
     assertEquals("Query has only one token", 1, tokenList.size());
     assertToken(tokenList.get(0), new TokenInfo("jumping", null, "<ALPHANUM>", 0, 7,
1, null, false));
-    tokenList = (List<NamedList>) queryResult.get("org.apache.solr.analysis.EnglishPorterFilter");
-    assertNotNull("Expecting the 'EnglishPorterFilter' to be applied on the query for the
'text' field", tokenList);
+    tokenList = (List<NamedList>) queryResult.get("org.apache.lucene.analysis.snowball.SnowballFilter");
+    assertNotNull("Expecting the 'SnowballFilter' to be applied on the query for the 'text'
field", tokenList);
     assertEquals("Query has only one token", 1, tokenList.size());
     assertToken(tokenList.get(0), new TokenInfo("jump", null, "<ALPHANUM>", 0, 7, 1,
null, false));
     indexResult = textResult.get("index");
@@ -231,8 +231,8 @@ public class DocumentAnalysisRequestHand
     assertToken(tokenList.get(1), new TokenInfo("jumped", null, "<ALPHANUM>", 8, 14,
2, null, false));
     assertToken(tokenList.get(2), new TokenInfo("over", null, "<ALPHANUM>", 15, 19,
3, null, false));
     assertToken(tokenList.get(3), new TokenInfo("dogs", null, "<ALPHANUM>", 24, 28,
4, null, false));
-    tokenList = valueResult.get("org.apache.solr.analysis.EnglishPorterFilter");
-    assertNotNull("Expecting the 'EnglishPorterFilter' to be applied on the index for the
'text' field", tokenList);
+    tokenList = valueResult.get("org.apache.lucene.analysis.snowball.SnowballFilter");
+    assertNotNull("Expecting the 'SnowballFilter' to be applied on the index for the 'text'
field", tokenList);
     assertEquals("Expecting 4 tokens", 4, tokenList.size());
     assertToken(tokenList.get(0), new TokenInfo("fox", null, "<ALPHANUM>", 4, 7, 1,
null, false));
     assertToken(tokenList.get(1), new TokenInfo("jump", null, "<ALPHANUM>", 8, 14,
2, null, true));

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/handler/FieldAnalysisRequestHandlerTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/handler/FieldAnalysisRequestHandlerTest.java?rev=929782&r1=929781&r2=929782&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/handler/FieldAnalysisRequestHandlerTest.java
(original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/handler/FieldAnalysisRequestHandlerTest.java
Thu Apr  1 02:15:27 2010
@@ -173,8 +173,8 @@ public class FieldAnalysisRequestHandler
     assertToken(tokenList.get(5), new TokenInfo("lazy", null, "<ALPHANUM>", 34, 38,
6, null, false));
     assertToken(tokenList.get(6), new TokenInfo("brown", null, "<ALPHANUM>", 39, 44,
7, null, true));
     assertToken(tokenList.get(7), new TokenInfo("dogs", null, "<ALPHANUM>", 45, 49,
8, null, false));
-    tokenList = indexPart.get("org.apache.solr.analysis.EnglishPorterFilter");
-    assertNotNull("Expcting EnglishPorterFilter analysis breakdown", tokenList);
+    tokenList = indexPart.get("org.apache.lucene.analysis.snowball.SnowballFilter");
+    assertNotNull("Expecting SnowballFilter analysis breakdown", tokenList);
     assertEquals(tokenList.size(), 8);
     assertToken(tokenList.get(0), new TokenInfo("quick", null, "<ALPHANUM>", 4, 9,
1, null, false));
     assertToken(tokenList.get(1), new TokenInfo("red", null, "<ALPHANUM>", 10, 13,
2, null, false));
@@ -208,8 +208,8 @@ public class FieldAnalysisRequestHandler
     assertEquals(2, tokenList.size());
     assertToken(tokenList.get(0), new TokenInfo("fox", null, "<ALPHANUM>", 0, 3, 1,
null, false));
     assertToken(tokenList.get(1), new TokenInfo("brown", null, "<ALPHANUM>", 4, 9,
2, null, false));
-    tokenList = queryPart.get("org.apache.solr.analysis.EnglishPorterFilter");
-    assertNotNull("Expcting EnglishPorterFilter analysis breakdown", tokenList);
+    tokenList = queryPart.get("org.apache.lucene.analysis.snowball.SnowballFilter");
+    assertNotNull("Expecting SnowballFilter analysis breakdown", tokenList);
     assertEquals(2, tokenList.size());
     assertToken(tokenList.get(0), new TokenInfo("fox", null, "<ALPHANUM>", 0, 3, 1,
null, false));
     assertToken(tokenList.get(1), new TokenInfo("brown", null, "<ALPHANUM>", 4, 9,
2, null, false));

Added: lucene/dev/trunk/solr/src/test/test-files/solr/conf/stemdict.txt
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/test-files/solr/conf/stemdict.txt?rev=929782&view=auto
==============================================================================
--- lucene/dev/trunk/solr/src/test/test-files/solr/conf/stemdict.txt (added)
+++ lucene/dev/trunk/solr/src/test/test-files/solr/conf/stemdict.txt Thu Apr  1 02:15:27 2010
@@ -0,0 +1,22 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#-----------------------------------------------------------------------
+# test that we can override the stemming algorithm with our own mappings
+# these must be tab-separated
+monkeys	monkey
+otters	otter
+# some crazy ones that a stemmer would never do
+dogs	cat

Propchange: lucene/dev/trunk/solr/src/test/test-files/solr/conf/stemdict.txt
------------------------------------------------------------------------------
    svn:eol-style = native



Mime
View raw message