lucene-java-commits mailing list archives

From: uschind...@apache.org
Subject: svn commit: r807190 [1/2] - in /lucene/java/trunk: contrib/analyzers/common/src/java/org/apache/lucene/analysis/miscellaneous/ contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/ contrib/analyzers/common/src/test/org/apache/lucene/analysis...
Date: Mon, 24 Aug 2009 12:44:16 GMT
Author: uschindler
Date: Mon Aug 24 12:44:13 2009
New Revision: 807190

URL: http://svn.apache.org/viewvc?rev=807190&view=rev
Log:
LUCENE-1843: Update contrib tests to conform to onlyUseNewAPI; refactor assertAnalyzesTo and other helpers into the new BaseTokenStreamTestCase class; rewrite TestMappingCharFilter to use the new assert methods as well; improve the performance of Token.copyTo(); new implementation of SingleTokenTokenStream
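
In practice this means a contrib test no longer carries its own copy of assertAnalyzesTo: it extends the shared base class and inherits the assertion helpers. A minimal sketch of the resulting test shape, assuming a hypothetical MyAnalyzer that lowercases and drops punctuation (the helper names are taken from the diffs below; their exact signatures in BaseTokenStreamTestCase may differ):

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.BaseTokenStreamTestCase;

    public class TestMyAnalyzer extends BaseTokenStreamTestCase {
      public void testBasics() throws Exception {
        Analyzer a = new MyAnalyzer(); // hypothetical analyzer under test
        // inherited helper: analyzes the input, checks the emitted terms in order,
        // then asserts the stream is exhausted
        assertAnalyzesTo(a, "Some input text.",
            new String[] { "some", "input", "text" });
        // inherited helper: the same check, driven through Analyzer.reusableTokenStream()
        assertAnalyzesToReuse(a, "Some input text.",
            new String[] { "some", "input", "text" });
      }
    }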

Removed:
    lucene/java/trunk/src/test/org/apache/lucene/analysis/BaseTokenTestCase.java
Modified:
    lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/miscellaneous/SingleTokenTokenStream.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicNormalizationFilter.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicStemFilter.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cn/TestChineseTokenizer.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechAnalyzer.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianAnalyzer.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianNormalizationFilter.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestElision.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestEmptyTokenStream.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAndSuffixAwareTokenFilter.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAwareTokenFilter.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestSingleTokenTokenFilter.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterTest.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilterTest.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterTest.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/position/PositionFilterTest.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianStem.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/TestShingleMatrixFilter.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/DateRecognizerSinkTokenizerTest.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/TokenRangeSinkTokenizerTest.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/TokenTypeSinkTokenizerTest.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java
    lucene/java/trunk/contrib/analyzers/smartcn/src/test/org/apache/lucene/analysis/cn/TestSmartChineseAnalyzer.java
    lucene/java/trunk/contrib/memory/src/test/org/apache/lucene/index/memory/TestSynonymTokenFilter.java
    lucene/java/trunk/contrib/snowball/src/test/org/apache/lucene/analysis/snowball/TestSnowball.java
    lucene/java/trunk/contrib/wikipedia/src/test/org/apache/lucene/wikipedia/analysis/WikipediaTokenizerTest.java
    lucene/java/trunk/src/java/org/apache/lucene/analysis/Token.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestAnalyzers.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestToken.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/tokenattributes/TestTermAttributeImpl.java
    lucene/java/trunk/src/test/org/apache/lucene/util/LuceneTestCase.java

Modified: lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/miscellaneous/SingleTokenTokenStream.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/miscellaneous/SingleTokenTokenStream.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/miscellaneous/SingleTokenTokenStream.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/miscellaneous/SingleTokenTokenStream.java Mon Aug 24 12:44:13 2009
@@ -19,14 +19,10 @@
 
 import java.io.IOException;
 
+import org.apache.lucene.util.AttributeImpl;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
 
 /**
  * A {@link TokenStream} containing a single token.
@@ -34,45 +30,37 @@
 public class SingleTokenTokenStream extends TokenStream {
 
   private boolean exhausted = false;
+  
   // The token needs to be immutable, so work with clones!
   private Token singleToken;
+  private final AttributeImpl tokenAtt;
 
-  private TermAttribute termAtt;
-  private OffsetAttribute offsetAtt;
-  private FlagsAttribute flagsAtt;
-  private PositionIncrementAttribute posIncAtt;
-  private TypeAttribute typeAtt;
-  private PayloadAttribute payloadAtt;
+  private static final AttributeFactory TOKEN_ATTRIBUTE_FACTORY = new AttributeFactory() {
+    public AttributeImpl createAttributeInstance(Class attClass) {
+      return attClass.isAssignableFrom(Token.class)
+        ? new Token() : DEFAULT_ATTRIBUTE_FACTORY.createAttributeInstance(attClass);
+    }
+  };
 
   public SingleTokenTokenStream(Token token) {
+    super(TOKEN_ATTRIBUTE_FACTORY);
+    
     assert token != null;
     this.singleToken = (Token) token.clone();
-     
-    termAtt = (TermAttribute) addAttribute(TermAttribute.class);
-    offsetAtt = (OffsetAttribute) addAttribute(OffsetAttribute.class);
-    flagsAtt = (FlagsAttribute) addAttribute(FlagsAttribute.class);
-    posIncAtt = (PositionIncrementAttribute) addAttribute(PositionIncrementAttribute.class);
-    typeAtt = (TypeAttribute) addAttribute(TypeAttribute.class);
-    payloadAtt = (PayloadAttribute) addAttribute(PayloadAttribute.class);
+    
+    tokenAtt = (AttributeImpl) addAttribute(TermAttribute.class);
+    assert (tokenAtt instanceof Token || tokenAtt.getClass().getName().equals("org.apache.lucene.analysis.TokenWrapper"));
   }
 
-
   public final boolean incrementToken() throws IOException {
     if (exhausted) {
       return false;
+    } else {
+      clearAttributes();
+      singleToken.copyTo(tokenAtt);
+      exhausted = true;
+      return true;
     }
-    
-    Token clone = (Token) singleToken.clone();
-    
-    clearAttributes();
-    termAtt.setTermBuffer(clone.termBuffer(), 0, clone.termLength());
-    offsetAtt.setOffset(clone.startOffset(), clone.endOffset());
-    flagsAtt.setFlags(clone.getFlags());
-    typeAtt.setType(clone.type());
-    posIncAtt.setPositionIncrement(clone.getPositionIncrement());
-    payloadAtt.setPayload(clone.getPayload());
-    exhausted = true;
-    return true;
   }
   
   /** @deprecated Will be removed in Lucene 3.0. This method is final, as it should

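The SingleTokenTokenStream rewrite above replaces six per-attribute copies with a single Token.copyTo() call. The trick is the custom AttributeFactory handed to the super constructor: for any attribute interface that Token itself implements, the factory returns a Token instance, so TermAttribute, OffsetAttribute, and the other standard attributes all become views of one backing object that copyTo() fills in one step. A consumer-side sketch of the resulting behavior (the stream and attribute APIs are from this commit; the assertion scaffolding is illustrative):

    Token t = new Token("word", 0, 4);
    TokenStream ts = new SingleTokenTokenStream(t);
    TermAttribute term = (TermAttribute) ts.addAttribute(TermAttribute.class);
    OffsetAttribute off = (OffsetAttribute) ts.addAttribute(OffsetAttribute.class);

    assertTrue(ts.incrementToken());  // copies the stored token into the shared attribute
    assertEquals("word", term.term());
    assertEquals(0, off.startOffset());
    assertEquals(4, off.endOffset());
    assertFalse(ts.incrementToken()); // the stream holds exactly one token
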
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java Mon Aug 24 12:44:13 2009
@@ -21,15 +21,13 @@
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-
-import junit.framework.TestCase;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 
 /**
  * Test the Arabic Analyzer
  *
  */
-public class TestArabicAnalyzer extends TestCase {
+public class TestArabicAnalyzer extends BaseTokenStreamTestCase {
   
   /** This test fails with NPE when the 
    * stopwords file is missing in classpath */
@@ -74,33 +72,4 @@
     assertAnalyzesTo(new ArabicAnalyzer(), "English text.", new String[] {
         "english", "text" });
   }
-  
-  private void assertAnalyzesTo(Analyzer a, String input, String[] output)
-      throws Exception {
-    TokenStream ts = a.tokenStream("dummy", new StringReader(input));
-    TermAttribute termAtt = (TermAttribute) ts
-        .getAttribute(TermAttribute.class);
-
-    for (int i = 0; i < output.length; i++) {
-      assertTrue(ts.incrementToken());
-      assertEquals(output[i], termAtt.term());
-    }
-
-    assertFalse(ts.incrementToken());
-    ts.close();
-  }
-  
-  private void assertAnalyzesToReuse(Analyzer a, String input, String[] output)
-      throws Exception {
-    TokenStream ts = a.reusableTokenStream("dummy", new StringReader(input));
-    TermAttribute termAtt = (TermAttribute) ts
-        .getAttribute(TermAttribute.class);
-
-    for (int i = 0; i < output.length; i++) {
-      assertTrue(ts.incrementToken());
-      assertEquals(output[i], termAtt.term());
-    }
-
-    assertFalse(ts.incrementToken());
-  }
 }

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicNormalizationFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicNormalizationFilter.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicNormalizationFilter.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicNormalizationFilter.java Mon Aug 24 12:44:13 2009
@@ -20,15 +20,14 @@
 import java.io.IOException;
 import java.io.StringReader;
 
-import junit.framework.TestCase;
-
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 
 /**
  * Test the Arabic Normalization Filter
  *
  */
-public class TestArabicNormalizationFilter extends TestCase {
+public class TestArabicNormalizationFilter extends BaseTokenStreamTestCase {
 
   public void testAlifMadda() throws IOException {
     check("آجن", "اجن");
@@ -89,11 +88,7 @@
   private void check(final String input, final String expected) throws IOException {
     ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(new StringReader(input));
     ArabicNormalizationFilter filter = new ArabicNormalizationFilter(tokenStream);
-    TermAttribute termAtt = (TermAttribute) filter.getAttribute(TermAttribute.class);
-    
-    assertTrue(filter.incrementToken());
-    assertEquals(expected, termAtt.term());
-    filter.close();
+    assertTokenStreamContents(filter, new String[]{expected});
   }
 
 }

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicStemFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicStemFilter.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicStemFilter.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicStemFilter.java Mon Aug 24 12:44:13 2009
@@ -20,15 +20,14 @@
 import java.io.IOException;
 import java.io.StringReader;
 
-import junit.framework.TestCase;
-
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 
 /**
  * Test the Arabic Normalization Filter
  *
  */
-public class TestArabicStemFilter extends TestCase {
+public class TestArabicStemFilter extends BaseTokenStreamTestCase {
   
   public void testAlPrefix() throws IOException {
     check("الحسن", "حسن");
@@ -117,11 +116,7 @@
   private void check(final String input, final String expected) throws IOException {
     ArabicLetterTokenizer tokenStream  = new ArabicLetterTokenizer(new StringReader(input));
     ArabicStemFilter filter = new ArabicStemFilter(tokenStream);
-    TermAttribute termAtt = (TermAttribute) filter.getAttribute(TermAttribute.class);
-    
-    assertTrue(filter.incrementToken());
-    assertEquals(expected, termAtt.term());
-    filter.close();
+    assertTokenStreamContents(filter, new String[]{expected});
   }
 
 }

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java Mon Aug 24 12:44:13 2009
@@ -17,11 +17,7 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.io.StringReader;
-
-import junit.framework.TestCase;
-
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
@@ -32,9 +28,9 @@
  * It is very similar to the snowball portuguese algorithm but not exactly the same.
  *
  */
-public class TestBrazilianStemmer extends TestCase {
+public class TestBrazilianStemmer extends BaseTokenStreamTestCase {
   
-  public void testWithSnowballExamples() throws IOException {
+  public void testWithSnowballExamples() throws Exception {
 	 check("boa", "boa");
 	 check("boainain", "boainain");
 	 check("boas", "boas");
@@ -150,23 +146,13 @@
     a.setStemExclusionTable(new String[] { "quintessência" });
     checkReuse(a, "quintessência", "quintessência");
   }
-
-  private void check(final String input, final String expected) throws IOException {
-    Analyzer analyzer = new BrazilianAnalyzer(); 
-    TokenStream stream = analyzer.tokenStream("dummy", new StringReader(input));
-    TermAttribute text = (TermAttribute) stream.getAttribute(TermAttribute.class);
-    assertTrue(stream.incrementToken());
-    assertEquals(expected, text.term());
-    assertFalse(stream.incrementToken());
-    stream.close();
-  }
-  
-  private void checkReuse(Analyzer analyzer, final String input, final String expected) throws IOException {
-    TokenStream stream = analyzer.reusableTokenStream("dummy", new StringReader(input));
-    TermAttribute text = (TermAttribute) stream.getAttribute(TermAttribute.class);
-    assertTrue(stream.incrementToken());
-    assertEquals(expected, text.term());
-    assertFalse(stream.incrementToken());
+  
+  private void check(final String input, final String expected) throws Exception {
+    checkOneTerm(new BrazilianAnalyzer(), input, expected);
+  }
+  
+  private void checkReuse(Analyzer a, String input, String expected) throws Exception {
+    checkOneTermReuse(a, input, expected);
   }
 
 }
\ No newline at end of file

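checkOneTerm and checkOneTermReuse are single-token shorthands for the helpers above: judging from the code they replace, each asserts that analysis of the input produces exactly one token equal to the expected string, the Reuse variant going through Analyzer.reusableTokenStream(). Called as in this test (values taken from the cases above):

    // inherited from BaseTokenStreamTestCase: exactly one output token expected
    checkOneTerm(new BrazilianAnalyzer(), "boas", "boas");

    BrazilianAnalyzer a = new BrazilianAnalyzer();
    a.setStemExclusionTable(new String[] { "quintessência" });
    // same single-token check, via the reusable token stream
    checkOneTermReuse(a, "quintessência", "quintessência");
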
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java Mon Aug 24 12:44:13 2009
@@ -20,8 +20,7 @@
 import java.io.IOException;
 import java.io.StringReader;
 
-import junit.framework.TestCase;
-
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
@@ -29,7 +28,7 @@
 import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
 
 
-public class TestCJKTokenizer extends TestCase{
+public class TestCJKTokenizer extends BaseTokenStreamTestCase {
   
   class TestToken {
     String termText;

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cn/TestChineseTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cn/TestChineseTokenizer.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cn/TestChineseTokenizer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cn/TestChineseTokenizer.java Mon Aug 24 12:44:13 2009
@@ -21,17 +21,15 @@
 import java.io.Reader;
 import java.io.StringReader;
 
-import junit.framework.TestCase;
-
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.WhitespaceTokenizer;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 
 
-public class TestChineseTokenizer extends TestCase
+public class TestChineseTokenizer extends BaseTokenStreamTestCase
 {
     public void testOtherLetterOffset() throws IOException
     {
@@ -116,34 +114,5 @@
       assertAnalyzesTo(justFilter, "This is a Test. b c d", 
           new String[] { "This", "Test." });
     }
-    
-    private void assertAnalyzesTo(Analyzer a, String input, String[] output)
-      throws Exception {
-      TokenStream ts = a.tokenStream("dummy", new StringReader(input));
-      TermAttribute termAtt = (TermAttribute) ts
-      .getAttribute(TermAttribute.class);
-
-     for (int i = 0; i < output.length; i++) {
-       assertTrue(ts.incrementToken());
-       assertEquals(output[i], termAtt.term());
-     }
 
-     assertFalse(ts.incrementToken());
-     ts.close();
-    }
-    
-    private void assertAnalyzesToReuse(Analyzer a, String input, String[] output,
-      int startOffsets[], int endOffsets[])
-      throws Exception {
-      TokenStream ts = a.reusableTokenStream("dummy", new StringReader(input));
-      TermAttribute termAtt = (TermAttribute) ts
-        .getAttribute(TermAttribute.class);
-
-      for (int i = 0; i < output.length; i++) {
-        assertTrue(ts.incrementToken());
-        assertEquals(output[i], termAtt.term());
-      }
-
-      assertFalse(ts.incrementToken());
-    }
 }

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java Mon Aug 24 12:44:13 2009
@@ -31,8 +31,7 @@
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipInputStream;
 
-import junit.framework.TestCase;
-
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.WhitespaceTokenizer;
@@ -41,7 +40,7 @@
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 
-public class TestCompoundWordTokenFilter extends TestCase {
+public class TestCompoundWordTokenFilter extends BaseTokenStreamTestCase {
   private static String[] locations = {
       "http://dfn.dl.sourceforge.net/sourceforge/offo/offo-hyphenation.zip",
       "http://surfnet.dl.sourceforge.net/sourceforge/offo/offo-hyphenation.zip",
@@ -76,7 +75,7 @@
         dict, CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE,
         CompoundWordTokenFilterBase.DEFAULT_MIN_SUBWORD_SIZE,
         CompoundWordTokenFilterBase.DEFAULT_MAX_SUBWORD_SIZE, false);
-    assertFiltersTo(tf, new String[] { "Rindfleischüberwachungsgesetz", "Rind",
+    assertTokenStreamContents(tf, new String[] { "Rindfleischüberwachungsgesetz", "Rind",
         "fleisch", "überwachung", "gesetz", "Drahtschere", "Draht", "schere",
         "abba" }, new int[] { 0, 0, 4, 11, 23, 30, 30, 35, 42 }, new int[] {
         29, 4, 11, 22, 29, 41, 35, 41, 46 }, new int[] { 1, 0, 0, 0, 0, 1, 0,
@@ -101,7 +100,7 @@
             "Rindfleischüberwachungsgesetz")), hyphenator, dict,
         CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE,
         CompoundWordTokenFilterBase.DEFAULT_MIN_SUBWORD_SIZE, 40, true);
-    assertFiltersTo(tf, new String[] { "Rindfleischüberwachungsgesetz",
+    assertTokenStreamContents(tf, new String[] { "Rindfleischüberwachungsgesetz",
         "Rindfleisch", "fleisch", "überwachungsgesetz", "gesetz" }, new int[] {
         0, 0, 4, 11, 23 }, new int[] { 29, 11, 11, 29, 29 }, new int[] { 1, 0,
         0, 0, 0 });
@@ -118,7 +117,7 @@
                 "Bildörr Bilmotor Biltak Slagborr Hammarborr Pelarborr Glasögonfodral Basfiolsfodral Basfiolsfodralmakaregesäll Skomakare Vindrutetorkare Vindrutetorkarblad abba")),
         dict);
 
-    assertFiltersTo(tf, new String[] { "Bildörr", "Bil", "dörr", "Bilmotor",
+    assertTokenStreamContents(tf, new String[] { "Bildörr", "Bil", "dörr", "Bilmotor",
         "Bil", "motor", "Biltak", "Bil", "tak", "Slagborr", "Slag", "borr",
         "Hammarborr", "Hammar", "borr", "Pelarborr", "Pelar", "borr",
         "Glasögonfodral", "Glas", "ögon", "fodral", "Basfiolsfodral", "Bas",
@@ -147,7 +146,7 @@
         CompoundWordTokenFilterBase.DEFAULT_MIN_SUBWORD_SIZE,
         CompoundWordTokenFilterBase.DEFAULT_MAX_SUBWORD_SIZE, true);
 
-    assertFiltersTo(tf, new String[] { "Basfiolsfodralmakaregesäll", "Bas",
+    assertTokenStreamContents(tf, new String[] { "Basfiolsfodralmakaregesäll", "Bas",
         "fiolsfodral", "fodral", "makare", "gesäll" }, new int[] { 0, 0, 3, 8,
         14, 20 }, new int[] { 26, 3, 14, 14, 20, 26 }, new int[] { 1, 0, 0, 0,
         0, 0 });
@@ -185,22 +184,6 @@
     assertEquals("Rindfleischüberwachungsgesetz", termAtt.term());
   }
 
-  private void assertFiltersTo(TokenFilter tf, String[] s, int[] startOffset,
-      int[] endOffset, int[] posIncr) throws Exception {
-    TermAttribute termAtt = (TermAttribute) tf.getAttribute(TermAttribute.class);
-    OffsetAttribute offsetAtt = (OffsetAttribute) tf.getAttribute(OffsetAttribute.class);
-    PositionIncrementAttribute posIncAtt = (PositionIncrementAttribute) tf.getAttribute(PositionIncrementAttribute.class);
-    
-    for (int i = 0; i < s.length; ++i) {
-      assertTrue(tf.incrementToken());
-      assertEquals(s[i], termAtt.term());
-      assertEquals(startOffset[i], offsetAtt.startOffset());
-      assertEquals(endOffset[i], offsetAtt.endOffset());
-      assertEquals(posIncr[i], posIncAtt.getPositionIncrement());
-    }
-    assertFalse(tf.incrementToken());
-  }
-
   private void getHyphenationPatternFileContents() {
     if (patternsFileContent == null) {
       try {

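When a test needs more than the term text, assertTokenStreamContents takes parallel arrays; judging from the call sites above, they line up as terms, start offsets, end offsets, and position increments, with the helper again asserting exhaustion at the end, which is exactly the contract of the deleted assertFiltersTo. An illustrative call for a decompounded word (offsets chosen to be self-consistent for "Drahtschere" on its own, not copied from the test data):

    assertTokenStreamContents(tf,
        new String[] { "Drahtschere", "Draht", "schere" }, // expected terms
        new int[]    { 0, 0, 5 },    // start offsets
        new int[]    { 11, 5, 11 },  // end offsets
        new int[]    { 1, 0, 0 });   // position increments: subwords stack on the compound
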
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechAnalyzer.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechAnalyzer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechAnalyzer.java Mon Aug 24 12:44:13 2009
@@ -21,13 +21,10 @@
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
-import java.io.StringReader;
-
-import junit.framework.TestCase;
 
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 
 /**
  * Test the CzechAnalyzer
@@ -35,7 +32,7 @@
  * CzechAnalyzer is like a StandardAnalyzer with a custom stopword list.
  *
  */
-public class TestCzechAnalyzer extends TestCase {
+public class TestCzechAnalyzer extends BaseTokenStreamTestCase {
   File dataDir = new File(System.getProperty("dataDir", "./bin"));
   File customStopFile = new File(dataDir, "org/apache/lucene/analysis/cz/customStopWordFile.txt");
   
@@ -85,24 +82,4 @@
     assertAnalyzesToReuse(cz, "Česká Republika", new String[] { "česká" });
   }
 
-  private void assertAnalyzesTo(Analyzer a, String input, String[] output) throws Exception {
-    TokenStream ts = a.tokenStream("dummy", new StringReader(input));
-    TermAttribute text = (TermAttribute) ts.getAttribute(TermAttribute.class);
-    for (int i=0; i<output.length; i++) {
-      assertTrue(ts.incrementToken());
-      assertEquals(text.term(), output[i]);
-    }
-    assertFalse(ts.incrementToken());
-    ts.close();
-  }
-  
-  private void assertAnalyzesToReuse(Analyzer a, String input, String[] output) throws Exception {
-    TokenStream ts = a.reusableTokenStream("dummy", new StringReader(input));
-    TermAttribute text = (TermAttribute) ts.getAttribute(TermAttribute.class);
-    for (int i=0; i<output.length; i++) {
-      assertTrue(ts.incrementToken());
-      assertEquals(text.term(), output[i]);
-    }
-    assertFalse(ts.incrementToken());
-  }
 }

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java Mon Aug 24 12:44:13 2009
@@ -20,18 +20,14 @@
 import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileInputStream;
-import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.Reader;
-import java.io.StringReader;
-
-import junit.framework.TestCase;
 
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.WhitespaceTokenizer;
 import org.apache.lucene.analysis.standard.StandardTokenizer;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 
 /**
  * Test the German stemmer. The stemming algorithm is known to work less 
@@ -39,34 +35,29 @@
  * also check some of the cases where the algorithm is wrong.
  *
  */
-public class TestGermanStemFilter extends TestCase {
+public class TestGermanStemFilter extends BaseTokenStreamTestCase {
 
-  public void testStemming() {
-    try {
-      // read test cases from external file:
-      File dataDir = new File(System.getProperty("dataDir", "./bin"));
-      File testFile = new File(dataDir, "org/apache/lucene/analysis/de/data.txt");
-      FileInputStream fis = new FileInputStream(testFile);
-      InputStreamReader isr = new InputStreamReader(fis, "iso-8859-1");
-      BufferedReader breader = new BufferedReader(isr);
-      while(true) {
-        String line = breader.readLine();
-        if (line == null)
-          break;
-        line = line.trim();
-        if (line.startsWith("#") || line.equals(""))
-          continue;    // ignore comments and empty lines
-        String[] parts = line.split(";");
-        //System.out.println(parts[0] + " -- " + parts[1]);
-        check(parts[0], parts[1]);
-      }
-      breader.close();
-      isr.close();
-      fis.close();
-    } catch (IOException e) {
-       e.printStackTrace();
-       fail();
+  public void testStemming() throws Exception {
+    // read test cases from external file:
+    File dataDir = new File(System.getProperty("dataDir", "./bin"));
+    File testFile = new File(dataDir, "org/apache/lucene/analysis/de/data.txt");
+    FileInputStream fis = new FileInputStream(testFile);
+    InputStreamReader isr = new InputStreamReader(fis, "iso-8859-1");
+    BufferedReader breader = new BufferedReader(isr);
+    while(true) {
+      String line = breader.readLine();
+      if (line == null)
+        break;
+      line = line.trim();
+      if (line.startsWith("#") || line.equals(""))
+        continue;    // ignore comments and empty lines
+      String[] parts = line.split(";");
+      //System.out.println(parts[0] + " -- " + parts[1]);
+      check(parts[0], parts[1]);
     }
+    breader.close();
+    isr.close();
+    fis.close();
   }
   
   public void testReusableTokenStream() throws Exception {
@@ -100,20 +91,11 @@
     checkReuse(a, "tischen", "tischen");
   }
   
-  private void check(final String input, final String expected) throws IOException {
-    Analyzer a = new GermanAnalyzer();
-    TokenStream tokenStream = a.tokenStream("dummy", new StringReader(input));
-    TermAttribute termAtt = (TermAttribute) tokenStream.getAttribute(TermAttribute.class);
-    assertTrue(tokenStream.incrementToken());
-    assertEquals(expected, termAtt.term());
-    tokenStream.close();
+  private void check(final String input, final String expected) throws Exception {
+    checkOneTerm(new GermanAnalyzer(), input, expected);
   }
   
-  private void checkReuse(Analyzer a, String input, String expected) throws IOException {
-    TokenStream stream = a.reusableTokenStream("dummy", new StringReader(input));
-    TermAttribute text = (TermAttribute) stream.getAttribute(TermAttribute.class);
-    assertTrue(stream.incrementToken());
-    assertEquals(expected, text.term());
-    assertFalse(stream.incrementToken());
+  private void checkReuse(Analyzer a, String input, String expected) throws Exception {
+    checkOneTermReuse(a, input, expected);
   }
 }

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java Mon Aug 24 12:44:13 2009
@@ -16,49 +16,16 @@
  * limitations under the License.
  */
 
-import java.io.StringReader;
-
-import junit.framework.TestCase;
-
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 
 
 /**
  * A unit test class for verifying the correct operation of the GreekAnalyzer.
  *
  */
-public class GreekAnalyzerTest extends TestCase {
-
-	/**
-	 * A helper method copied from org.apache.lucene.analysis.TestAnalyzers.
-	 *
-	 * @param a			the Analyzer to test
-	 * @param input		an input String to analyze
-	 * @param output	a String[] with the results of the analysis
-	 * @throws Exception in case an error occurs
-	 */
-	private void assertAnalyzesTo(Analyzer a, String input, String[] output) throws Exception {
-		TokenStream ts = a.tokenStream("dummy", new StringReader(input));
-		TermAttribute termAtt = (TermAttribute) ts.getAttribute(TermAttribute.class);
-		for (int i=0; i<output.length; i++) {
-			assertTrue(ts.incrementToken());
-			assertEquals(termAtt.term(), output[i]);
-		}
-		assertFalse(ts.incrementToken());
-		ts.close();
-	}
-	
-	private void assertAnalyzesToReuse(Analyzer a, String input, String[] output) throws Exception {
-	    TokenStream ts = a.reusableTokenStream("dummy", new StringReader(input));
-	    TermAttribute termAtt = (TermAttribute) ts.getAttribute(TermAttribute.class);
-	    for (int i=0; i<output.length; i++) {
-	        assertTrue(ts.incrementToken());
-	        assertEquals(termAtt.term(), output[i]);
-	    }
-	    assertFalse(ts.incrementToken());
-	}
+public class GreekAnalyzerTest extends BaseTokenStreamTestCase {
 
 	/**
 	 * Test the analysis of various greek strings.

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianAnalyzer.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianAnalyzer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianAnalyzer.java Mon Aug 24 12:44:13 2009
@@ -19,17 +19,15 @@
 
 import java.io.StringReader;
 
-import junit.framework.TestCase;
-
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 
 /**
  * Test the Persian Analyzer
  * 
  */
-public class TestPersianAnalyzer extends TestCase {
+public class TestPersianAnalyzer extends BaseTokenStreamTestCase {
 
   /**
    * This test fails with NPE when the stopwords file is missing in classpath
@@ -216,33 +214,4 @@
     assertAnalyzesToReuse(a, "برگ‌ها", new String[] { "برگ" });
   }
 
-  private void assertAnalyzesTo(Analyzer a, String input, String[] output)
-      throws Exception {
-	TokenStream ts = a.tokenStream("dummy", new StringReader(input));
-	TermAttribute termAtt = (TermAttribute) ts.getAttribute(TermAttribute.class);
-
-	for (int i = 0; i < output.length; i++) {
-		assertTrue(ts.incrementToken());
-		assertEquals(output[i], termAtt.term());
-	}
-	
-	assertFalse(ts.incrementToken());
-    ts.close();
-  }
-  
-  private void assertAnalyzesToReuse(Analyzer a, String input, String[] output)
-      throws Exception {
-    TokenStream ts = a.reusableTokenStream("dummy", new StringReader(input));
-    TermAttribute termAtt = (TermAttribute) ts
-        .getAttribute(TermAttribute.class);
-
-    for (int i = 0; i < output.length; i++) {
-      assertTrue(ts.incrementToken());
-      assertEquals(output[i], termAtt.term());
-    }
-
-    assertFalse(ts.incrementToken());
-    ts.close();
-  }
-
 }

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianNormalizationFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianNormalizationFilter.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianNormalizationFilter.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianNormalizationFilter.java Mon Aug 24 12:44:13 2009
@@ -20,16 +20,14 @@
 import java.io.IOException;
 import java.io.StringReader;
 
-import junit.framework.TestCase;
-
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.ar.ArabicLetterTokenizer;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 
 /**
  * Test the Arabic Normalization Filter
  * 
  */
-public class TestPersianNormalizationFilter extends TestCase {
+public class TestPersianNormalizationFilter extends BaseTokenStreamTestCase {
 
   public void testFarsiYeh() throws IOException {
     check("های", "هاي");
@@ -55,17 +53,12 @@
     check("زادہ", "زاده");
   }
 
-  private void check(final String input, final String expected)
-      throws IOException {
+  private void check(final String input, final String expected) throws IOException {
     ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(
         new StringReader(input));
     PersianNormalizationFilter filter = new PersianNormalizationFilter(
         tokenStream);
-    TermAttribute termAtt = (TermAttribute) filter.getAttribute(TermAttribute.class);
-    assertTrue(filter.incrementToken());
-    assertEquals(expected, termAtt.term());
-    assertFalse(filter.incrementToken());
-    filter.close();
+    assertTokenStreamContents(filter, new String[]{expected});
   }
 
 }

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestElision.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestElision.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestElision.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestElision.java Mon Aug 24 12:44:13 2009
@@ -24,8 +24,7 @@
 import java.util.List;
 import java.util.Set;
 
-import junit.framework.TestCase;
-
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -34,9 +33,9 @@
 /**
  * 
  */
-public class TestElision extends TestCase {
+public class TestElision extends BaseTokenStreamTestCase {
 
-  public void testElision() {
+  public void testElision() throws Exception {
     String test = "Plop, juste pour voir l'embrouille avec O'brian. M'enfin.";
     Tokenizer tokenizer = new StandardTokenizer(new StringReader(test));
     Set articles = new HashSet();
@@ -49,15 +48,11 @@
     assertEquals("enfin", tas.get(7));
   }
 
-  private List filtre(TokenFilter filter) {
+  private List filtre(TokenFilter filter) throws IOException {
     List tas = new ArrayList();
-    try {
-      TermAttribute termAtt = (TermAttribute) filter.getAttribute(TermAttribute.class);
-      while (filter.incrementToken()) {
-        tas.add(termAtt.term());
-      }
-    } catch (IOException e) {
-      e.printStackTrace();
+    TermAttribute termAtt = (TermAttribute) filter.getAttribute(TermAttribute.class);
+    while (filter.incrementToken()) {
+      tas.add(termAtt.term());
     }
     return tas;
   }

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java Mon Aug 24 12:44:13 2009
@@ -56,11 +56,9 @@
 
 import java.io.StringReader;
 
-import junit.framework.TestCase;
-
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 
 /**
  * Test case for FrenchAnalyzer.
@@ -68,35 +66,7 @@
  * @version   $version$
  */
 
-public class TestFrenchAnalyzer extends TestCase {
-
-	// Method copied from TestAnalyzers, maybe should be refactored
-	public void assertAnalyzesTo(Analyzer a, String input, String[] output)
-		throws Exception {
-
-		TokenStream ts = a.tokenStream("dummy", new StringReader(input));
-
-		TermAttribute termAtt = (TermAttribute) ts.getAttribute(TermAttribute.class);
-		for (int i = 0; i < output.length; i++) {
-			assertTrue(ts.incrementToken());
-			assertEquals(termAtt.term(), output[i]);
-		}
-		assertFalse(ts.incrementToken());
-		ts.close();
-	}
-	
-   public void assertAnalyzesToReuse(Analyzer a, String input, String[] output)
-       throws Exception {
-
-       TokenStream ts = a.reusableTokenStream("dummy", new StringReader(input));
-
-       TermAttribute termAtt = (TermAttribute) ts.getAttribute(TermAttribute.class);
-       for (int i = 0; i < output.length; i++) {
-           assertTrue(ts.incrementToken());
-           assertEquals(termAtt.term(), output[i]);
-       }
-       assertFalse(ts.incrementToken());
-   }
+public class TestFrenchAnalyzer extends BaseTokenStreamTestCase {
 
 	public void testAnalyzer() throws Exception {
 		FrenchAnalyzer fa = new FrenchAnalyzer();

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestEmptyTokenStream.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestEmptyTokenStream.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestEmptyTokenStream.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestEmptyTokenStream.java Mon Aug 24 12:44:13 2009
@@ -19,11 +19,10 @@
 
 import java.io.IOException;
 
-import junit.framework.TestCase;
-
+import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.analysis.TokenStream;
 
-public class TestEmptyTokenStream extends TestCase {
+public class TestEmptyTokenStream extends LuceneTestCase {
 
   public void test() throws IOException {
     TokenStream ts = new EmptyTokenStream();

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAndSuffixAwareTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAndSuffixAwareTokenFilter.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAndSuffixAwareTokenFilter.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAndSuffixAwareTokenFilter.java Mon Aug 24 12:44:13 2009
@@ -17,7 +17,7 @@
  * limitations under the License.
  */
 
-import junit.framework.TestCase;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.WhitespaceTokenizer;
@@ -27,7 +27,7 @@
 import java.io.IOException;
 import java.io.StringReader;
 
-public class TestPrefixAndSuffixAwareTokenFilter extends TestCase {
+public class TestPrefixAndSuffixAwareTokenFilter extends BaseTokenStreamTestCase {
 
   public void test() throws IOException {
 

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAwareTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAwareTokenFilter.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAwareTokenFilter.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAwareTokenFilter.java Mon Aug 24 12:44:13 2009
@@ -17,7 +17,7 @@
  * limitations under the License.
  */
 
-import junit.framework.TestCase;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.WhitespaceTokenizer;
@@ -27,7 +27,7 @@
 import java.io.IOException;
 import java.io.StringReader;
 
-public class TestPrefixAwareTokenFilter extends TestCase {
+public class TestPrefixAwareTokenFilter extends BaseTokenStreamTestCase {
 
   public void test() throws IOException {
 

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestSingleTokenTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestSingleTokenTokenFilter.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestSingleTokenTokenFilter.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestSingleTokenTokenFilter.java Mon Aug 24 12:44:13 2009
@@ -17,20 +17,27 @@
  * limitations under the License.
  */
 
-import junit.framework.TestCase;
-
 import java.io.IOException;
 
+import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.analysis.Token;
 
-public class TestSingleTokenTokenFilter extends TestCase {
+public class TestSingleTokenTokenFilter extends LuceneTestCase {
 
   public void test() throws IOException {
+    final Token reusableToken = new Token();
+    
     Token token = new Token();
-
     SingleTokenTokenStream ts = new SingleTokenTokenStream(token);
+    ts.reset();
+
+    assertEquals(token, ts.next(reusableToken));
+    assertNull(ts.next(reusableToken));
+    
+    token = new Token("hallo", 10, 20, "someType");
+    ts.setToken(token);
+    ts.reset();
 
-    final Token reusableToken = new Token();
     assertEquals(token, ts.next(reusableToken));
     assertNull(ts.next(reusableToken));
   }

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java Mon Aug 24 12:44:13 2009
@@ -19,19 +19,18 @@
 
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.WhitespaceTokenizer;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 
 import java.io.StringReader;
 
-import junit.framework.TestCase;
-
 /**
  * Tests {@link EdgeNGramTokenFilter} for correctness.
  */
-public class EdgeNGramTokenFilterTest extends TestCase {
+public class EdgeNGramTokenFilterTest extends BaseTokenStreamTestCase {
   private TokenStream input;
 
-  public void setUp() {
+  public void setUp() throws Exception {
+    super.setUp();
     input = new WhitespaceTokenizer(new StringReader("abcde"));
   }
 
@@ -67,71 +66,40 @@
 
   public void testFrontUnigram() throws Exception {
     EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.FRONT, 1, 1);
-    TermAttribute termAtt = (TermAttribute) tokenizer.addAttribute(TermAttribute.class);
-    assertTrue(tokenizer.incrementToken());
-    assertEquals("(a,0,1)", termAtt.toString());
-    assertFalse(tokenizer.incrementToken());
+    assertTokenStreamContents(tokenizer, new String[]{"a"}, new int[]{0}, new int[]{1});
   }
 
   public void testBackUnigram() throws Exception {
     EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.BACK, 1, 1);
-    TermAttribute termAtt = (TermAttribute) tokenizer.addAttribute(TermAttribute.class);
-    assertTrue(tokenizer.incrementToken());
-    assertEquals("(e,4,5)", termAtt.toString());
-    assertFalse(tokenizer.incrementToken());
+    assertTokenStreamContents(tokenizer, new String[]{"e"}, new int[]{4}, new int[]{5});
   }
 
   public void testOversizedNgrams() throws Exception {
     EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.FRONT, 6, 6);
-    assertFalse(tokenizer.incrementToken());
+    assertTokenStreamContents(tokenizer, new String[0], new int[0], new int[0]);
   }
 
   public void testFrontRangeOfNgrams() throws Exception {
     EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.FRONT, 1, 3);
-    TermAttribute termAtt = (TermAttribute) tokenizer.addAttribute(TermAttribute.class);
-    assertTrue(tokenizer.incrementToken());
-    assertEquals("(a,0,1)", termAtt.toString());
-    assertTrue(tokenizer.incrementToken());
-    assertEquals("(ab,0,2)", termAtt.toString());
-    assertTrue(tokenizer.incrementToken());
-    assertEquals("(abc,0,3)", termAtt.toString());
-    assertFalse(tokenizer.incrementToken());
+    assertTokenStreamContents(tokenizer, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3});
   }
 
   public void testBackRangeOfNgrams() throws Exception {
     EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.BACK, 1, 3);
-    TermAttribute termAtt = (TermAttribute) tokenizer.addAttribute(TermAttribute.class);
-    assertTrue(tokenizer.incrementToken());
-    assertEquals("(e,4,5)", termAtt.toString());
-    assertTrue(tokenizer.incrementToken());
-    assertEquals("(de,3,5)", termAtt.toString());
-    assertTrue(tokenizer.incrementToken());
-    assertEquals("(cde,2,5)", termAtt.toString());
-    assertFalse(tokenizer.incrementToken());
+    assertTokenStreamContents(tokenizer, new String[]{"e","de","cde"}, new int[]{4,3,2}, new int[]{5,5,5});
   }
   
   public void testSmallTokenInStream() throws Exception {
     input = new WhitespaceTokenizer(new StringReader("abc de fgh"));
     EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.FRONT, 3, 3);
-    TermAttribute termAtt = (TermAttribute) tokenizer.addAttribute(TermAttribute.class);
-    assertTrue(tokenizer.incrementToken());
-    assertEquals("(abc,0,3)", termAtt.toString());
-    assertTrue(tokenizer.incrementToken());
-    assertEquals("(fgh,0,3)", termAtt.toString());
-    assertFalse(tokenizer.incrementToken());
+    assertTokenStreamContents(tokenizer, new String[]{"abc","fgh"}, new int[]{0,0}, new int[]{3,3});
   }
   
   public void testReset() throws Exception {
     WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(new StringReader("abcde"));
     EdgeNGramTokenFilter filter = new EdgeNGramTokenFilter(tokenizer, EdgeNGramTokenFilter.Side.FRONT, 1, 3);
-    TermAttribute termAtt = (TermAttribute) filter.getAttribute(TermAttribute.class);
-    assertTrue(filter.incrementToken());
-    assertEquals("(a,0,1)", termAtt.toString());
-    assertTrue(filter.incrementToken());
-    assertEquals("(ab,0,2)", termAtt.toString());
+    assertTokenStreamContents(filter, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3});
     tokenizer.reset(new StringReader("abcde"));
-    filter.reset();
-    assertTrue(filter.incrementToken());
-    assertEquals("(a,0,1)", termAtt.toString());
+    assertTokenStreamContents(filter, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3});
   }
 }

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java Mon Aug 24 12:44:13 2009
@@ -20,17 +20,16 @@
 
 import java.io.StringReader;
 
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-
-import junit.framework.TestCase;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 
 /**
  * Tests {@link EdgeNGramTokenizer} for correctness.
  */
-public class EdgeNGramTokenizerTest extends TestCase {
+public class EdgeNGramTokenizerTest extends BaseTokenStreamTestCase {
   private StringReader input;
 
-  public void setUp() {
+  public void setUp() throws Exception {
+    super.setUp();
     input = new StringReader("abcde");
   }
 
@@ -66,58 +65,33 @@
 
   public void testFrontUnigram() throws Exception {
     EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.FRONT, 1, 1);
-    TermAttribute termAtt = (TermAttribute) tokenizer.addAttribute(TermAttribute.class);
-    assertTrue(tokenizer.incrementToken());
-    assertEquals("(a,0,1)", termAtt.toString());
-    assertFalse(tokenizer.incrementToken());
+    assertTokenStreamContents(tokenizer, new String[]{"a"}, new int[]{0}, new int[]{1});
   }
 
   public void testBackUnigram() throws Exception {
     EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.BACK, 1, 1);
-    TermAttribute termAtt = (TermAttribute) tokenizer.addAttribute(TermAttribute.class);
-    assertTrue(tokenizer.incrementToken());
-    assertEquals("(e,4,5)", termAtt.toString());
-    assertFalse(tokenizer.incrementToken());
+    assertTokenStreamContents(tokenizer, new String[]{"e"}, new int[]{4}, new int[]{5});
   }
 
   public void testOversizedNgrams() throws Exception {
     EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.FRONT, 6, 6);
-    assertFalse(tokenizer.incrementToken());
+    assertTokenStreamContents(tokenizer, new String[0], new int[0], new int[0]);
   }
 
   public void testFrontRangeOfNgrams() throws Exception {
     EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.FRONT, 1, 3);
-    TermAttribute termAtt = (TermAttribute) tokenizer.addAttribute(TermAttribute.class);
-    assertTrue(tokenizer.incrementToken());
-    assertEquals("(a,0,1)", termAtt.toString());
-    assertTrue(tokenizer.incrementToken());
-    assertEquals("(ab,0,2)", termAtt.toString());
-    assertTrue(tokenizer.incrementToken());
-    assertEquals("(abc,0,3)", termAtt.toString());
-    assertFalse(tokenizer.incrementToken());
+    assertTokenStreamContents(tokenizer, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3});
   }
 
   public void testBackRangeOfNgrams() throws Exception {
     EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.BACK, 1, 3);
-    TermAttribute termAtt = (TermAttribute) tokenizer.addAttribute(TermAttribute.class);
-    assertTrue(tokenizer.incrementToken());
-    assertEquals("(e,4,5)", termAtt.toString());
-    assertTrue(tokenizer.incrementToken());
-    assertEquals("(de,3,5)", termAtt.toString());
-    assertTrue(tokenizer.incrementToken());
-    assertEquals("(cde,2,5)", termAtt.toString());
-    assertFalse(tokenizer.incrementToken());
+    assertTokenStreamContents(tokenizer, new String[]{"e","de","cde"}, new int[]{4,3,2}, new int[]{5,5,5});
   }
   
   public void testReset() throws Exception {
     EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.FRONT, 1, 3);
-    TermAttribute termAtt = (TermAttribute) tokenizer.addAttribute(TermAttribute.class);
-    assertTrue(tokenizer.incrementToken());
-    assertEquals("(a,0,1)", termAtt.toString());
-    assertTrue(tokenizer.incrementToken());
-    assertEquals("(ab,0,2)", termAtt.toString());
+    assertTokenStreamContents(tokenizer, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3});
     tokenizer.reset(new StringReader("abcde"));
-    assertTrue(tokenizer.incrementToken());
-    assertEquals("(a,0,1)", termAtt.toString());
+    assertTokenStreamContents(tokenizer, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3});
   }
 }

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java Mon Aug 24 12:44:13 2009
@@ -19,20 +19,19 @@
 
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.WhitespaceTokenizer;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 
 import java.io.IOException;
 import java.io.StringReader;
 
-import junit.framework.TestCase;
-
 /**
  * Tests {@link NGramTokenFilter} for correctness.
  */
-public class NGramTokenFilterTest extends TestCase {
+public class NGramTokenFilterTest extends BaseTokenStreamTestCase {
     private TokenStream input;
     
-    public void setUp() {
+    public void setUp() throws Exception {
+        super.setUp();
         input = new WhitespaceTokenizer(new StringReader("abcde"));
     }
 
@@ -56,70 +55,41 @@
         assertTrue(gotException);
     }
 
-    private void checkStream(TokenStream stream, String[] exp) throws IOException {
-      TermAttribute termAtt = (TermAttribute) stream.addAttribute(TermAttribute.class);
-      for (int i = 0; i < exp.length; i++) {
-        assertTrue(stream.incrementToken());
-        assertEquals(exp[i], termAtt.toString());
-      }
-      assertFalse(stream.incrementToken());
-    }
-    
     public void testUnigrams() throws Exception {
       NGramTokenFilter filter = new NGramTokenFilter(input, 1, 1);
-      String[] exp = new String[] {
-        "(a,0,1)", "(b,1,2)", "(c,2,3)", "(d,3,4)", "(e,4,5)"
-      };
-      
-      checkStream(filter, exp);
+      assertTokenStreamContents(filter, new String[]{"a","b","c","d","e"}, new int[]{0,1,2,3,4}, new int[]{1,2,3,4,5});
     }
 
     public void testBigrams() throws Exception {
       NGramTokenFilter filter = new NGramTokenFilter(input, 2, 2);
-      String[] exp = new String[] {
-          "(ab,0,2)", "(bc,1,3)", "(cd,2,4)", "(de,3,5)"
-        };
-        
-      checkStream(filter, exp);
+      assertTokenStreamContents(filter, new String[]{"ab","bc","cd","de"}, new int[]{0,1,2,3}, new int[]{2,3,4,5});
     }
 
     public void testNgrams() throws Exception {
       NGramTokenFilter filter = new NGramTokenFilter(input, 1, 3);
-      String[] exp = new String[] {
-          "(a,0,1)", "(b,1,2)", "(c,2,3)", "(d,3,4)", "(e,4,5)",
-          "(ab,0,2)", "(bc,1,3)", "(cd,2,4)", "(de,3,5)",
-          "(abc,0,3)", "(bcd,1,4)", "(cde,2,5)"
-      };
-        
-      checkStream(filter, exp);
+      assertTokenStreamContents(filter,
+        new String[]{"a","b","c","d","e", "ab","bc","cd","de", "abc","bcd","cde"}, 
+        new int[]{0,1,2,3,4, 0,1,2,3, 0,1,2},
+        new int[]{1,2,3,4,5, 2,3,4,5, 3,4,5}
+      );
     }
 
     public void testOversizedNgrams() throws Exception {
       NGramTokenFilter filter = new NGramTokenFilter(input, 6, 7);
-      assertFalse(filter.incrementToken());
+      assertTokenStreamContents(filter, new String[0], new int[0], new int[0]);
     }
     
     public void testSmallTokenInStream() throws Exception {
       input = new WhitespaceTokenizer(new StringReader("abc de fgh"));
       NGramTokenFilter filter = new NGramTokenFilter(input, 3, 3);
-      String[] exp = new String[] {
-          "(abc,0,3)", "(fgh,0,3)"
-        };
-        
-      checkStream(filter, exp);
+      assertTokenStreamContents(filter, new String[]{"abc","fgh"}, new int[]{0,0}, new int[]{3,3});
     }
     
     public void testReset() throws Exception {
       WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(new StringReader("abcde"));
-      NGramTokenFilter filter = new NGramTokenFilter(tokenizer, 1, 3);
-      TermAttribute termAtt = (TermAttribute) filter.addAttribute(TermAttribute.class);
-      assertTrue(filter.incrementToken());
-      assertEquals("(a,0,1)", termAtt.toString());
-      assertTrue(filter.incrementToken());
-      assertEquals("(b,1,2)", termAtt.toString());
+      NGramTokenFilter filter = new NGramTokenFilter(tokenizer, 1, 1);
+      assertTokenStreamContents(filter, new String[]{"a","b","c","d","e"}, new int[]{0,1,2,3,4}, new int[]{1,2,3,4,5});
       tokenizer.reset(new StringReader("abcde"));
-      filter.reset();
-      assertTrue(filter.incrementToken());
-      assertEquals("(a,0,1)", termAtt.toString());
+      assertTokenStreamContents(filter, new String[]{"a","b","c","d","e"}, new int[]{0,1,2,3,4}, new int[]{1,2,3,4,5});
     }
 }
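
One detail worth calling out in the reset tests: the explicit filter.reset()
call was dropped, and only the underlying tokenizer is re-pointed at a fresh
Reader before the stream is consumed a second time. For these n-gram filters
that appears to be enough, since they pull from their input again once their
internal buffer is exhausted. The idiom, sketched with the values used above:

    // Reset idiom (sketch): consume the full stream, re-feed the source
    // Reader, then consume again and expect identical output.
    WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(new StringReader("abcde"));
    NGramTokenFilter filter = new NGramTokenFilter(tokenizer, 1, 1);
    String[] terms = {"a", "b", "c", "d", "e"};
    int[] starts = {0, 1, 2, 3, 4}, ends = {1, 2, 3, 4, 5};
    assertTokenStreamContents(filter, terms, starts, ends);
    tokenizer.reset(new StringReader("abcde"));  // re-feed the same input
    assertTokenStreamContents(filter, terms, starts, ends);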

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java Mon Aug 24 12:44:13 2009
@@ -22,17 +22,16 @@
 import java.io.StringReader;
 
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-
-import junit.framework.TestCase;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 
 /**
  * Tests {@link NGramTokenizer} for correctness.
  */
-public class NGramTokenizerTest extends TestCase {
+public class NGramTokenizerTest extends BaseTokenStreamTestCase {
     private StringReader input;
     
-    public void setUp() {
+    public void setUp() throws Exception {
+        super.setUp();
         input = new StringReader("abcde");
     }
 
@@ -55,60 +54,35 @@
         }
         assertTrue(gotException);
     }
-    
-    private void checkStream(TokenStream stream, String[] exp) throws IOException {
-      TermAttribute termAtt = (TermAttribute) stream.addAttribute(TermAttribute.class);
-      for (int i = 0; i < exp.length; i++) {
-        assertTrue(stream.incrementToken());
-        assertEquals(exp[i], termAtt.toString());
-      }
-      assertFalse(stream.incrementToken());
-    }
 
     public void testUnigrams() throws Exception {
         NGramTokenizer tokenizer = new NGramTokenizer(input, 1, 1);
-        
-        String[] exp = new String[] {
-            "(a,0,1)", "(b,1,2)", "(c,2,3)", "(d,3,4)", "(e,4,5)"
-          };
-          
-        checkStream(tokenizer, exp);
+        assertTokenStreamContents(tokenizer, new String[]{"a","b","c","d","e"}, new int[]{0,1,2,3,4}, new int[]{1,2,3,4,5});
     }
 
     public void testBigrams() throws Exception {
         NGramTokenizer tokenizer = new NGramTokenizer(input, 2, 2);
-        String[] exp = new String[] {
-            "(ab,0,2)", "(bc,1,3)", "(cd,2,4)", "(de,3,5)"
-          };
-          
-        checkStream(tokenizer, exp);
+        assertTokenStreamContents(tokenizer, new String[]{"ab","bc","cd","de"}, new int[]{0,1,2,3}, new int[]{2,3,4,5});
     }
 
     public void testNgrams() throws Exception {
         NGramTokenizer tokenizer = new NGramTokenizer(input, 1, 3);
-        String[] exp = new String[] {
-            "(a,0,1)", "(b,1,2)", "(c,2,3)", "(d,3,4)", "(e,4,5)",
-            "(ab,0,2)", "(bc,1,3)", "(cd,2,4)", "(de,3,5)",
-            "(abc,0,3)", "(bcd,1,4)", "(cde,2,5)"
-        };
-          
-        checkStream(tokenizer, exp);
+        assertTokenStreamContents(tokenizer,
+          new String[]{"a","b","c","d","e", "ab","bc","cd","de", "abc","bcd","cde"}, 
+          new int[]{0,1,2,3,4, 0,1,2,3, 0,1,2},
+          new int[]{1,2,3,4,5, 2,3,4,5, 3,4,5}
+        );
     }
 
     public void testOversizedNgrams() throws Exception {
         NGramTokenizer tokenizer = new NGramTokenizer(input, 6, 7);
-        assertFalse(tokenizer.incrementToken());
+        assertTokenStreamContents(tokenizer, new String[0], new int[0], new int[0]);
     }
     
     public void testReset() throws Exception {
-      NGramTokenizer tokenizer = new NGramTokenizer(input, 1, 3);
-      TermAttribute termAtt = (TermAttribute) tokenizer.getAttribute(TermAttribute.class);
-      assertTrue(tokenizer.incrementToken());
-      assertEquals("(a,0,1)", termAtt.toString());
-      assertTrue(tokenizer.incrementToken());
-      assertEquals("(b,1,2)", termAtt.toString());
+      NGramTokenizer tokenizer = new NGramTokenizer(input, 1, 1);
+      assertTokenStreamContents(tokenizer, new String[]{"a","b","c","d","e"}, new int[]{0,1,2,3,4}, new int[]{1,2,3,4,5});
       tokenizer.reset(new StringReader("abcde"));
-      assertTrue(tokenizer.incrementToken());
-      assertEquals("(a,0,1)", termAtt.toString());
+      assertTokenStreamContents(tokenizer, new String[]{"a","b","c","d","e"}, new int[]{0,1,2,3,4}, new int[]{1,2,3,4,5});
     }
 }

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java Mon Aug 24 12:44:13 2009
@@ -18,16 +18,12 @@
  */
 
 import java.io.File;
-import java.io.IOException;
 import java.io.Reader;
-import java.io.StringReader;
-
-import junit.framework.TestCase;
 
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.WhitespaceTokenizer;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 
 /**
  * Test the Dutch Stem Filter, which only modifies the term text.
@@ -35,11 +31,11 @@
  * The code states that it uses the snowball algorithm, but tests reveal some differences.
  * 
  */
-public class TestDutchStemmer extends TestCase {
+public class TestDutchStemmer extends BaseTokenStreamTestCase {
   File dataDir = new File(System.getProperty("dataDir", "./bin"));
   File customDictFile = new File(dataDir, "org/apache/lucene/analysis/nl/customStemDict.txt");
   
-  public void testWithSnowballExamples() throws IOException {
+  public void testWithSnowballExamples() throws Exception {
 	 check("lichaamsziek", "lichaamsziek");
 	 check("lichamelijk", "licham");
 	 check("lichamelijke", "licham");
@@ -124,10 +120,10 @@
   
   public void testReusableTokenStream() throws Exception {
     Analyzer a = new DutchAnalyzer(); 
-    checkReuse(a, "lichaamsziek", "lichaamsziek");
-    checkReuse(a, "lichamelijk", "licham");
-    checkReuse(a, "lichamelijke", "licham");
-    checkReuse(a, "lichamelijkheden", "licham");
+    checkOneTermReuse(a, "lichaamsziek", "lichaamsziek");
+    checkOneTermReuse(a, "lichamelijk", "licham");
+    checkOneTermReuse(a, "lichamelijke", "licham");
+    checkOneTermReuse(a, "lichamelijkheden", "licham");
   }
   
   /**
@@ -141,10 +137,10 @@
   
   public void testLUCENE1678BWComp() throws Exception {
     Analyzer a = new DutchSubclassAnalyzer();
-    checkReuse(a, "lichaamsziek", "lichaamsziek");
-    checkReuse(a, "lichamelijk", "lichamelijk");
-    checkReuse(a, "lichamelijke", "lichamelijke");
-    checkReuse(a, "lichamelijkheden", "lichamelijkheden");
+    checkOneTermReuse(a, "lichaamsziek", "lichaamsziek");
+    checkOneTermReuse(a, "lichamelijk", "lichamelijk");
+    checkOneTermReuse(a, "lichamelijke", "lichamelijke");
+    checkOneTermReuse(a, "lichamelijkheden", "lichamelijkheden");
   }
  
   /* 
@@ -153,9 +149,9 @@
    */
   public void testExclusionTableReuse() throws Exception {
     DutchAnalyzer a = new DutchAnalyzer();
-    checkReuse(a, "lichamelijk", "licham");
+    checkOneTermReuse(a, "lichamelijk", "licham");
     a.setStemExclusionTable(new String[] { "lichamelijk" });
-    checkReuse(a, "lichamelijk", "lichamelijk");
+    checkOneTermReuse(a, "lichamelijk", "lichamelijk");
   }
   
   /* 
@@ -164,30 +160,13 @@
    */
   public void testStemDictionaryReuse() throws Exception {
     DutchAnalyzer a = new DutchAnalyzer();
-    checkReuse(a, "lichamelijk", "licham");
+    checkOneTermReuse(a, "lichamelijk", "licham");
     a.setStemDictionary(customDictFile);
-    checkReuse(a, "lichamelijk", "somethingentirelydifferent");
+    checkOneTermReuse(a, "lichamelijk", "somethingentirelydifferent");
   }
   
-  private void check(final String input, final String expected) throws IOException {
-    Analyzer analyzer = new DutchAnalyzer(); 
-    TokenStream stream = analyzer.tokenStream("dummy", new StringReader(input));
-    TermAttribute text = (TermAttribute) stream.getAttribute(TermAttribute.class);
-    assertTrue(stream.incrementToken());
-    assertEquals(expected, text.term());
-    assertFalse(stream.incrementToken());
-    stream.close();
-  }
-  
-  private void checkReuse(Analyzer a, final String input, final String expected)
-      throws IOException {
-    TokenStream stream = a
-        .reusableTokenStream("dummy", new StringReader(input));
-    TermAttribute text = (TermAttribute) stream
-        .getAttribute(TermAttribute.class);
-    assertTrue(stream.incrementToken());
-    assertEquals(expected, text.term());
-    assertFalse(stream.incrementToken());
+  private void check(final String input, final String expected) throws Exception {
+    checkOneTerm(new DutchAnalyzer(), input, expected); 
   }
-
+  
 }
\ No newline at end of file
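
The removed local helpers document what the shared methods are expected to do:
check() fed the analyzer one input through tokenStream() and asserted a single
output term, and checkReuse() did the same through reusableTokenStream(). A
sketch of checkOneTermReuse modeled directly on the removed code (the
BaseTokenStreamTestCase version may differ in details):

    // Sketch: analyze one input with the reusable stream and expect exactly
    // one output token whose term text matches.
    private void checkOneTermReuse(Analyzer a, String input, String expected)
        throws IOException {
      TokenStream stream = a.reusableTokenStream("dummy", new StringReader(input));
      TermAttribute termAtt = (TermAttribute) stream.getAttribute(TermAttribute.class);
      assertTrue("exactly one token expected", stream.incrementToken());
      assertEquals(expected, termAtt.term());
      assertFalse("exactly one token expected", stream.incrementToken());
    }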

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterTest.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterTest.java Mon Aug 24 12:44:13 2009
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-import junit.framework.TestCase;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.WhitespaceTokenizer;
@@ -27,20 +27,13 @@
 import java.io.IOException;
 import java.io.StringReader;
 
-public class NumericPayloadTokenFilterTest extends TestCase {
+public class NumericPayloadTokenFilterTest extends BaseTokenStreamTestCase {
 
 
   public NumericPayloadTokenFilterTest(String s) {
     super(s);
   }
 
-  protected void setUp() {
-  }
-
-  protected void tearDown() {
-
-  }
-
   public void test() throws IOException {
     String test = "The quick red fox jumped over the lazy brown dogs";
 

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilterTest.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilterTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilterTest.java Mon Aug 24 12:44:13 2009
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-import junit.framework.TestCase;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.WhitespaceTokenizer;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
@@ -25,20 +25,13 @@
 import java.io.IOException;
 import java.io.StringReader;
 
-public class TokenOffsetPayloadTokenFilterTest extends TestCase {
+public class TokenOffsetPayloadTokenFilterTest extends BaseTokenStreamTestCase {
 
 
   public TokenOffsetPayloadTokenFilterTest(String s) {
     super(s);
   }
 
-  protected void setUp() {
-  }
-
-  protected void tearDown() {
-
-  }
-
   public void test() throws IOException {
     String test = "The quick red fox jumped over the lazy brown dogs";
 

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterTest.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterTest.java Mon Aug 24 12:44:13 2009
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-import junit.framework.TestCase;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.WhitespaceTokenizer;
@@ -27,21 +27,13 @@
 import java.io.IOException;
 import java.io.StringReader;
 
-public class TypeAsPayloadTokenFilterTest extends TestCase {
+public class TypeAsPayloadTokenFilterTest extends BaseTokenStreamTestCase {
 
 
   public TypeAsPayloadTokenFilterTest(String s) {
     super(s);
   }
 
-  protected void setUp() {
-  }
-
-  protected void tearDown() {
-
-  }
-
-
   public void test() throws IOException {
     String test = "The quick red fox jumped over the lazy brown dogs";
 

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/position/PositionFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/position/PositionFilterTest.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/position/PositionFilterTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/position/PositionFilterTest.java Mon Aug 24 12:44:13 2009
@@ -19,13 +19,12 @@
 
 import java.io.IOException;
 
-import junit.framework.TestCase;
-import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.shingle.ShingleFilter;
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 
-public class PositionFilterTest extends TestCase {
+public class PositionFilterTest extends BaseTokenStreamTestCase {
 
   public class TestTokenStream extends TokenStream {
 
@@ -40,6 +39,7 @@
     }
 
     public final boolean incrementToken() throws IOException {
+      clearAttributes();
       if (index < testToken.length) {
         termAtt.setTermBuffer(testToken[index++]);
         return true;
@@ -52,9 +52,6 @@
     }
   }
 
-  public static void main(String[] args) {
-    junit.textui.TestRunner.run(PositionFilterTest.class);
-  }
   public static final String[] TEST_TOKEN = new String[]{
     "please",
     "divide",
@@ -105,65 +102,39 @@
     "word"
   };
 
-  public void testFilter() throws IOException {
+  public void testFilter() throws Exception {
 
-    filterTest(new PositionFilter(new TestTokenStream(TEST_TOKEN)),
+    assertTokenStreamContents(new PositionFilter(new TestTokenStream(TEST_TOKEN)),
                TEST_TOKEN,
                TEST_TOKEN_POSITION_INCREMENTS);
   }
 
-  public void testNonZeroPositionIncrement() throws IOException {
+  public void testNonZeroPositionIncrement() throws Exception {
     
-    filterTest(new PositionFilter(new TestTokenStream(TEST_TOKEN), 5),
+    assertTokenStreamContents(new PositionFilter(new TestTokenStream(TEST_TOKEN), 5),
                TEST_TOKEN,
                TEST_TOKEN_NON_ZERO_POSITION_INCREMENTS);
   }
   
-  public void testReset() throws IOException {
+  public void testReset() throws Exception {
 
     PositionFilter filter = new PositionFilter(new TestTokenStream(TEST_TOKEN));
-    filterTest(filter, TEST_TOKEN, TEST_TOKEN_POSITION_INCREMENTS);
+    assertTokenStreamContents(filter, TEST_TOKEN, TEST_TOKEN_POSITION_INCREMENTS);
     filter.reset();
     // Make sure that the reset filter provides correct position increments
-    filterTest(filter, TEST_TOKEN, TEST_TOKEN_POSITION_INCREMENTS);
+    assertTokenStreamContents(filter, TEST_TOKEN, TEST_TOKEN_POSITION_INCREMENTS);
   }
   
   /** Tests ShingleFilter up to six shingles against six terms.
    *  Tests PositionFilter setting all but the first positionIncrement to zero.
    * @throws java.io.IOException @see Token#next(Token)
    */
-  public void test6GramFilterNoPositions() throws IOException {
+  public void test6GramFilterNoPositions() throws Exception {
 
     ShingleFilter filter = new ShingleFilter(new TestTokenStream(TEST_TOKEN), 6);
-    filterTest(new PositionFilter(filter),
+    assertTokenStreamContents(new PositionFilter(filter),
                SIX_GRAM_NO_POSITIONS_TOKENS,
                SIX_GRAM_NO_POSITIONS_INCREMENTS);
   }
 
-  protected TokenStream filterTest(final TokenStream filter,
-                                   final String[] tokensToCompare,
-                                   final int[] positionIncrements)
-      throws IOException {
-
-    int i = 0;
-    final Token reusableToken = new Token();
-
-    for (Token nextToken = filter.next(reusableToken)
-        ; i < tokensToCompare.length
-        ; nextToken = filter.next(reusableToken)) {
-
-      if (null != nextToken) {
-        final String termText = nextToken.term();
-        final String goldText = tokensToCompare[i];
-
-        assertEquals("Wrong termText", goldText, termText);
-        assertEquals("Wrong positionIncrement for token \"" + termText + "\"",
-                     positionIncrements[i], nextToken.getPositionIncrement());
-      }else{
-        assertNull(tokensToCompare[i]);
-      }
-      i++;
-    }
-    return filter;
-  }
 }
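
The one functional change here is the clearAttributes() call added to
TestTokenStream.incrementToken(). Under the attribute-based API a token
producer must wipe shared per-token state (position increment, offsets, and
so on) before populating the attributes for the next token; otherwise values
set for one token leak into the next. The minimal producer idiom the change
follows:

    // Producer idiom (sketch): clear shared attribute state first, then set
    // the attributes describing the next token.
    public final boolean incrementToken() throws IOException {
      if (index >= testToken.length) {
        return false;                      // end of stream
      }
      clearAttributes();                   // resets posIncr to 1, offsets to 0, ...
      termAtt.setTermBuffer(testToken[index++]);
      return true;
    }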

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java Mon Aug 24 12:44:13 2009
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-import junit.framework.TestCase;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.LetterTokenizer;
 import org.apache.lucene.analysis.TokenStream;
@@ -39,7 +39,7 @@
 import java.io.Reader;
 import java.io.StringReader;
 
-public class QueryAutoStopWordAnalyzerTest extends TestCase {
+public class QueryAutoStopWordAnalyzerTest extends BaseTokenStreamTestCase {
   String variedFieldValues[] = {"the", "quick", "brown", "fox", "jumped", "over", "the", "lazy", "boring", "dog"};
   String repetitiveFieldValues[] = {"boring", "boring", "vaguelyboring"};
   RAMDirectory dir;
@@ -67,8 +67,8 @@
   }
 
   protected void tearDown() throws Exception {
-    super.tearDown();
     reader.close();
+    super.tearDown();
   }
 
   //Helper method to query
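
The tearDown() change follows the fixture convention applied throughout this
commit: call super.setUp() first on the way in and super.tearDown() last on
the way out, so the base class only runs its own checks after the test's
resources have been released. Sketched with reader standing in for any
per-test resource:

    protected void setUp() throws Exception {
      super.setUp();        // base class first while setting up
      // ... build per-test fixtures (directory, writer, reader) ...
    }

    protected void tearDown() throws Exception {
      reader.close();       // release test resources first
      super.tearDown();     // base class last while tearing down
    }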

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java Mon Aug 24 12:44:13 2009
@@ -22,9 +22,9 @@
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.WhitespaceTokenizer;
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 
-public class TestReverseStringFilter extends LuceneTestCase {
+public class TestReverseStringFilter extends BaseTokenStreamTestCase {
   public void testFilter() throws Exception {
     TokenStream stream = new WhitespaceTokenizer(
         new StringReader("Do have a nice day"));     // 1-4 length string

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java?rev=807190&r1=807189&r2=807190&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java Mon Aug 24 12:44:13 2009
@@ -24,8 +24,7 @@
 import java.io.Reader;
 import java.io.StringReader;
 
-import junit.framework.TestCase;
-
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
@@ -37,7 +36,7 @@
  * @version   $Id$
  */
 
-public class TestRussianAnalyzer extends TestCase
+public class TestRussianAnalyzer extends BaseTokenStreamTestCase
 {
     private InputStreamReader inWords;
 
@@ -55,6 +54,7 @@
 
     protected void setUp() throws Exception
     {
+      super.setUp();
       dataDir = new File(System.getProperty("dataDir", "./bin"));
     }
 
@@ -195,14 +195,4 @@
       assertAnalyzesToReuse(a, "Но знание это хранилось в тайне",
           new String[] { "знан", "хран", "тайн" });
     }
-
-    private void assertAnalyzesToReuse(Analyzer a, String input, String[] output) throws Exception {
-      TokenStream ts = a.reusableTokenStream("dummy", new StringReader(input));
-      TermAttribute termAtt = (TermAttribute) ts.getAttribute(TermAttribute.class);
-      for (int i=0; i<output.length; i++) {
-          assertTrue(ts.incrementToken());
-          assertEquals(termAtt.term(), output[i]);
-      }
-      assertFalse(ts.incrementToken());
-    }
 }
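
A small win from deleting the local assertAnalyzesToReuse(): the removed copy
passed its arguments to assertEquals() in reverse (actual first, expected
second), so failure messages would have reported the two values swapped. A
corrected sketch of the same helper, modeled on the removed code (the shared
BaseTokenStreamTestCase version may differ in details):

    // Sketch: analyze input via the reusable stream and compare each term,
    // with expected/actual in conventional JUnit order.
    private void assertAnalyzesToReuse(Analyzer a, String input, String[] output)
        throws Exception {
      TokenStream ts = a.reusableTokenStream("dummy", new StringReader(input));
      TermAttribute termAtt = (TermAttribute) ts.getAttribute(TermAttribute.class);
      for (int i = 0; i < output.length; i++) {
        assertTrue(ts.incrementToken());
        assertEquals(output[i], termAtt.term());
      }
      assertFalse(ts.incrementToken());
    }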


