lucene-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From rm...@apache.org
Subject svn commit: r1664421 [4/6] - in /lucene/dev/branches/branch_5x: ./ lucene/ lucene/analysis/ lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/ lucene/analysis/common/src/test/org/apache/lucene/analysis/ar/ lucene/analysis/common/src/te...
Date Thu, 05 Mar 2015 17:09:14 GMT
Modified: lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestUAX29URLEmailTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestUAX29URLEmailTokenizer.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestUAX29URLEmailTokenizer.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestUAX29URLEmailTokenizer.java Thu Mar  5 17:09:12 2015
@@ -6,6 +6,7 @@ import org.apache.lucene.analysis.TokenF
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
+import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.TestUtil;
 
 import java.io.BufferedReader;
@@ -86,15 +87,42 @@ public class TestUAX29URLEmailTokenizer
     BaseTokenStreamTestCase.assertTokenStreamContents(tokenizer, new String[] { "testing", "1234" });
   }
 
-  private Analyzer a = new Analyzer() {
-    @Override
-    protected TokenStreamComponents createComponents(String fieldName) {
-
-      Tokenizer tokenizer = new UAX29URLEmailTokenizer(newAttributeFactory());
-      return new TokenStreamComponents(tokenizer);
-    }
-  };
-
+  private Analyzer a, urlAnalyzer, emailAnalyzer;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    a = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        Tokenizer tokenizer = new UAX29URLEmailTokenizer(newAttributeFactory());
+        return new TokenStreamComponents(tokenizer);
+      }
+    };
+    urlAnalyzer = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer(newAttributeFactory());
+        tokenizer.setMaxTokenLength(Integer.MAX_VALUE);  // Tokenize arbitrary length URLs
+        TokenFilter filter = new URLFilter(tokenizer);
+        return new TokenStreamComponents(tokenizer, filter);
+      }
+    };
+    emailAnalyzer = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer(newAttributeFactory());
+        TokenFilter filter = new EmailFilter(tokenizer);
+        return new TokenStreamComponents(tokenizer, filter);
+      }
+    };
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    IOUtils.close(a, urlAnalyzer, emailAnalyzer);
+    super.tearDown();
+  }
 
   /** Passes through tokens with type "<URL>" and blocks all other types. */
   private class URLFilter extends TokenFilter {
@@ -132,27 +160,7 @@ public class TestUAX29URLEmailTokenizer
       }
       return isTokenAvailable;
     }
-  }
-
-  private Analyzer urlAnalyzer = new Analyzer() {
-    @Override
-    protected TokenStreamComponents createComponents(String fieldName) {
-      UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer(newAttributeFactory());
-      tokenizer.setMaxTokenLength(Integer.MAX_VALUE);  // Tokenize arbitrary length URLs
-      TokenFilter filter = new URLFilter(tokenizer);
-      return new TokenStreamComponents(tokenizer, filter);
-    }
-  };
-
-  private Analyzer emailAnalyzer = new Analyzer() {
-    @Override
-    protected TokenStreamComponents createComponents(String fieldName) {
-      UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer(newAttributeFactory());
-      TokenFilter filter = new EmailFilter(tokenizer);
-      return new TokenStreamComponents(tokenizer, filter);
-    }
-  };
-  
+  }  
   
   public void testArmenian() throws Exception {
     BaseTokenStreamTestCase.assertAnalyzesTo(a, "Վիքիպեդիայի 13 միլիոն հոդվածները (4,600` հայերեն վիքիպեդիայում) գրվել են կամավորների կողմից ու համարյա բոլոր հոդվածները կարող է խմբագրել ցանկաց մարդ ով կարող է բացել Վիքիպեդիայի կայքը։",

Modified: lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java Thu Mar  5 17:09:12 2015
@@ -28,7 +28,7 @@ public class TestSwedishAnalyzer extends
   /** This test fails with NPE when the 
    * stopwords file is missing in classpath */
   public void testResourcesAvailable() {
-    new SwedishAnalyzer();
+    new SwedishAnalyzer().close();
   }
   
   /** test stopwords and stemming */
@@ -39,6 +39,7 @@ public class TestSwedishAnalyzer extends
     checkOneTerm(a, "jaktkarlens", "jaktkarl");
     // stopword
     assertAnalyzesTo(a, "och", new String[] {});
+    a.close();
   }
   
   /** test use of exclusion set */
@@ -48,11 +49,14 @@ public class TestSwedishAnalyzer extends
         SwedishAnalyzer.getDefaultStopSet(), exclusionSet);
     checkOneTerm(a, "jaktkarlarne", "jaktkarlarne");
     checkOneTerm(a, "jaktkarlens", "jaktkarl");
+    a.close();
   }
   
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random(), new SwedishAnalyzer(), 1000*RANDOM_MULTIPLIER);
+    Analyzer analyzer = new SwedishAnalyzer();
+    checkRandomData(random(), analyzer, 1000*RANDOM_MULTIPLIER);
+    analyzer.close();
   }
 
   public void testBackcompat40() throws IOException {

Modified: lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishLightStemFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishLightStemFilter.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishLightStemFilter.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishLightStemFilter.java Thu Mar  5 17:09:12 2015
@@ -18,7 +18,6 @@ package org.apache.lucene.analysis.sv;
  */
 
 import java.io.IOException;
-import java.io.Reader;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
@@ -35,13 +34,25 @@ import static org.apache.lucene.analysis
  * Simple tests for {@link SwedishLightStemFilter}
  */
 public class TestSwedishLightStemFilter extends BaseTokenStreamTestCase {
-  private Analyzer analyzer = new Analyzer() {
-    @Override
-    protected TokenStreamComponents createComponents(String fieldName) {
-      Tokenizer source = new MockTokenizer(MockTokenizer.WHITESPACE, false);
-      return new TokenStreamComponents(source, new SwedishLightStemFilter(source));
-    }
-  };
+  private Analyzer analyzer;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    analyzer = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        Tokenizer source = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+        return new TokenStreamComponents(source, new SwedishLightStemFilter(source));
+      }
+    };
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    analyzer.close();
+    super.tearDown();
+  }
   
   /** Test against a vocabulary from the reference impl */
   public void testVocabulary() throws IOException {
@@ -59,6 +70,7 @@ public class TestSwedishLightStemFilter
       }
     };
     checkOneTerm(a, "jaktkarlens", "jaktkarlens");
+    a.close();
   }
   
   /** blast some random strings through the analyzer */
@@ -75,5 +87,6 @@ public class TestSwedishLightStemFilter
       }
     };
     checkOneTerm(a, "", "");
+    a.close();
   }
 }

Modified: lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSolrSynonymParser.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSolrSynonymParser.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSolrSynonymParser.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSolrSynonymParser.java Thu Mar  5 17:09:12 2015
@@ -17,7 +17,6 @@ package org.apache.lucene.analysis.synon
  * limitations under the License.
  */
 
-import java.io.Reader;
 import java.io.StringReader;
 import java.text.ParseException;
 
@@ -27,7 +26,6 @@ import org.apache.lucene.analysis.MockAn
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.en.EnglishAnalyzer;
-import org.junit.Test;
 
 /**
  * Tests parser for the Solr synonyms format
@@ -43,11 +41,13 @@ public class TestSolrSynonymParser exten
     "foo => baz\n" +
     "this test, that testing";
     
-    SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(random()));
+    Analyzer analyzer = new MockAnalyzer(random());
+    SolrSynonymParser parser = new SolrSynonymParser(true, true, analyzer);
     parser.parse(new StringReader(testFile));
     final SynonymMap map = parser.build();
+    analyzer.close();
     
-    Analyzer analyzer = new Analyzer() {
+    analyzer = new Analyzer() {
       @Override
       protected TokenStreamComponents createComponents(String fieldName) {
         Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, true);
@@ -70,46 +70,77 @@ public class TestSolrSynonymParser exten
     assertAnalyzesTo(analyzer, "this test",
         new String[] { "this", "that", "test", "testing" },
         new int[] { 1, 0, 1, 0 });
+    analyzer.close();
   }
   
   /** parse a syn file with bad syntax */
-  @Test(expected=ParseException.class)
   public void testInvalidDoubleMap() throws Exception {
-    String testFile = "a => b => c"; 
-    SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(random()));
-    parser.parse(new StringReader(testFile));
+    String testFile = "a => b => c";
+    Analyzer analyzer = new MockAnalyzer(random());
+    SolrSynonymParser parser = new SolrSynonymParser(true, true, analyzer);
+    try {
+      parser.parse(new StringReader(testFile));
+      fail("didn't get expected exception");
+    } catch (ParseException expected) {
+      // expected exc
+    }
+    analyzer.close();
   }
   
   /** parse a syn file with bad syntax */
-  @Test(expected=ParseException.class)
   public void testInvalidAnalyzesToNothingOutput() throws Exception {
     String testFile = "a => 1"; 
-    SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(random(), MockTokenizer.SIMPLE, false));
-    parser.parse(new StringReader(testFile));
+    Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, false);
+    SolrSynonymParser parser = new SolrSynonymParser(true, true, analyzer);
+    try {
+      parser.parse(new StringReader(testFile));
+      fail("didn't get expected exception");
+    } catch (ParseException expected) {
+      // expected exc
+    }
+    analyzer.close();
   }
   
   /** parse a syn file with bad syntax */
-  @Test(expected=ParseException.class)
   public void testInvalidAnalyzesToNothingInput() throws Exception {
-    String testFile = "1 => a"; 
-    SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(random(), MockTokenizer.SIMPLE, false));
-    parser.parse(new StringReader(testFile));
+    String testFile = "1 => a";
+    Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, false);
+    SolrSynonymParser parser = new SolrSynonymParser(true, true, analyzer);
+    try {
+      parser.parse(new StringReader(testFile));
+      fail("didn't get expected exception");
+    } catch (ParseException expected) {
+      // expected exc
+    }
+    analyzer.close();
   }
   
   /** parse a syn file with bad syntax */
-  @Test(expected=ParseException.class)
   public void testInvalidPositionsInput() throws Exception {
     String testFile = "testola => the test";
-    SolrSynonymParser parser = new SolrSynonymParser(true, true, new EnglishAnalyzer());
-    parser.parse(new StringReader(testFile));
+    Analyzer analyzer = new EnglishAnalyzer();
+    SolrSynonymParser parser = new SolrSynonymParser(true, true, analyzer);
+    try {
+      parser.parse(new StringReader(testFile));
+      fail("didn't get expected exception");
+    } catch (ParseException expected) {
+      // expected exc
+    }
+    analyzer.close();
   }
   
   /** parse a syn file with bad syntax */
-  @Test(expected=ParseException.class)
   public void testInvalidPositionsOutput() throws Exception {
     String testFile = "the test => testola";
-    SolrSynonymParser parser = new SolrSynonymParser(true, true, new EnglishAnalyzer());
-    parser.parse(new StringReader(testFile));
+    Analyzer analyzer = new EnglishAnalyzer();
+    SolrSynonymParser parser = new SolrSynonymParser(true, true, analyzer);
+    try {
+      parser.parse(new StringReader(testFile));
+      fail("didn't get expected exception");
+    } catch (ParseException expected) {
+      // expected exc
+    }
+    analyzer.close();
   }
   
   /** parse a syn file with some escaped syntax chars */
@@ -117,10 +148,12 @@ public class TestSolrSynonymParser exten
     String testFile = 
       "a\\=>a => b\\=>b\n" +
       "a\\,a => b\\,b";
-    SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(random(), MockTokenizer.KEYWORD, false));
+    Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.KEYWORD, false);
+    SolrSynonymParser parser = new SolrSynonymParser(true, true, analyzer);
     parser.parse(new StringReader(testFile));
     final SynonymMap map = parser.build();
-    Analyzer analyzer = new Analyzer() {
+    analyzer.close();
+    analyzer = new Analyzer() {
       @Override
       protected TokenStreamComponents createComponents(String fieldName) {
         Tokenizer tokenizer = new MockTokenizer(MockTokenizer.KEYWORD, false);
@@ -139,5 +172,6 @@ public class TestSolrSynonymParser exten
     assertAnalyzesTo(analyzer, "a,a",
         new String[] { "b,b" },
         new int[] { 1 });
+    analyzer.close();
   }
 }

Modified: lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java Thu Mar  5 17:09:12 2015
@@ -37,7 +37,6 @@ import org.apache.lucene.analysis.TokenS
 import org.apache.lucene.analysis.MockGraphTokenFilter;
 import org.apache.lucene.analysis.core.KeywordTokenizer;
 import org.apache.lucene.analysis.tokenattributes.*;
-import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.CharsRefBuilder;
 import org.apache.lucene.util.TestUtil;
 
@@ -166,6 +165,7 @@ public class TestSynonymMapFilter extend
                      new int[] {1, 1},
                      true);
     checkAnalysisConsistency(random(), analyzer, false, "a b c");
+    analyzer.close();
   }
 
   public void testDoKeepOrig() throws Exception {
@@ -191,6 +191,7 @@ public class TestSynonymMapFilter extend
                      new int[] {1, 2, 1, 1},
                      true);
     checkAnalysisConsistency(random(), analyzer, false, "a b c");
+    analyzer.close();
   }
 
   public void testBasic() throws Exception {
@@ -502,6 +503,7 @@ public class TestSynonymMapFilter extend
       };
 
       checkRandomData(random(), analyzer, 100);
+      analyzer.close();
     }
   }
 
@@ -560,6 +562,7 @@ public class TestSynonymMapFilter extend
       };
 
       checkRandomData(random, analyzer, 100);
+      analyzer.close();
     }
   }
   
@@ -584,6 +587,7 @@ public class TestSynonymMapFilter extend
       };
 
       checkAnalysisConsistency(random, analyzer, random.nextBoolean(), "");
+      analyzer.close();
     }
   }
   
@@ -613,6 +617,7 @@ public class TestSynonymMapFilter extend
       };
 
       checkRandomData(random, analyzer, 100, 1024);
+      analyzer.close();
     }
   }
   
@@ -621,10 +626,11 @@ public class TestSynonymMapFilter extend
     String testFile = 
       "aaa => aaaa1 aaaa2 aaaa3\n" + 
       "bbb => bbbb1 bbbb2\n";
-      
-    SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(random()));
+    Analyzer synAnalyzer = new MockAnalyzer(random());
+    SolrSynonymParser parser = new SolrSynonymParser(true, true, synAnalyzer);
     parser.parse(new StringReader(testFile));
     final SynonymMap map = parser.build();
+    synAnalyzer.close();
       
     Analyzer analyzer = new Analyzer() {
       @Override
@@ -642,6 +648,7 @@ public class TestSynonymMapFilter extend
     // xyzzy aaa pot of gold -> xyzzy aaaa1 aaaa2 aaaa3 gold
     assertAnalyzesTo(analyzer, "xyzzy aaa pot of gold",
                      new String[] { "xyzzy", "aaaa1", "pot", "aaaa2", "of", "aaaa3", "gold" });
+    analyzer.close();
   }
 
   public void testBasic2() throws Exception {
@@ -716,6 +723,7 @@ public class TestSynonymMapFilter extend
     assertAnalyzesTo(a, "z x c $",
         new String[] { "z", "xc", "$" },
         new int[] { 1, 1, 1 });
+    a.close();
   }
   
   public void testRepeatsOff() throws Exception {
@@ -736,6 +744,7 @@ public class TestSynonymMapFilter extend
     assertAnalyzesTo(a, "a b",
         new String[] { "ab" },
         new int[] { 1 });
+    a.close();
   }
   
   public void testRepeatsOn() throws Exception {
@@ -756,6 +765,7 @@ public class TestSynonymMapFilter extend
     assertAnalyzesTo(a, "a b",
         new String[] { "ab", "ab", "ab" },
         new int[] { 1, 0, 0 });
+    a.close();
   }
   
   public void testRecursion() throws Exception {
@@ -774,6 +784,7 @@ public class TestSynonymMapFilter extend
     assertAnalyzesTo(a, "zoo zoo $ zoo",
         new String[] { "zoo", "zoo", "$", "zoo" },
         new int[] { 1, 1, 1, 1 });
+    a.close();
   }
  
   public void testRecursion2() throws Exception {
@@ -794,6 +805,7 @@ public class TestSynonymMapFilter extend
     assertAnalyzesTo(a, "zoo zoo $ zoo",
         new String[] { "zoo", "zoo", "zoo", "zoo", "zoo", "$", "zoo", "zoo", "zoo", "zoo" },
         new int[] { 1, 0, 1, 0, 0, 1, 0, 1, 0, 1 });
+    a.close();
   }
 
   public void testOutputHangsOffEnd() throws Exception {
@@ -869,6 +881,7 @@ public class TestSynonymMapFilter extend
     assertAnalyzesTo(a, "z x c $",
         new String[] { "z", "x", "xc", "c", "$" },
         new int[] { 1, 1, 0, 1, 1 });
+    a.close();
   }
   
   public void testRecursion3() throws Exception {
@@ -887,6 +900,7 @@ public class TestSynonymMapFilter extend
     assertAnalyzesTo(a, "zoo zoo $ zoo",
         new String[] { "zoo", "zoo", "zoo", "$", "zoo" },
         new int[] { 1, 0, 1, 1, 1 });
+    a.close();
   }
   
   public void testRecursion4() throws Exception {
@@ -906,6 +920,7 @@ public class TestSynonymMapFilter extend
     assertAnalyzesTo(a, "zoo zoo $ zoo",
         new String[] { "zoo", "zoo", "zoo", "$", "zoo", "zoo", "zoo" },
         new int[] { 1, 0, 1, 1, 1, 0, 1 });
+    a.close();
   }
   
   public void testMultiwordOffsets() throws Exception {
@@ -926,6 +941,7 @@ public class TestSynonymMapFilter extend
         new int[] { 0, 0, 9, 16 },
         new int[] { 8, 22, 15, 22 },
         new int[] { 1, 0, 1, 1 });
+    a.close();
   }
 
   public void testEmpty() throws Exception {

Modified: lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestWordnetSynonymParser.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestWordnetSynonymParser.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestWordnetSynonymParser.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestWordnetSynonymParser.java Thu Mar  5 17:09:12 2015
@@ -17,7 +17,6 @@
 
 package org.apache.lucene.analysis.synonym;
 
-import java.io.Reader;
 import java.io.StringReader;
 
 import org.apache.lucene.analysis.Analyzer;
@@ -40,11 +39,13 @@ public class TestWordnetSynonymParser ex
     "s(100000004,2,'king''s meany',n,1,1).\n";
   
   public void testSynonyms() throws Exception {
-    WordnetSynonymParser parser = new WordnetSynonymParser(true, true, new MockAnalyzer(random()));
+    Analyzer analyzer = new MockAnalyzer(random());
+    WordnetSynonymParser parser = new WordnetSynonymParser(true, true, analyzer);
     parser.parse(new StringReader(synonymsFile));
     final SynonymMap map = parser.build();
+    analyzer.close();
     
-    Analyzer analyzer = new Analyzer() {
+    analyzer = new Analyzer() {
       @Override
       protected TokenStreamComponents createComponents(String fieldName) {
         Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
@@ -66,5 +67,6 @@ public class TestWordnetSynonymParser ex
     /* multi words */
     assertAnalyzesTo(analyzer, "king's evil",
         new String[] { "king's", "king's", "evil", "meany" });
+    analyzer.close();
   }
 }

Modified: lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java Thu Mar  5 17:09:12 2015
@@ -17,15 +17,9 @@ package org.apache.lucene.analysis.th;
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.io.Reader;
-import java.util.Random;
-
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.core.KeywordTokenizer;
 import org.apache.lucene.analysis.core.StopAnalyzer;
 import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
 import org.apache.lucene.analysis.util.CharArraySet;
@@ -47,18 +41,22 @@ public class TestThaiAnalyzer extends Ba
    * testcase for offsets
    */
   public void testOffsets() throws Exception {
-    assertAnalyzesTo(new ThaiAnalyzer(CharArraySet.EMPTY_SET), "การที่ได้ต้องแสดงว่างานดี",
+    Analyzer analyzer = new ThaiAnalyzer(CharArraySet.EMPTY_SET);
+    assertAnalyzesTo(analyzer, "การที่ได้ต้องแสดงว่างานดี",
         new String[] { "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี" },
         new int[] { 0, 3, 6, 9, 13, 17, 20, 23 },
         new int[] { 3, 6, 9, 13, 17, 20, 23, 25 });
+    analyzer.close();
   }
   
   public void testStopWords() throws Exception {
-    assertAnalyzesTo(new ThaiAnalyzer(), "การที่ได้ต้องแสดงว่างานดี",
+    Analyzer analyzer = new ThaiAnalyzer();
+    assertAnalyzesTo(analyzer, "การที่ได้ต้องแสดงว่างานดี",
         new String[] { "แสดง", "งาน", "ดี" },
         new int[] { 13, 20, 23 },
         new int[] { 17, 23, 25 },
         new int[] { 5, 2, 1 });
+    analyzer.close();
   }
   
   /*
@@ -79,32 +77,37 @@ public class TestThaiAnalyzer extends Ba
         new int[] { 0, 3, 6, 9, 17, 21, 24, 27 },
         new int[] { 3, 6, 9, 13, 21, 24, 27, 29 },
         new int[] { 1, 1, 1, 1, 2, 1, 1, 1 });
+    analyzer.close();
   }
   
   public void testReusableTokenStream() throws Exception {
     ThaiAnalyzer analyzer = new ThaiAnalyzer(CharArraySet.EMPTY_SET);
     assertAnalyzesTo(analyzer, "", new String[] {});
-
-      assertAnalyzesTo(
-          analyzer,
-          "การที่ได้ต้องแสดงว่างานดี",
-          new String[] { "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี"});
-
-      assertAnalyzesTo(
-          analyzer,
-          "บริษัทชื่อ XY&Z - คุยกับ xyz@demo.com",
-          new String[] { "บริษัท", "ชื่อ", "xy", "z", "คุย", "กับ", "xyz", "demo.com" });
+    
+    assertAnalyzesTo(
+        analyzer,
+        "การที่ได้ต้องแสดงว่างานดี",
+        new String[] { "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี"});
+    
+    assertAnalyzesTo(
+        analyzer,
+        "บริษัทชื่อ XY&Z - คุยกับ xyz@demo.com",
+        new String[] { "บริษัท", "ชื่อ", "xy", "z", "คุย", "กับ", "xyz", "demo.com" });
+    analyzer.close();
   }
   
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random(), new ThaiAnalyzer(), 1000*RANDOM_MULTIPLIER);
+    Analyzer analyzer = new ThaiAnalyzer();
+    checkRandomData(random(), analyzer, 1000*RANDOM_MULTIPLIER);
+    analyzer.close();
   }
   
   /** blast some random large strings through the analyzer */
   public void testRandomHugeStrings() throws Exception {
-    Random random = random();
-    checkRandomData(random, new ThaiAnalyzer(), 100*RANDOM_MULTIPLIER, 8192);
+    Analyzer analyzer = new ThaiAnalyzer();
+    checkRandomData(random(), analyzer, 100*RANDOM_MULTIPLIER, 8192);
+    analyzer.close();
   }
   
   // LUCENE-3044
@@ -117,16 +120,19 @@ public class TestThaiAnalyzer extends Ba
     ts = analyzer.tokenStream("dummy", "ภาษาไทย");
     ts.addAttribute(FlagsAttribute.class);
     assertTokenStreamContents(ts, new String[] { "ภาษา", "ไทย" });
+    analyzer.close();
   }
   
   public void testTwoSentences() throws Exception {
-    assertAnalyzesTo(new ThaiAnalyzer(CharArraySet.EMPTY_SET), "This is a test. การที่ได้ต้องแสดงว่างานดี",
+    Analyzer analyzer = new ThaiAnalyzer(CharArraySet.EMPTY_SET);
+    assertAnalyzesTo(analyzer, "This is a test. การที่ได้ต้องแสดงว่างานดี",
           new String[] { "this", "is", "a", "test", "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี" },
           new int[] { 0, 5, 8, 10, 16, 19, 22, 25, 29, 33, 36, 39 },
           new int[] { 4, 7, 9, 14, 19, 22, 25, 29, 33, 36, 39, 41 });
+    analyzer.close();
   }
 
-  public void testBackcompat40() throws IOException {
+  public void testBackcompat40() throws Exception {
     ThaiAnalyzer a = new ThaiAnalyzer();
     a.setVersion(Version.LUCENE_4_6_1);
     // this is just a test to see the correct unicode version is being used, not actually testing hebrew

Modified: lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java Thu Mar  5 17:09:12 2015
@@ -28,7 +28,7 @@ public class TestTurkishAnalyzer extends
   /** This test fails with NPE when the 
    * stopwords file is missing in classpath */
   public void testResourcesAvailable() {
-    new TurkishAnalyzer();
+    new TurkishAnalyzer().close();
   }
   
   /** test stopwords and stemming */
@@ -42,6 +42,7 @@ public class TestTurkishAnalyzer extends
     // apostrophes
     checkOneTerm(a, "Kıbrıs'ta", "kıbrıs");
     assertAnalyzesTo(a, "Van Gölü'ne", new String[]{"van", "göl"});
+    a.close();
   }
   
   /** test use of exclusion set */
@@ -50,11 +51,14 @@ public class TestTurkishAnalyzer extends
     Analyzer a = new TurkishAnalyzer(TurkishAnalyzer.getDefaultStopSet(), exclusionSet);
     checkOneTerm(a, "ağacı", "ağacı");
     checkOneTerm(a, "ağaç", "ağaç");
+    a.close();
   }
   
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random(), new TurkishAnalyzer(), 1000*RANDOM_MULTIPLIER);
+    Analyzer analyzer = new TurkishAnalyzer();
+    checkRandomData(random(), analyzer, 1000*RANDOM_MULTIPLIER);
+    analyzer.close();
   }
 
   public void testBackcompat40() throws IOException {

Modified: lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishLowerCaseFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishLowerCaseFilter.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishLowerCaseFilter.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishLowerCaseFilter.java Thu Mar  5 17:09:12 2015
@@ -18,12 +18,9 @@ package org.apache.lucene.analysis.tr;
  */
 
 import java.io.IOException;
-import java.io.Reader;
-import java.io.StringReader;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.core.KeywordTokenizer;
@@ -80,5 +77,6 @@ public class TestTurkishLowerCaseFilter
       }
     };
     checkOneTerm(a, "", "");
+    a.close();
   }
 }

Modified: lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java Thu Mar  5 17:09:12 2015
@@ -141,6 +141,7 @@ public class TestCharTokenizers extends
     }
     // just for fun
     checkRandomData(random(), analyzer, num);
+    analyzer.close();
   }
   
   // LUCENE-3642: normalize BMP->SMP and check that offsets are correct
@@ -179,5 +180,6 @@ public class TestCharTokenizers extends
     }
     // just for fun
     checkRandomData(random(), analyzer, num);
+    analyzer.close();
   }
 }

Modified: lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestElision.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestElision.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestElision.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestElision.java Thu Mar  5 17:09:12 2015
@@ -18,7 +18,6 @@ package org.apache.lucene.analysis.util;
  */
 
 import java.io.IOException;
-import java.io.Reader;
 import java.io.StringReader;
 import java.util.ArrayList;
 import java.util.List;
@@ -71,6 +70,7 @@ public class TestElision extends BaseTok
       }
     };
     checkOneTerm(a, "", "");
+    a.close();
   }
 
 }

Modified: lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestSegmentingTokenizerBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestSegmentingTokenizerBase.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestSegmentingTokenizerBase.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestSegmentingTokenizerBase.java Thu Mar  5 17:09:12 2015
@@ -27,22 +27,35 @@ import org.apache.lucene.analysis.BaseTo
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.util.IOUtils;
 
 /** Basic tests for {@link SegmentingTokenizerBase} */
 public class TestSegmentingTokenizerBase extends BaseTokenStreamTestCase {
-  private Analyzer sentence = new Analyzer() {
-    @Override
-    protected TokenStreamComponents createComponents(String fieldName) {
-      return new TokenStreamComponents(new WholeSentenceTokenizer());
-    }
-  };
+  private Analyzer sentence, sentenceAndWord;
   
-  private Analyzer sentenceAndWord = new Analyzer() {
-    @Override
-    protected TokenStreamComponents createComponents(String fieldName) {
-      return new TokenStreamComponents(new SentenceAndWordTokenizer());
-    }
-  };
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    sentence = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        return new TokenStreamComponents(new WholeSentenceTokenizer());
+      }
+    };
+    sentenceAndWord = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        return new TokenStreamComponents(new SentenceAndWordTokenizer());
+      }
+    };
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    IOUtils.close(sentence, sentenceAndWord);
+    super.tearDown();
+  }
+
   
   /** Some simple examples, just outputting the whole sentence boundaries as "terms" */
   public void testBasics() throws IOException {

Modified: lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerTest.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerTest.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerTest.java Thu Mar  5 17:09:12 2015
@@ -194,6 +194,7 @@ public class WikipediaTokenizerTest exte
     };
     // TODO: properly support positionLengthAttribute
     checkRandomData(random(), a, 1000*RANDOM_MULTIPLIER, 20, false, false);
+    a.close();
   }
   
   /** blast some random large strings through the analyzer */
@@ -209,5 +210,6 @@ public class WikipediaTokenizerTest exte
     };
     // TODO: properly support positionLengthAttribute
     checkRandomData(random, a, 100*RANDOM_MULTIPLIER, 8192, false, false);
+    a.close();
   }
 }

Modified: lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationDocValuesField.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationDocValuesField.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationDocValuesField.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationDocValuesField.java Thu Mar  5 17:09:12 2015
@@ -27,7 +27,6 @@ import org.apache.lucene.index.IndexRead
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.DocValuesRangeQuery;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;

Modified: lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationKeyAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationKeyAnalyzer.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationKeyAnalyzer.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationKeyAnalyzer.java Thu Mar  5 17:09:12 2015
@@ -17,7 +17,6 @@ package org.apache.lucene.collation;
  * limitations under the License.
  */
 
-
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CollationTestBase;
 import org.apache.lucene.util.BytesRef;
@@ -25,17 +24,24 @@ import org.apache.lucene.util.BytesRef;
 import java.text.Collator;
 import java.util.Locale;
 
-public class TestCollationKeyAnalyzer extends CollationTestBase {
-  // the sort order of Ø versus U depends on the version of the rules being used
-  // for the inherited root locale: Ø's order isnt specified in Locale.US since 
-  // it's not used in english.
-  private boolean oStrokeFirst = Collator.getInstance(new Locale("")).compare("Ø", "U") < 0;
-  
+public class TestCollationKeyAnalyzer extends CollationTestBase { 
   // Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
   // RuleBasedCollator.  However, the Arabic Locale seems to order the Farsi
   // characters properly.
   private Collator collator = Collator.getInstance(new Locale("ar"));
-  private Analyzer analyzer = new CollationKeyAnalyzer(collator);
+  private Analyzer analyzer;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    analyzer = new CollationKeyAnalyzer(collator);
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    analyzer.close();
+    super.tearDown();
+  }
 
   private BytesRef firstRangeBeginning = new BytesRef(collator.getCollationKey(firstRangeBeginningOriginal).toByteArray());
   private BytesRef firstRangeEnd = new BytesRef(collator.getCollationKey(firstRangeEndOriginal).toByteArray());
@@ -65,7 +71,9 @@ public class TestCollationKeyAnalyzer ex
     for (int i = 0; i < iters; i++) {
       Collator collator = Collator.getInstance(Locale.GERMAN);
       collator.setStrength(Collator.PRIMARY);
-      assertThreadSafe(new CollationKeyAnalyzer(collator));
+      Analyzer analyzer = new CollationKeyAnalyzer(collator);
+      assertThreadSafe(analyzer);
+      analyzer.close();
     }
   }
 }

Modified: lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUFoldingFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUFoldingFilter.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUFoldingFilter.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUFoldingFilter.java Thu Mar  5 17:09:12 2015
@@ -18,22 +18,37 @@ package org.apache.lucene.analysis.icu;
  */
 
 import java.io.IOException;
-import java.io.Reader;
 
-import org.apache.lucene.analysis.*;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.core.KeywordTokenizer;
 
 /**
  * Tests ICUFoldingFilter
  */
 public class TestICUFoldingFilter extends BaseTokenStreamTestCase {
-  Analyzer a = new Analyzer() {
-    @Override
-    public TokenStreamComponents createComponents(String fieldName) {
-      Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
-      return new TokenStreamComponents(tokenizer, new ICUFoldingFilter(tokenizer));
-    }
-  };
+  Analyzer a;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    a = new Analyzer() {
+      @Override
+      public TokenStreamComponents createComponents(String fieldName) {
+        Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+        return new TokenStreamComponents(tokenizer, new ICUFoldingFilter(tokenizer));
+      }
+    };
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    a.close();
+    super.tearDown();
+  }
+  
   public void testDefaults() throws IOException {
     // case folding
     assertAnalyzesTo(a, "This is a test", new String[] { "this", "is", "a", "test" });
@@ -88,5 +103,6 @@ public class TestICUFoldingFilter extend
       }
     };
     checkOneTerm(a, "", "");
+    a.close();
   }
 }

Modified: lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2CharFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2CharFilter.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2CharFilter.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2CharFilter.java Thu Mar  5 17:09:12 2015
@@ -129,6 +129,7 @@ public class TestICUNormalizer2CharFilte
       }
       checkOneTerm(a, input, normalized);
     }
+    a.close();
   }
 
   public void testNFC() throws Exception {
@@ -187,6 +188,7 @@ public class TestICUNormalizer2CharFilte
     checkRandomData(random(), a, 1000*RANDOM_MULTIPLIER);
     // huge strings
     checkRandomData(random(), a, 25*RANDOM_MULTIPLIER, 8192);
+    a.close();
 
     // nfkd
     a = new Analyzer() {
@@ -203,6 +205,7 @@ public class TestICUNormalizer2CharFilte
     checkRandomData(random(), a, 1000*RANDOM_MULTIPLIER);
     // huge strings
     checkRandomData(random(), a, 25*RANDOM_MULTIPLIER, 8192);
+    a.close();
   }
   
   public void testCuriousString() throws Exception {
@@ -221,6 +224,7 @@ public class TestICUNormalizer2CharFilte
     for (int i = 0; i < 1000; i++) {
       checkAnalysisConsistency(random(), a, false, text);
     }
+    a.close();
   }
   
   public void testCuriousMassiveString() throws Exception {
@@ -411,5 +415,6 @@ public class TestICUNormalizer2CharFilte
     for (int i = 0; i < 25; i++) {
       checkAnalysisConsistency(random(), a, false, text);
     }
+    a.close();
   }
 }

Modified: lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2Filter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2Filter.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2Filter.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2Filter.java Thu Mar  5 17:09:12 2015
@@ -18,9 +18,11 @@ package org.apache.lucene.analysis.icu;
  */
 
 import java.io.IOException;
-import java.io.Reader;
 
-import org.apache.lucene.analysis.*;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.core.KeywordTokenizer;
 
 import com.ibm.icu.text.Normalizer2;
@@ -29,13 +31,25 @@ import com.ibm.icu.text.Normalizer2;
  * Tests the ICUNormalizer2Filter
  */
 public class TestICUNormalizer2Filter extends BaseTokenStreamTestCase {
-  Analyzer a = new Analyzer() {
-    @Override
-    public TokenStreamComponents createComponents(String fieldName) {
-      Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
-      return new TokenStreamComponents(tokenizer, new ICUNormalizer2Filter(tokenizer));
-    }
-  };
+  Analyzer a;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    a = new Analyzer() {
+      @Override
+      public TokenStreamComponents createComponents(String fieldName) {
+        Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+        return new TokenStreamComponents(tokenizer, new ICUNormalizer2Filter(tokenizer));
+      }
+    };
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    a.close();
+    super.tearDown();
+  }
 
   public void testDefaults() throws IOException {
     // case folding
@@ -72,6 +86,7 @@ public class TestICUNormalizer2Filter ex
     
     // decompose EAcute into E + combining Acute
     assertAnalyzesTo(a, "\u00E9", new String[] { "\u0065\u0301" });
+    a.close();
   }
   
   /** blast some random strings through the analyzer */
@@ -88,5 +103,6 @@ public class TestICUNormalizer2Filter ex
       }
     };
     checkOneTerm(a, "", "");
+    a.close();
   }
 }

Modified: lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUTransformFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUTransformFilter.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUTransformFilter.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUTransformFilter.java Thu Mar  5 17:09:12 2015
@@ -18,7 +18,6 @@ package org.apache.lucene.analysis.icu;
  */
 
 import java.io.IOException;
-import java.io.Reader;
 import java.io.StringReader;
 
 import org.apache.lucene.analysis.Analyzer;
@@ -105,6 +104,7 @@ public class TestICUTransformFilter exte
       }
     };
     checkRandomData(random(), a, 1000*RANDOM_MULTIPLIER);
+    a.close();
   }
   
   public void testEmptyTerm() throws IOException {
@@ -116,5 +116,6 @@ public class TestICUTransformFilter exte
       }
     };
     checkOneTerm(a, "", "");
+    a.close();
   }
 }

Modified: lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java Thu Mar  5 17:09:12 2015
@@ -67,14 +67,26 @@ public class TestICUTokenizer extends Ba
     assertTokenStreamContents(tokenizer, expected);
   }
   
-  private Analyzer a = new Analyzer() {
-    @Override
-    protected TokenStreamComponents createComponents(String fieldName) {
-      Tokenizer tokenizer = new ICUTokenizer(newAttributeFactory(), new DefaultICUTokenizerConfig(false));
-      TokenFilter filter = new ICUNormalizer2Filter(tokenizer);
-      return new TokenStreamComponents(tokenizer, filter);
-    }
-  };
+  private Analyzer a; 
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    a = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        Tokenizer tokenizer = new ICUTokenizer(newAttributeFactory(), new DefaultICUTokenizerConfig(false));
+        TokenFilter filter = new ICUNormalizer2Filter(tokenizer);
+        return new TokenStreamComponents(tokenizer, filter);
+      }
+    };
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    a.close();
+    super.tearDown();
+  }
 
   public void testArmenian() throws Exception {
     assertAnalyzesTo(a, "Վիքիպեդիայի 13 միլիոն հոդվածները (4,600` հայերեն վիքիպեդիայում) գրվել են կամավորների կողմից ու համարյա բոլոր հոդվածները կարող է խմբագրել ցանկաց մարդ ով կարող է բացել Վիքիպեդիայի կայքը։",

Modified: lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizerCJK.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizerCJK.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizerCJK.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizerCJK.java Thu Mar  5 17:09:12 2015
@@ -17,7 +17,6 @@ package org.apache.lucene.analysis.icu.s
  * limitations under the License.
  */
 
-import java.io.Reader;
 import java.util.Random;
 
 import org.apache.lucene.analysis.Analyzer;
@@ -27,12 +26,24 @@ import org.apache.lucene.analysis.BaseTo
  * test ICUTokenizer with dictionary-based CJ segmentation
  */
 public class TestICUTokenizerCJK extends BaseTokenStreamTestCase {
-  Analyzer a = new Analyzer() {
-    @Override
-    protected TokenStreamComponents createComponents(String fieldName) {
-      return new TokenStreamComponents(new ICUTokenizer(newAttributeFactory(), new DefaultICUTokenizerConfig(true)));
-    }
-  };
+  Analyzer a;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    a = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        return new TokenStreamComponents(new ICUTokenizer(newAttributeFactory(), new DefaultICUTokenizerConfig(true)));
+      }
+    };
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    a.close();
+    super.tearDown();
+  }
   
   /**
    * test stolen from smartcn

Modified: lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestWithCJKBigramFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestWithCJKBigramFilter.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestWithCJKBigramFilter.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestWithCJKBigramFilter.java Thu Mar  5 17:09:12 2015
@@ -27,6 +27,7 @@ import org.apache.lucene.analysis.cjk.CJ
 import org.apache.lucene.analysis.core.StopFilter;
 import org.apache.lucene.analysis.icu.ICUNormalizer2Filter;
 import org.apache.lucene.analysis.util.CharArraySet;
+import org.apache.lucene.util.IOUtils;
 
 /**
  * Tests ICUTokenizer's ability to work with CJKBigramFilter.
@@ -34,35 +35,46 @@ import org.apache.lucene.analysis.util.C
  */
 public class TestWithCJKBigramFilter extends BaseTokenStreamTestCase {
   
-  /**
-   * ICUTokenizer+CJKBigramFilter
-   */
-  private Analyzer analyzer = new Analyzer() {
-    @Override
-    protected TokenStreamComponents createComponents(String fieldName) {
-      Tokenizer source = new ICUTokenizer(newAttributeFactory(), new DefaultICUTokenizerConfig(false));
-      TokenStream result = new CJKBigramFilter(source);
-      return new TokenStreamComponents(source, new StopFilter(result, CharArraySet.EMPTY_SET));
-    }
-  };
+  Analyzer analyzer, analyzer2;
   
-  /**
-   * ICUTokenizer+ICUNormalizer2Filter+CJKBigramFilter.
-   * 
-   * ICUNormalizer2Filter uses nfkc_casefold by default, so this is a language-independent
-   * superset of CJKWidthFilter's foldings.
-   */
-  private Analyzer analyzer2 = new Analyzer() {
-    @Override
-    protected TokenStreamComponents createComponents(String fieldName) {
-      Tokenizer source = new ICUTokenizer(newAttributeFactory(), new DefaultICUTokenizerConfig(false));
-      // we put this before the CJKBigramFilter, because the normalization might combine
-      // some halfwidth katakana forms, which will affect the bigramming.
-      TokenStream result = new ICUNormalizer2Filter(source);
-      result = new CJKBigramFilter(source);
-      return new TokenStreamComponents(source, new StopFilter(result, CharArraySet.EMPTY_SET));
-    }
-  };
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    /*
+     * ICUTokenizer+CJKBigramFilter
+     */
+    analyzer = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        Tokenizer source = new ICUTokenizer(newAttributeFactory(), new DefaultICUTokenizerConfig(false));
+        TokenStream result = new CJKBigramFilter(source);
+        return new TokenStreamComponents(source, new StopFilter(result, CharArraySet.EMPTY_SET));
+      }
+    };
+    /*
+     * ICUTokenizer+ICUNormalizer2Filter+CJKBigramFilter.
+     * 
+     * ICUNormalizer2Filter uses nfkc_casefold by default, so this is a language-independent
+     * superset of CJKWidthFilter's foldings.
+     */
+    analyzer2 = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        Tokenizer source = new ICUTokenizer(newAttributeFactory(), new DefaultICUTokenizerConfig(false));
+        // we put this before the CJKBigramFilter, because the normalization might combine
+        // some halfwidth katakana forms, which will affect the bigramming.
+        TokenStream result = new ICUNormalizer2Filter(source);
+        result = new CJKBigramFilter(result);
+        return new TokenStreamComponents(source, new StopFilter(result, CharArraySet.EMPTY_SET));
+      }
+    };
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    IOUtils.close(analyzer, analyzer2);
+    super.tearDown();
+  }
   
   public void testJa1() throws IOException {
     assertAnalyzesTo(analyzer, "一二三四五六七八九十",

Modified: lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/collation/TestICUCollationKeyAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/collation/TestICUCollationKeyAnalyzer.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/collation/TestICUCollationKeyAnalyzer.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/icu/src/test/org/apache/lucene/collation/TestICUCollationKeyAnalyzer.java Thu Mar  5 17:09:12 2015
@@ -29,7 +29,19 @@ import java.util.Locale;
 public class TestICUCollationKeyAnalyzer extends CollationTestBase {
 
   private Collator collator = Collator.getInstance(new Locale("fa"));
-  private Analyzer analyzer = new ICUCollationKeyAnalyzer(collator);
+  private Analyzer analyzer;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    analyzer = new ICUCollationKeyAnalyzer(collator);
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    analyzer.close();
+    super.tearDown();
+  }
 
   private BytesRef firstRangeBeginning = new BytesRef
     (collator.getCollationKey(firstRangeBeginningOriginal).toByteArray());
@@ -62,7 +74,9 @@ public class TestICUCollationKeyAnalyzer
       Locale locale = Locale.GERMAN;
       Collator collator = Collator.getInstance(locale);
       collator.setStrength(Collator.IDENTICAL);
-      assertThreadSafe(new ICUCollationKeyAnalyzer(collator));
+      Analyzer a = new ICUCollationKeyAnalyzer(collator);
+      assertThreadSafe(a);
+      a.close();
     }
   }
 }

Modified: lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestExtendedMode.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestExtendedMode.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestExtendedMode.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestExtendedMode.java Thu Mar  5 17:09:12 2015
@@ -30,14 +30,25 @@ import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.UnicodeUtil;
 
 public class TestExtendedMode extends BaseTokenStreamTestCase {
-  private final Analyzer analyzer = new Analyzer() {
-    
-    @Override
-    protected TokenStreamComponents createComponents(String fieldName) {
-      Tokenizer tokenizer = new JapaneseTokenizer(newAttributeFactory(), null, true, Mode.EXTENDED);
-      return new TokenStreamComponents(tokenizer, tokenizer);
-    }
-  };
+  private Analyzer analyzer;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    analyzer = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        Tokenizer tokenizer = new JapaneseTokenizer(newAttributeFactory(), null, true, Mode.EXTENDED);
+        return new TokenStreamComponents(tokenizer, tokenizer);
+      }
+    };
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    analyzer.close();
+    super.tearDown();
+  }
   
   /** simple test for supplementary characters */
   public void testSurrogates() throws IOException {

Modified: lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseAnalyzer.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseAnalyzer.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseAnalyzer.java Thu Mar  5 17:09:12 2015
@@ -31,7 +31,7 @@ public class TestJapaneseAnalyzer extend
   /** This test fails with NPE when the 
    * stopwords file is missing in classpath */
   public void testResourcesAvailable() {
-    new JapaneseAnalyzer();
+    new JapaneseAnalyzer().close();
   }
   
   /**
@@ -40,12 +40,14 @@ public class TestJapaneseAnalyzer extend
    * and offsets are correct.
    */
   public void testBasics() throws IOException {
-    assertAnalyzesTo(new JapaneseAnalyzer(), "多くの学生が試験に落ちた。",
+    Analyzer a = new JapaneseAnalyzer();
+    assertAnalyzesTo(a, "多くの学生が試験に落ちた。",
         new String[] { "多く", "学生", "試験", "落ちる" },
         new int[] { 0, 3, 6,  9 },
         new int[] { 2, 5, 8, 11 },
         new int[] { 1, 2, 2,  2 }
       );
+    a.close();
   }
 
   /**
@@ -53,7 +55,7 @@ public class TestJapaneseAnalyzer extend
    */
   public void testDecomposition() throws IOException {
 
-    final Analyzer a = new JapaneseAnalyzer(null, Mode.SEARCH,
+    Analyzer a = new JapaneseAnalyzer(null, Mode.SEARCH,
                                             JapaneseAnalyzer.getDefaultStopSet(),
                                             JapaneseAnalyzer.getDefaultStopTags());
 
@@ -108,7 +110,9 @@ public class TestJapaneseAnalyzer extend
                               );
 
     // Kyoto University Baseball Club
-    assertAnalyzesToPositions(new JapaneseAnalyzer(), "京都大学硬式野球部",
+    a.close();
+    a = new JapaneseAnalyzer();
+    assertAnalyzesToPositions(a, "京都大学硬式野球部",
                      new String[] { "京都大",
                                    "学",
                                     "硬式",
@@ -117,6 +121,7 @@ public class TestJapaneseAnalyzer extend
                               new int[] {1, 1, 1, 1, 1},
                               new int[] {1, 1, 1, 1, 1});
     // toDotFile(a, "成田空港", "/mnt/scratch/out.dot");
+    a.close();
   }
 
   
@@ -129,6 +134,7 @@ public class TestJapaneseAnalyzer extend
                                             JapaneseAnalyzer.getDefaultStopSet(),
                                             JapaneseAnalyzer.getDefaultStopTags());
     checkRandomData(random, a, atLeast(1000));
+    a.close();
   }
   
   /** blast some random large strings through the analyzer */
@@ -138,6 +144,7 @@ public class TestJapaneseAnalyzer extend
         JapaneseAnalyzer.getDefaultStopSet(),
         JapaneseAnalyzer.getDefaultStopTags());
     checkRandomData(random, a, 2*RANDOM_MULTIPLIER, 8192);
+    a.close();
   }
 
   // Copied from TestJapaneseTokenizer, to make sure passing
@@ -154,6 +161,7 @@ public class TestJapaneseAnalyzer extend
                               new int[] { 1, 2, 4 },
                               new Integer(4)
     );
+    a.close();
   }
 
   // LUCENE-3897: this string (found by running all jawiki
@@ -165,6 +173,7 @@ public class TestJapaneseAnalyzer extend
                                             JapaneseAnalyzer.getDefaultStopSet(),
                                             JapaneseAnalyzer.getDefaultStopTags());
     checkAnalysisConsistency(random, a, random.nextBoolean(), s);
+    a.close();
   }
 
   // LUCENE-3897: this string (found by
@@ -176,6 +185,7 @@ public class TestJapaneseAnalyzer extend
                                             JapaneseAnalyzer.getDefaultStopSet(),
                                             JapaneseAnalyzer.getDefaultStopTags());
     checkAnalysisConsistency(random, a, random.nextBoolean(), s);
+    a.close();
   }
 
   // LUCENE-3897: this string (found by
@@ -187,6 +197,7 @@ public class TestJapaneseAnalyzer extend
                                             JapaneseAnalyzer.getDefaultStopSet(),
                                             JapaneseAnalyzer.getDefaultStopTags());
     checkAnalysisConsistency(random, a, random.nextBoolean(), s);
+    a.close();
   }
 
   public void test4thCuriousString() throws Exception {
@@ -194,8 +205,8 @@ public class TestJapaneseAnalyzer extend
     final Analyzer a = new JapaneseAnalyzer(null, Mode.SEARCH,
                                             JapaneseAnalyzer.getDefaultStopSet(),
                                             JapaneseAnalyzer.getDefaultStopTags());
-    Random random = random();
-    checkAnalysisConsistency(random, a, true, s);
+    checkAnalysisConsistency(random(), a, true, s);
+    a.close();
   }
 
   public void test5thCuriousString() throws Exception {
@@ -203,7 +214,7 @@ public class TestJapaneseAnalyzer extend
     final Analyzer a = new JapaneseAnalyzer(null, Mode.SEARCH,
                                             JapaneseAnalyzer.getDefaultStopSet(),
                                             JapaneseAnalyzer.getDefaultStopTags());
-    Random random = random();
-    checkAnalysisConsistency(random, a, false, s);
+    checkAnalysisConsistency(random(), a, false, s);
+    a.close();
   }
 }

Modified: lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseBaseFormFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseBaseFormFilter.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseBaseFormFilter.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseBaseFormFilter.java Thu Mar  5 17:09:12 2015
@@ -28,13 +28,25 @@ import org.apache.lucene.analysis.miscel
 import org.apache.lucene.analysis.util.CharArraySet;
 
 public class TestJapaneseBaseFormFilter extends BaseTokenStreamTestCase {
-  private Analyzer analyzer = new Analyzer() {
-    @Override
-    protected TokenStreamComponents createComponents(String fieldName) {
-      Tokenizer tokenizer = new JapaneseTokenizer(newAttributeFactory(), null, true, JapaneseTokenizer.DEFAULT_MODE);
-      return new TokenStreamComponents(tokenizer, new JapaneseBaseFormFilter(tokenizer));
-    }
-  };
+  private Analyzer analyzer;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    analyzer = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        Tokenizer tokenizer = new JapaneseTokenizer(newAttributeFactory(), null, true, JapaneseTokenizer.DEFAULT_MODE);
+        return new TokenStreamComponents(tokenizer, new JapaneseBaseFormFilter(tokenizer));
+      }
+    };
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    analyzer.close();
+    super.tearDown();
+  }
   
   public void testBasics() throws IOException {
     assertAnalyzesTo(analyzer, "それはまだ実験段階にあります",
@@ -55,6 +67,7 @@ public class TestJapaneseBaseFormFilter
     assertAnalyzesTo(a, "それはまだ実験段階にあります",
         new String[] { "それ", "は", "まだ", "実験", "段階", "に", "あり", "ます"  }
     );
+    a.close();
   }
   
   public void testEnglish() throws IOException {
@@ -75,5 +88,6 @@ public class TestJapaneseBaseFormFilter
       }
     };
     checkOneTerm(a, "", "");
+    a.close();
   }
 }

Modified: lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseIterationMarkCharFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseIterationMarkCharFilter.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseIterationMarkCharFilter.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseIterationMarkCharFilter.java Thu Mar  5 17:09:12 2015
@@ -22,38 +22,49 @@ import org.apache.lucene.analysis.BaseTo
 import org.apache.lucene.analysis.CharFilter;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.util.IOUtils;
 
 import java.io.IOException;
 import java.io.Reader;
 import java.io.StringReader;
 
 public class TestJapaneseIterationMarkCharFilter extends BaseTokenStreamTestCase {
-
-  private Analyzer keywordAnalyzer = new Analyzer() {
-    @Override
-    protected TokenStreamComponents createComponents(String fieldName) {
-      Tokenizer tokenizer = new MockTokenizer(MockTokenizer.KEYWORD, false);
-      return new TokenStreamComponents(tokenizer, tokenizer);
-    }
-
-    @Override
-    protected Reader initReader(String fieldName, Reader reader) {
-      return new JapaneseIterationMarkCharFilter(reader);
-    }
-  };
-
-  private Analyzer japaneseAnalyzer = new Analyzer() {
-    @Override
-    protected TokenStreamComponents createComponents(String fieldName) {
-      Tokenizer tokenizer = new JapaneseTokenizer(newAttributeFactory(), null, false, JapaneseTokenizer.Mode.SEARCH);
-      return new TokenStreamComponents(tokenizer, tokenizer);
-    }
-
-    @Override
-    protected Reader initReader(String fieldName, Reader reader) {
-      return new JapaneseIterationMarkCharFilter(reader);
-    }
-  };
+  private Analyzer keywordAnalyzer, japaneseAnalyzer;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    keywordAnalyzer = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        Tokenizer tokenizer = new MockTokenizer(MockTokenizer.KEYWORD, false);
+        return new TokenStreamComponents(tokenizer, tokenizer);
+      }
+
+      @Override
+      protected Reader initReader(String fieldName, Reader reader) {
+        return new JapaneseIterationMarkCharFilter(reader);
+      }
+    };
+    japaneseAnalyzer = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        Tokenizer tokenizer = new JapaneseTokenizer(newAttributeFactory(), null, false, JapaneseTokenizer.Mode.SEARCH);
+        return new TokenStreamComponents(tokenizer, tokenizer);
+      }
+
+      @Override
+      protected Reader initReader(String fieldName, Reader reader) {
+        return new JapaneseIterationMarkCharFilter(reader);
+      }
+    };
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    IOUtils.close(keywordAnalyzer, japaneseAnalyzer);
+    super.tearDown();
+  }
   
   public void testKanji() throws IOException {
     // Test single repetition

Modified: lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseKatakanaStemFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseKatakanaStemFilter.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseKatakanaStemFilter.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseKatakanaStemFilter.java Thu Mar  5 17:09:12 2015
@@ -32,14 +32,26 @@ import java.io.IOException;
  * Tests for {@link JapaneseKatakanaStemFilter}
  */
 public class TestJapaneseKatakanaStemFilter extends BaseTokenStreamTestCase {
-  private Analyzer analyzer = new Analyzer() {
-    @Override
-    protected TokenStreamComponents createComponents(String fieldName) {
-      // Use a MockTokenizer here since this filter doesn't really depend on Kuromoji
-      Tokenizer source = new MockTokenizer(MockTokenizer.WHITESPACE, false);
-      return new TokenStreamComponents(source, new JapaneseKatakanaStemFilter(source));
-    }
-  };
+  private Analyzer analyzer;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    analyzer = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        // Use a MockTokenizer here since this filter doesn't really depend on Kuromoji
+        Tokenizer source = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+        return new TokenStreamComponents(source, new JapaneseKatakanaStemFilter(source));
+      }
+    };
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    analyzer.close();
+    super.tearDown();
+  }
   
   /**
    * Test a few common katakana spelling variations.
@@ -73,6 +85,7 @@ public class TestJapaneseKatakanaStemFil
       }
     };
     checkOneTerm(a, "コーヒー", "コーヒー");
+    a.close();
   }
 
   public void testUnsupportedHalfWidthVariants() throws IOException {
@@ -93,5 +106,6 @@ public class TestJapaneseKatakanaStemFil
       }
     };
     checkOneTerm(a, "", "");
+    a.close();
   }
 }

Modified: lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseReadingFormFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseReadingFormFilter.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseReadingFormFilter.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseReadingFormFilter.java Thu Mar  5 17:09:12 2015
@@ -23,31 +23,41 @@ import org.apache.lucene.analysis.TokenS
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.cjk.CJKWidthFilter;
 import org.apache.lucene.analysis.core.KeywordTokenizer;
+import org.apache.lucene.util.IOUtils;
 
 import java.io.IOException;
-import java.io.Reader;
 import java.util.Random;
 
 /**
  * Tests for {@link TestJapaneseReadingFormFilter}
  */
 public class TestJapaneseReadingFormFilter extends BaseTokenStreamTestCase {
-  private Analyzer katakanaAnalyzer = new Analyzer() {
-    @Override
-    protected TokenStreamComponents createComponents(String fieldName) {
-      Tokenizer tokenizer = new JapaneseTokenizer(newAttributeFactory(), null, true, JapaneseTokenizer.Mode.SEARCH);
-      return new TokenStreamComponents(tokenizer, new JapaneseReadingFormFilter(tokenizer, false));
-    }
-  };
-
-  private Analyzer romajiAnalyzer = new Analyzer() {
-    @Override
-    protected TokenStreamComponents createComponents(String fieldName) {
-      Tokenizer tokenizer = new JapaneseTokenizer(newAttributeFactory(), null, true, JapaneseTokenizer.Mode.SEARCH);
-      return new TokenStreamComponents(tokenizer, new JapaneseReadingFormFilter(tokenizer, true));
-    }
-  };
-
+  private Analyzer katakanaAnalyzer, romajiAnalyzer;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    katakanaAnalyzer = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        Tokenizer tokenizer = new JapaneseTokenizer(newAttributeFactory(), null, true, JapaneseTokenizer.Mode.SEARCH);
+        return new TokenStreamComponents(tokenizer, new JapaneseReadingFormFilter(tokenizer, false));
+      }
+    };
+    romajiAnalyzer = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        Tokenizer tokenizer = new JapaneseTokenizer(newAttributeFactory(), null, true, JapaneseTokenizer.Mode.SEARCH);
+        return new TokenStreamComponents(tokenizer, new JapaneseReadingFormFilter(tokenizer, true));
+      }
+    };
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    IOUtils.close(katakanaAnalyzer, romajiAnalyzer);
+    super.tearDown();
+  }
 
   public void testKatakanaReadings() throws IOException {
     assertAnalyzesTo(katakanaAnalyzer, "今夜はロバート先生と話した",
@@ -67,6 +77,7 @@ public class TestJapaneseReadingFormFilt
     assertAnalyzesTo(a, "今夜はロバート先生と話した",
         new String[] { "コンヤ", "ハ", "ロバート", "センセイ", "ト", "ハナシ", "タ" }
     );
+    a.close();
   }
 
   public void testRomajiReadings() throws IOException {
@@ -87,6 +98,7 @@ public class TestJapaneseReadingFormFilt
     assertAnalyzesTo(a, "今夜はロバート先生と話した",
         new String[] { "kon'ya", "ha", "robato", "sensei", "to", "hanashi", "ta" }
     );
+    a.close();
   }
 
   public void testRandomData() throws IOException {
@@ -104,5 +116,6 @@ public class TestJapaneseReadingFormFilt
       }
     };
     checkOneTerm(a, "", "");
+    a.close();
   }
 }

Modified: lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizer.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizer.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizer.java Thu Mar  5 17:09:12 2015
@@ -35,9 +35,9 @@ import org.apache.lucene.analysis.ja.dic
 import org.apache.lucene.analysis.ja.dict.UserDictionary;
 import org.apache.lucene.analysis.ja.tokenattributes.*;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.UnicodeUtil;
-import org.apache.lucene.util.LuceneTestCase.Slow;
 
 public class TestJapaneseTokenizer extends BaseTokenStreamTestCase {
 
@@ -57,38 +57,47 @@ public class TestJapaneseTokenizer exten
       throw new RuntimeException(ioe);
     }
   }
-
-  private Analyzer analyzer = new Analyzer() {
-    @Override
-    protected TokenStreamComponents createComponents(String fieldName) {
-      Tokenizer tokenizer = new JapaneseTokenizer(newAttributeFactory(), readDict(), false, Mode.SEARCH);
-      return new TokenStreamComponents(tokenizer, tokenizer);
-    }
-  };
-
-  private Analyzer analyzerNormal = new Analyzer() {
-    @Override
-    protected TokenStreamComponents createComponents(String fieldName) {
-      Tokenizer tokenizer = new JapaneseTokenizer(newAttributeFactory(), readDict(), false, Mode.NORMAL);
-      return new TokenStreamComponents(tokenizer, tokenizer);
-    }
-  };
-
-  private Analyzer analyzerNoPunct = new Analyzer() {
-    @Override
-    protected TokenStreamComponents createComponents(String fieldName) {
-      Tokenizer tokenizer = new JapaneseTokenizer(newAttributeFactory(), readDict(), true, Mode.SEARCH);
-      return new TokenStreamComponents(tokenizer, tokenizer);
-    }
-  };
-
-  private Analyzer extendedModeAnalyzerNoPunct = new Analyzer() {
-    @Override
-    protected TokenStreamComponents createComponents(String fieldName) {
-      Tokenizer tokenizer = new JapaneseTokenizer(newAttributeFactory(), readDict(), true, Mode.EXTENDED);
-      return new TokenStreamComponents(tokenizer, tokenizer);
-    }
-  };
+  
+  private Analyzer analyzer, analyzerNormal, analyzerNoPunct, extendedModeAnalyzerNoPunct;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    analyzer = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        Tokenizer tokenizer = new JapaneseTokenizer(newAttributeFactory(), readDict(), false, Mode.SEARCH);
+        return new TokenStreamComponents(tokenizer, tokenizer);
+      }
+    };
+    analyzerNormal = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        Tokenizer tokenizer = new JapaneseTokenizer(newAttributeFactory(), readDict(), false, Mode.NORMAL);
+        return new TokenStreamComponents(tokenizer, tokenizer);
+      }
+    };
+    analyzerNoPunct = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        Tokenizer tokenizer = new JapaneseTokenizer(newAttributeFactory(), readDict(), true, Mode.SEARCH);
+        return new TokenStreamComponents(tokenizer, tokenizer);
+      }
+    };
+    extendedModeAnalyzerNoPunct = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        Tokenizer tokenizer = new JapaneseTokenizer(newAttributeFactory(), readDict(), true, Mode.EXTENDED);
+        return new TokenStreamComponents(tokenizer, tokenizer);
+      }
+    };
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    IOUtils.close(analyzer, analyzerNormal, analyzerNoPunct, extendedModeAnalyzerNoPunct);
+    super.tearDown();
+  }
 
   public void testNormalMode() throws Exception {
     assertAnalyzesTo(analyzerNormal,
@@ -197,16 +206,16 @@ public class TestJapaneseTokenizer exten
   public void testRandomHugeStringsMockGraphAfter() throws Exception {
     // Randomly inject graph tokens after JapaneseTokenizer:
     Random random = random();
-    checkRandomData(random,
-                    new Analyzer() {
-                      @Override
-                      protected TokenStreamComponents createComponents(String fieldName) {
-                        Tokenizer tokenizer = new JapaneseTokenizer(newAttributeFactory(), readDict(), false, Mode.SEARCH);
-                        TokenStream graph = new MockGraphTokenFilter(random(), tokenizer);
-                        return new TokenStreamComponents(tokenizer, graph);
-                      }
-                    },
-                    20*RANDOM_MULTIPLIER, 8192);
+    Analyzer analyzer = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        Tokenizer tokenizer = new JapaneseTokenizer(newAttributeFactory(), readDict(), false, Mode.SEARCH);
+        TokenStream graph = new MockGraphTokenFilter(random(), tokenizer);
+        return new TokenStreamComponents(tokenizer, graph);
+      }
+    };
+    checkRandomData(random, analyzer, 20*RANDOM_MULTIPLIER, 8192);
+    analyzer.close();
   }
 
   public void testLargeDocReliability() throws Exception {
@@ -367,6 +376,7 @@ public class TestJapaneseTokenizer exten
                      surfaceForms);
     
     assertTrue(gv2.finish().indexOf("22.0") != -1);
+    analyzer.close();
   }
 
   private void assertReadings(String input, String... readings) throws IOException {

Modified: lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestSearchMode.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestSearchMode.java?rev=1664421&r1=1664420&r2=1664421&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestSearchMode.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestSearchMode.java Thu Mar  5 17:09:12 2015
@@ -31,13 +31,25 @@ import org.apache.lucene.analysis.ja.Jap
 
 public class TestSearchMode extends BaseTokenStreamTestCase {
   private final static String SEGMENTATION_FILENAME = "search-segmentation-tests.txt";
-  private final Analyzer analyzer = new Analyzer() {
-    @Override
-    protected TokenStreamComponents createComponents(String fieldName) {
-      Tokenizer tokenizer = new JapaneseTokenizer(newAttributeFactory(), null, true, Mode.SEARCH);
-      return new TokenStreamComponents(tokenizer, tokenizer);
-    }
-  };
+  private Analyzer analyzer;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    analyzer = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        Tokenizer tokenizer = new JapaneseTokenizer(newAttributeFactory(), null, true, Mode.SEARCH);
+        return new TokenStreamComponents(tokenizer, tokenizer);
+      }
+    };
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    analyzer.close();
+    super.tearDown();
+  }
 
   /** Test search mode segmentation */
   public void testSearchSegmentation() throws IOException {



Mime
View raw message