lucenenet-commits mailing list archives

From nightowl...@apache.org
Subject [06/50] [abbrv] lucenenet git commit: Lucene.Net.Analysis.Analyzer refactor: Renamed TokenStream() -> GetTokenStream() for consistency
Date Sun, 05 Mar 2017 11:48:45 GMT
Lucene.Net.Analysis.Analyzer refactor: Renamed TokenStream() -> GetTokenStream() for consistency


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/b5cae3f3
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/b5cae3f3
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/b5cae3f3

Branch: refs/heads/api-work
Commit: b5cae3f3fbf163368fed1f5ec6ce231d01a1e72e
Parents: 0f87ced
Author: Shad Storhaug <shad@shadstorhaug.com>
Authored: Fri Mar 3 08:37:30 2017 +0700
Committer: Shad Storhaug <shad@shadstorhaug.com>
Committed: Sun Mar 5 17:08:29 2017 +0700

----------------------------------------------------------------------
 .../Analysis/Miscellaneous/PatternAnalyzer.cs   |  4 +-
 .../Analysis/Synonym/SynonymMap.cs              |  2 +-
 .../SimpleNaiveBayesClassifier.cs               |  2 +-
 src/Lucene.Net.Core/Analysis/Analyzer.cs        | 22 +++----
 .../Analysis/ReusableStringReader.cs            |  2 +-
 src/Lucene.Net.Core/Document/Field.cs           |  4 +-
 src/Lucene.Net.Core/Util/QueryBuilder.cs        |  2 +-
 .../Highlight/Highlighter.cs                    |  4 +-
 .../Highlight/TokenSources.cs                   |  2 +-
 .../PostingsHighlight/PostingsHighlighter.cs    |  2 +-
 src/Lucene.Net.Memory/MemoryIndex.cs            |  2 +-
 src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs      |  2 +-
 .../Analyzing/AnalyzingQueryParser.cs           |  2 +-
 .../Classic/QueryParserBase.cs                  |  2 +-
 .../Processors/AnalyzerQueryNodeProcessor.cs    |  2 +-
 .../Xml/Builders/LikeThisQueryBuilder.cs        |  2 +-
 .../Xml/Builders/SpanOrTermsBuilder.cs          |  2 +-
 .../Xml/Builders/TermsFilterBuilder.cs          |  2 +-
 .../Xml/Builders/TermsQueryBuilder.cs           |  2 +-
 .../Queries/FuzzyLikeThisQuery.cs               |  2 +-
 .../Analyzing/AnalyzingInfixSuggester.cs        |  4 +-
 .../Suggest/Analyzing/AnalyzingSuggester.cs     |  4 +-
 .../Suggest/Analyzing/FreeTextSuggester.cs      |  2 +-
 .../Analysis/BaseTokenStreamTestCase.cs         | 24 ++++----
 .../Analysis/CollationTestBase.cs               | 12 ++--
 .../Analysis/Core/TestDuelingAnalyzers.cs       | 12 ++--
 .../Analysis/Core/TestKeywordAnalyzer.cs        |  2 +-
 .../Analysis/Core/TestStopAnalyzer.cs           |  6 +-
 .../Miscellaneous/PatternAnalyzerTest.cs        |  4 +-
 .../TestLimitTokenCountAnalyzer.cs              |  8 +--
 .../TestLimitTokenPositionFilter.cs             |  8 +--
 .../TestPerFieldAnalyzerWrapper.cs              |  4 +-
 .../Query/QueryAutoStopWordAnalyzerTest.cs      | 22 +++----
 .../Shingle/ShingleAnalyzerWrapperTest.cs       |  4 +-
 .../Analysis/Sinks/TestTeeSinkTokenFilter.cs    |  2 +-
 .../Analysis/Th/TestThaiAnalyzer.cs             |  4 +-
 .../Analysis/Util/TestCharTokenizers.cs         |  4 +-
 .../Pl/TestPolishAnalyzer.cs                    |  2 +-
 .../Custom/HighlightCustomQueryTest.cs          |  2 +-
 .../Highlight/HighlighterTest.cs                | 62 ++++++++++----------
 .../VectorHighlight/AbstractTestCase.cs         |  2 +-
 .../Index/Memory/MemoryIndexTest.cs             |  4 +-
 .../Analyzing/AnalyzingInfixSuggesterTest.cs    |  2 +-
 .../Analysis/TestMockAnalyzer.cs                | 12 ++--
 src/Lucene.Net.Tests/Document/TestDocument.cs   |  2 +-
 .../Index/TestIndexableField.cs                 |  2 +-
 src/Lucene.Net.Tests/Index/TestLongPostings.cs  |  2 +-
 .../Index/TestTermVectorsWriter.cs              |  2 +-
 src/Lucene.Net.Tests/Search/TestPhraseQuery.cs  |  2 +-
 49 files changed, 144 insertions(+), 144 deletions(-)
----------------------------------------------------------------------
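
For illustration only (not part of the commit), the rename is purely mechanical, so call sites change in the method name alone. A minimal before/after sketch of a caller; the analyzer choice, field name, text, and version constant below are placeholders, not taken from the commit:

    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Util;

    class RenameExample
    {
        static void Main()
        {
            Analyzer analyzer = new WhitespaceAnalyzer(LuceneVersion.LUCENE_48);

            // Before this commit:
            //   TokenStream ts = analyzer.TokenStream("body", new StringReader("hello world"));
            // After this commit:
            using (TokenStream ts = analyzer.GetTokenStream("body", new StringReader("hello world")))
            {
                // consume ts as usual (Reset / IncrementToken / End)
            }
        }
    }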


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs
index 59ce195..4b5da76 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs
@@ -219,8 +219,8 @@ namespace Lucene.Net.Analysis.Miscellaneous
 
         /// <summary>
         /// Creates a token stream that tokenizes all the text in the given SetReader;
-        /// This implementation forwards to <see cref="Analyzer.TokenStream(string, TextReader)"/> and is
-        /// less efficient than <see cref="Analyzer.TokenStream(string, TextReader)"/>.
+        /// This implementation forwards to <see cref="Analyzer.GetTokenStream(string, TextReader)"/> and is
+        /// less efficient than <see cref="Analyzer.GetTokenStream(string, TextReader)"/>.
         /// </summary>
         /// <param name="fieldName">
         ///            the name of the field to tokenize (currently ignored). </param>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs
index 8fd98ce..9184a19 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs
@@ -360,7 +360,7 @@ namespace Lucene.Net.Analysis.Synonym
             public virtual CharsRef Analyze(string text, CharsRef reuse)
             {
                 IOException priorException = null;
-                TokenStream ts = analyzer.TokenStream("", text);
+                TokenStream ts = analyzer.GetTokenStream("", text);
                 try
                 {
                     var termAtt = ts.AddAttribute<ICharTermAttribute>();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Classification/SimpleNaiveBayesClassifier.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Classification/SimpleNaiveBayesClassifier.cs b/src/Lucene.Net.Classification/SimpleNaiveBayesClassifier.cs
index 7172e23..638c428 100644
--- a/src/Lucene.Net.Classification/SimpleNaiveBayesClassifier.cs
+++ b/src/Lucene.Net.Classification/SimpleNaiveBayesClassifier.cs
@@ -112,7 +112,7 @@ namespace Lucene.Net.Classification
         {
             ICollection<string> result = new LinkedList<string>();
             foreach (string textFieldName in _textFieldNames) {
-                TokenStream tokenStream = _analyzer.TokenStream(textFieldName, new StringReader(doc));
+                TokenStream tokenStream = _analyzer.GetTokenStream(textFieldName, new StringReader(doc));
                 try 
                 {
                     ICharTermAttribute charTermAttribute = tokenStream.AddAttribute<ICharTermAttribute>();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Core/Analysis/Analyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/Analyzer.cs b/src/Lucene.Net.Core/Analysis/Analyzer.cs
index bb31036..51f0ab1 100644
--- a/src/Lucene.Net.Core/Analysis/Analyzer.cs
+++ b/src/Lucene.Net.Core/Analysis/Analyzer.cs
@@ -30,7 +30,7 @@ namespace Lucene.Net.Analysis
     /// <para/>
     /// In order to define what analysis is done, subclasses must define their
     /// <see cref="TokenStreamComponents"/> in <see cref="CreateComponents(string, TextReader)"/>.
-    /// The components are then reused in each call to <see cref="TokenStream(string, TextReader)"/>.
+    /// The components are then reused in each call to <see cref="GetTokenStream(string, TextReader)"/>.
     /// <para/>
     /// Simple example:
     /// <code>
@@ -73,7 +73,7 @@ namespace Lucene.Net.Analysis
 
         /// <summary>
         /// Create a new <see cref="Analyzer"/>, reusing the same set of components per-thread
-        /// across calls to <see cref="TokenStream(string, TextReader)"/>.
+        /// across calls to <see cref="GetTokenStream(string, TextReader)"/>.
         /// </summary>
         public Analyzer()
             : this(GLOBAL_REUSE_STRATEGY)
@@ -179,8 +179,8 @@ namespace Lucene.Net.Analysis
         /// <returns> <see cref="Analysis.TokenStream"/> for iterating the analyzed content of <see cref="TextReader"/> </returns>
         /// <exception cref="AlreadyClosedException"> if the Analyzer is disposed. </exception>
         /// <exception cref="IOException"> if an i/o error occurs (may rarely happen for strings). </exception>
-        /// <seealso cref="TokenStream(string, string)"/>
-        public TokenStream TokenStream(string fieldName, TextReader reader) // LUCENENET TODO: Rename GetTokenStream ?
+        /// <seealso cref="GetTokenStream(string, string)"/>
+        public TokenStream GetTokenStream(string fieldName, TextReader reader)
         {
             TokenStreamComponents components = reuseStrategy.GetReusableComponents(this, fieldName);
             TextReader r = InitReader(fieldName, reader);
@@ -216,8 +216,8 @@ namespace Lucene.Net.Analysis
         /// <returns><see cref="Analysis.TokenStream"/> for iterating the analyzed content of <paramref name="reader"/></returns>
         /// <exception cref="AlreadyClosedException"> if the Analyzer is disposed. </exception>
         /// <exception cref="IOException"> if an i/o error occurs (may rarely happen for strings). </exception>
-        /// <seealso cref="TokenStream(string, TextReader)"/>
-        public TokenStream TokenStream(string fieldName, string text) // LUCENENET TODO: Rename GetTokenStream ?
+        /// <seealso cref="GetTokenStream(string, TextReader)"/>
+        public TokenStream GetTokenStream(string fieldName, string text)
         {
             TokenStreamComponents components = reuseStrategy.GetReusableComponents(this, fieldName);
             ReusableStringReader strReader =
@@ -264,7 +264,7 @@ namespace Lucene.Net.Analysis
         /// exact <see cref="Search.PhraseQuery"/> matches, for instance, across <see cref="Index.IIndexableField"/> instance boundaries.
         /// </summary>
         /// <param name="fieldName"> <see cref="Index.IIndexableField"/> name being indexed. </param>
-        /// <returns> position increment gap, added to the next token emitted from <see cref="TokenStream(string, TextReader)"/>.
+        /// <returns> position increment gap, added to the next token emitted from <see cref="GetTokenStream(string, TextReader)"/>.
         ///         this value must be <c>&gt;= 0</c>.</returns>
         public virtual int GetPositionIncrementGap(string fieldName)
         {
@@ -278,7 +278,7 @@ namespace Lucene.Net.Analysis
         /// produced at least one token for indexing.
         /// </summary>
         /// <param name="fieldName"> the field just indexed </param>
-        /// <returns> offset gap, added to the next token emitted from <see cref="TokenStream(string, TextReader)"/>.
+        /// <returns> offset gap, added to the next token emitted from <see cref="GetTokenStream(string, TextReader)"/>.
         ///         this value must be <c>&gt;= 0</c>. </returns>
         public virtual int GetOffsetGap(string fieldName)
         {
@@ -312,7 +312,7 @@ namespace Lucene.Net.Analysis
         /// access to the source (<see cref="Analysis.Tokenizer"/>) and the outer end (sink), an
         /// instance of <see cref="TokenFilter"/> which also serves as the
         /// <see cref="Analysis.TokenStream"/> returned by
-        /// <seealso cref="Analyzer.TokenStream(string, TextReader)"/>.
+        /// <seealso cref="Analyzer.GetTokenStream(string, TextReader)"/>.
         /// </summary>
         public class TokenStreamComponents
         {
@@ -328,7 +328,7 @@ namespace Lucene.Net.Analysis
             protected readonly TokenStream m_sink;
 
             /// <summary>
-            /// Internal cache only used by <see cref="Analyzer.TokenStream(string, string)"/>. </summary>
+            /// Internal cache only used by <see cref="Analyzer.GetTokenStream(string, string)"/>. </summary>
             internal ReusableStringReader reusableStringReader;
 
             /// <summary>
@@ -395,7 +395,7 @@ namespace Lucene.Net.Analysis
 
         /// <summary>
         /// Strategy defining how <see cref="TokenStreamComponents"/> are reused per call to
-        /// <see cref="Analyzer.TokenStream(string, TextReader)"/>.
+        /// <see cref="Analyzer.GetTokenStream(string, TextReader)"/>.
         /// </summary>
         public abstract class ReuseStrategy
         {
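
As a sketch of the contract the doc comments above describe (not from this commit; the tokenizer/filter chain and version constant are arbitrary stand-ins, and the exact override modifiers may differ between versions): a subclass builds its pipeline once in CreateComponents, and the analyzer's ReuseStrategy then hands those components back on every GetTokenStream call.

    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Util;

    class MyAnalyzer : Analyzer
    {
        // Called once per thread/field; the resulting components are then
        // reused by every call to GetTokenStream(string, TextReader).
        protected internal override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
        {
            Tokenizer source = new WhitespaceTokenizer(LuceneVersion.LUCENE_48, reader);
            TokenStream sink = new LowerCaseFilter(LuceneVersion.LUCENE_48, source);
            return new TokenStreamComponents(source, sink);
        }
    }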

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Core/Analysis/ReusableStringReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/ReusableStringReader.cs b/src/Lucene.Net.Core/Analysis/ReusableStringReader.cs
index e764ab1..b97ebe4 100644
--- a/src/Lucene.Net.Core/Analysis/ReusableStringReader.cs
+++ b/src/Lucene.Net.Core/Analysis/ReusableStringReader.cs
@@ -20,7 +20,7 @@ namespace Lucene.Net.Analysis
      */
 
     /// <summary>
-    /// Internal class to enable reuse of the string reader by <see cref="Analyzer.TokenStream(string, string)"/>
+    /// Internal class to enable reuse of the string reader by <see cref="Analyzer.GetTokenStream(string, string)"/>
     /// </summary>
     public sealed class ReusableStringReader : System.IO.TextReader
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Core/Document/Field.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Document/Field.cs b/src/Lucene.Net.Core/Document/Field.cs
index fb55704..9412b7f 100644
--- a/src/Lucene.Net.Core/Document/Field.cs
+++ b/src/Lucene.Net.Core/Document/Field.cs
@@ -620,12 +620,12 @@ namespace Lucene.Net.Documents
             }
             else if (GetReaderValue() != null)
             {
-                return analyzer.TokenStream(Name, GetReaderValue());
+                return analyzer.GetTokenStream(Name, GetReaderValue());
             }
             else if (GetStringValue() != null)
             {
                 TextReader sr = new StringReader(GetStringValue());
-                return analyzer.TokenStream(Name, sr);
+                return analyzer.GetTokenStream(Name, sr);
             }
 
             throw new System.ArgumentException("Field must have either TokenStream, String, Reader or Number value; got " + this);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Core/Util/QueryBuilder.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Util/QueryBuilder.cs b/src/Lucene.Net.Core/Util/QueryBuilder.cs
index 86caddd..18cd2d0 100644
--- a/src/Lucene.Net.Core/Util/QueryBuilder.cs
+++ b/src/Lucene.Net.Core/Util/QueryBuilder.cs
@@ -206,7 +206,7 @@ namespace Lucene.Net.Util
             TokenStream source = null;
             try
             {
-                source = analyzer.TokenStream(field, new StringReader(queryText));
+                source = analyzer.GetTokenStream(field, new StringReader(queryText));
                 source.Reset();
                 buffer = new CachingTokenFilter(source);
                 buffer.Reset();
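
Most of the call sites touched by this commit follow the same consume pattern seen here: GetTokenStream, Reset, an IncrementToken loop, End, then dispose. A minimal sketch of that loop (the method name is hypothetical, and the attribute namespace is assumed, not taken from the commit):

    using System;
    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.TokenAttributes;

    static class TokenStreamDemo
    {
        // Prints each term the analyzer produces for the given text.
        internal static void PrintTokens(Analyzer analyzer, string field, string text)
        {
            TokenStream ts = analyzer.GetTokenStream(field, new StringReader(text));
            try
            {
                ICharTermAttribute termAtt = ts.AddAttribute<ICharTermAttribute>();
                ts.Reset();               // must be called before the first IncrementToken()
                while (ts.IncrementToken())
                {
                    Console.WriteLine(termAtt.ToString());
                }
                ts.End();                 // finalize end-of-stream state (final offset etc.)
            }
            finally
            {
                ts.Dispose();             // return the reused components to the analyzer
            }
        }
    }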

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Highlighter/Highlight/Highlighter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Highlighter/Highlight/Highlighter.cs b/src/Lucene.Net.Highlighter/Highlight/Highlighter.cs
index 4415745..56b78d5 100644
--- a/src/Lucene.Net.Highlighter/Highlight/Highlighter.cs
+++ b/src/Lucene.Net.Highlighter/Highlight/Highlighter.cs
@@ -68,7 +68,7 @@ namespace Lucene.Net.Search.Highlight
         /// <exception cref="InvalidTokenOffsetsException">thrown if any token's EndOffset exceeds the provided text's length</exception>
         public string GetBestFragment(Analyzer analyzer, string fieldName, string text)
         {
-            TokenStream tokenStream = analyzer.TokenStream(fieldName, text);
+            TokenStream tokenStream = analyzer.GetTokenStream(fieldName, text);
             return GetBestFragment(tokenStream, text);
         }
 
@@ -114,7 +114,7 @@ namespace Lucene.Net.Search.Highlight
             string text,
             int maxNumFragments)
         {
-            TokenStream tokenStream = analyzer.TokenStream(fieldName, text);
+            TokenStream tokenStream = analyzer.GetTokenStream(fieldName, text);
             return GetBestFragments(tokenStream, text, maxNumFragments);
         }
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Highlighter/Highlight/TokenSources.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Highlighter/Highlight/TokenSources.cs b/src/Lucene.Net.Highlighter/Highlight/TokenSources.cs
index 842eedf..7efbcf7 100644
--- a/src/Lucene.Net.Highlighter/Highlight/TokenSources.cs
+++ b/src/Lucene.Net.Highlighter/Highlight/TokenSources.cs
@@ -340,7 +340,7 @@ namespace Lucene.Net.Search.Highlight
         {
             try
             {
-                return analyzer.TokenStream(field, contents);
+                return analyzer.GetTokenStream(field, contents);
             }
             catch (IOException ex)
             {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs b/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs
index a7db1eb..eed7fe5 100644
--- a/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs
+++ b/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs
@@ -548,7 +548,7 @@ namespace Lucene.Net.Search.PostingsHighlight
                 // if there are multi-term matches, we have to initialize the "fake" enum for each document
                 if (automata.Length > 0)
                 {
-                    DocsAndPositionsEnum dp = MultiTermHighlighting.GetDocsEnum(analyzer.TokenStream(field, content), automata);
+                    DocsAndPositionsEnum dp = MultiTermHighlighting.GetDocsEnum(analyzer.GetTokenStream(field, content), automata);
                     dp.Advance(doc - subContext.DocBase);
                     postings[terms.Length - 1] = dp; // last term is the multiterm matcher
                 }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Memory/MemoryIndex.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Memory/MemoryIndex.cs b/src/Lucene.Net.Memory/MemoryIndex.cs
index 2152d43..a211c45 100644
--- a/src/Lucene.Net.Memory/MemoryIndex.cs
+++ b/src/Lucene.Net.Memory/MemoryIndex.cs
@@ -249,7 +249,7 @@ namespace Lucene.Net.Index.Memory
             TokenStream stream;
             try
             {
-                stream = analyzer.TokenStream(fieldName, text);
+                stream = analyzer.GetTokenStream(fieldName, text);
             }
             catch (IOException ex)
             {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs b/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs
index d557213..f4d32c8 100644
--- a/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs
+++ b/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs
@@ -584,7 +584,7 @@ namespace Lucene.Net.Queries.Mlt
             {
                 throw new System.NotSupportedException("To use MoreLikeThis without " + "term vectors, you must provide an Analyzer");
             }
-            var ts = Analyzer.TokenStream(fieldName, r);
+            var ts = Analyzer.GetTokenStream(fieldName, r);
             try
             {
                 int tokenCount = 0;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.QueryParser/Analyzing/AnalyzingQueryParser.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.QueryParser/Analyzing/AnalyzingQueryParser.cs b/src/Lucene.Net.QueryParser/Analyzing/AnalyzingQueryParser.cs
index df8c4f4..fb51b28 100644
--- a/src/Lucene.Net.QueryParser/Analyzing/AnalyzingQueryParser.cs
+++ b/src/Lucene.Net.QueryParser/Analyzing/AnalyzingQueryParser.cs
@@ -165,7 +165,7 @@ namespace Lucene.Net.QueryParsers.Analyzing
             TokenStream stream = null;
             try
             {
-                stream = Analyzer.TokenStream(field, chunk);
+                stream = Analyzer.GetTokenStream(field, chunk);
                 stream.Reset();
                 ICharTermAttribute termAtt = stream.GetAttribute<ICharTermAttribute>();
                 // get first and hopefully only output token

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.QueryParser/Classic/QueryParserBase.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.QueryParser/Classic/QueryParserBase.cs b/src/Lucene.Net.QueryParser/Classic/QueryParserBase.cs
index 2702716..0779bf6 100644
--- a/src/Lucene.Net.QueryParser/Classic/QueryParserBase.cs
+++ b/src/Lucene.Net.QueryParser/Classic/QueryParserBase.cs
@@ -563,7 +563,7 @@ namespace Lucene.Net.QueryParsers.Classic
             TokenStream source = null;
             try
             {
-                source = analyzerIn.TokenStream(field, part);
+                source = analyzerIn.GetTokenStream(field, part);
                 source.Reset();
 
                 ITermToBytesRefAttribute termAtt = source.GetAttribute<ITermToBytesRefAttribute>();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.QueryParser/Flexible/Standard/Processors/AnalyzerQueryNodeProcessor.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.QueryParser/Flexible/Standard/Processors/AnalyzerQueryNodeProcessor.cs b/src/Lucene.Net.QueryParser/Flexible/Standard/Processors/AnalyzerQueryNodeProcessor.cs
index 89ce4fe..559a3a5 100644
--- a/src/Lucene.Net.QueryParser/Flexible/Standard/Processors/AnalyzerQueryNodeProcessor.cs
+++ b/src/Lucene.Net.QueryParser/Flexible/Standard/Processors/AnalyzerQueryNodeProcessor.cs
@@ -116,7 +116,7 @@ namespace Lucene.Net.QueryParsers.Flexible.Standard.Processors
                 TokenStream source = null;
                 try
                 {
-                    source = this.analyzer.TokenStream(field, text);
+                    source = this.analyzer.GetTokenStream(field, text);
                     source.Reset();
                     buffer = new CachingTokenFilter(source);
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.QueryParser/Xml/Builders/LikeThisQueryBuilder.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.QueryParser/Xml/Builders/LikeThisQueryBuilder.cs b/src/Lucene.Net.QueryParser/Xml/Builders/LikeThisQueryBuilder.cs
index 5b04d1c..4e6a70a 100644
--- a/src/Lucene.Net.QueryParser/Xml/Builders/LikeThisQueryBuilder.cs
+++ b/src/Lucene.Net.QueryParser/Xml/Builders/LikeThisQueryBuilder.cs
@@ -76,7 +76,7 @@ namespace Lucene.Net.QueryParsers.Xml.Builders
                     TokenStream ts = null;
                     try
                     {
-                        ts = analyzer.TokenStream(field, stopWords);
+                        ts = analyzer.GetTokenStream(field, stopWords);
                         ICharTermAttribute termAtt = ts.AddAttribute<ICharTermAttribute>();
                         ts.Reset();
                         while (ts.IncrementToken())

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.QueryParser/Xml/Builders/SpanOrTermsBuilder.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.QueryParser/Xml/Builders/SpanOrTermsBuilder.cs b/src/Lucene.Net.QueryParser/Xml/Builders/SpanOrTermsBuilder.cs
index 1738e0d..5083797 100644
--- a/src/Lucene.Net.QueryParser/Xml/Builders/SpanOrTermsBuilder.cs
+++ b/src/Lucene.Net.QueryParser/Xml/Builders/SpanOrTermsBuilder.cs
@@ -48,7 +48,7 @@ namespace Lucene.Net.QueryParsers.Xml.Builders
             TokenStream ts = null;
             try
             {
-                ts = analyzer.TokenStream(fieldName, value);
+                ts = analyzer.GetTokenStream(fieldName, value);
                 ITermToBytesRefAttribute termAtt = ts.AddAttribute<ITermToBytesRefAttribute>();
                 BytesRef bytes = termAtt.BytesRef;
                 ts.Reset();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.QueryParser/Xml/Builders/TermsFilterBuilder.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.QueryParser/Xml/Builders/TermsFilterBuilder.cs b/src/Lucene.Net.QueryParser/Xml/Builders/TermsFilterBuilder.cs
index fc562bc..b081a06 100644
--- a/src/Lucene.Net.QueryParser/Xml/Builders/TermsFilterBuilder.cs
+++ b/src/Lucene.Net.QueryParser/Xml/Builders/TermsFilterBuilder.cs
@@ -52,7 +52,7 @@ namespace Lucene.Net.QueryParsers.Xml.Builders
             TokenStream ts = null;
             try
             {
-                ts = analyzer.TokenStream(fieldName, text);
+                ts = analyzer.GetTokenStream(fieldName, text);
                 ITermToBytesRefAttribute termAtt = ts.AddAttribute<ITermToBytesRefAttribute>();
                 BytesRef bytes = termAtt.BytesRef;
                 ts.Reset();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.QueryParser/Xml/Builders/TermsQueryBuilder.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.QueryParser/Xml/Builders/TermsQueryBuilder.cs b/src/Lucene.Net.QueryParser/Xml/Builders/TermsQueryBuilder.cs
index a93a327..946496c 100644
--- a/src/Lucene.Net.QueryParser/Xml/Builders/TermsQueryBuilder.cs
+++ b/src/Lucene.Net.QueryParser/Xml/Builders/TermsQueryBuilder.cs
@@ -48,7 +48,7 @@ namespace Lucene.Net.QueryParsers.Xml.Builders
             TokenStream ts = null;
             try
             {
-                ts = analyzer.TokenStream(fieldName, text);
+                ts = analyzer.GetTokenStream(fieldName, text);
                 ITermToBytesRefAttribute termAtt = ts.AddAttribute<ITermToBytesRefAttribute>();
                 Term term = null;
                 BytesRef bytes = termAtt.BytesRef;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Sandbox/Queries/FuzzyLikeThisQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Sandbox/Queries/FuzzyLikeThisQuery.cs b/src/Lucene.Net.Sandbox/Queries/FuzzyLikeThisQuery.cs
index b833083..78ce87a 100644
--- a/src/Lucene.Net.Sandbox/Queries/FuzzyLikeThisQuery.cs
+++ b/src/Lucene.Net.Sandbox/Queries/FuzzyLikeThisQuery.cs
@@ -196,7 +196,7 @@ namespace Lucene.Net.Sandbox.Queries
             {
                 return;
             }
-            TokenStream ts = analyzer.TokenStream(f.fieldName, f.queryString);
+            TokenStream ts = analyzer.GetTokenStream(f.fieldName, f.queryString);
             try
             {
                 ICharTermAttribute termAtt = ts.AddAttribute<ICharTermAttribute>();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingInfixSuggester.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingInfixSuggester.cs b/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingInfixSuggester.cs
index 27cb7b5..0a05e5f 100644
--- a/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingInfixSuggester.cs
+++ b/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingInfixSuggester.cs
@@ -425,7 +425,7 @@ namespace Lucene.Net.Search.Suggest.Analyzing
 
             try
             {
-                ts = m_queryAnalyzer.TokenStream("", new StringReader(key));
+                ts = m_queryAnalyzer.GetTokenStream("", new StringReader(key));
 
                 //long t0 = System.currentTimeMillis();
                 ts.Reset();
@@ -630,7 +630,7 @@ namespace Lucene.Net.Search.Suggest.Analyzing
         /// </summary>
         protected internal virtual object Highlight(string text, IEnumerable<string> matchedTokens, string prefixToken)
         {
-            TokenStream ts = m_queryAnalyzer.TokenStream("text", new StringReader(text));
+            TokenStream ts = m_queryAnalyzer.GetTokenStream("text", new StringReader(text));
             try
             {
                 var termAtt = ts.AddAttribute<ICharTermAttribute>();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingSuggester.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingSuggester.cs b/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingSuggester.cs
index 4aa7dc7..33c741b 100644
--- a/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingSuggester.cs
+++ b/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingSuggester.cs
@@ -950,7 +950,7 @@ namespace Lucene.Net.Search.Suggest.Analyzing
         {
             // Analyze surface form:
             Automaton automaton = null;
-            TokenStream ts = indexAnalyzer.TokenStream("", surfaceForm.Utf8ToString());
+            TokenStream ts = indexAnalyzer.GetTokenStream("", surfaceForm.Utf8ToString());
             try
             {
 
@@ -984,7 +984,7 @@ namespace Lucene.Net.Search.Suggest.Analyzing
             // TODO: is there a Reader from a CharSequence?
             // Turn tokenstream into automaton:
             Automaton automaton = null;
-            TokenStream ts = queryAnalyzer.TokenStream("", key);
+            TokenStream ts = queryAnalyzer.GetTokenStream("", key);
             try
             {
                 automaton = (TokenStreamToAutomaton).ToAutomaton(ts);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Suggest/Suggest/Analyzing/FreeTextSuggester.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Suggest/Suggest/Analyzing/FreeTextSuggester.cs b/src/Lucene.Net.Suggest/Suggest/Analyzing/FreeTextSuggester.cs
index f7624c0..a67fe77 100644
--- a/src/Lucene.Net.Suggest/Suggest/Analyzing/FreeTextSuggester.cs
+++ b/src/Lucene.Net.Suggest/Suggest/Analyzing/FreeTextSuggester.cs
@@ -532,7 +532,7 @@ namespace Lucene.Net.Search.Suggest.Analyzing
                 throw new System.ArgumentException("this suggester doesn't support contexts");
             }
 
-            TokenStream ts = queryAnalyzer.TokenStream("", key.ToString());
+            TokenStream ts = queryAnalyzer.GetTokenStream("", key.ToString());
             try
             {
                 ITermToBytesRefAttribute termBytesAtt = ts.AddAttribute<ITermToBytesRefAttribute>();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.TestFramework/Analysis/BaseTokenStreamTestCase.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.TestFramework/Analysis/BaseTokenStreamTestCase.cs b/src/Lucene.Net.TestFramework/Analysis/BaseTokenStreamTestCase.cs
index f7acb7b..c133457 100644
--- a/src/Lucene.Net.TestFramework/Analysis/BaseTokenStreamTestCase.cs
+++ b/src/Lucene.Net.TestFramework/Analysis/BaseTokenStreamTestCase.cs
@@ -430,19 +430,19 @@ namespace Lucene.Net.Analysis
         public static void AssertAnalyzesTo(Analyzer a, string input, string[] output, int[] startOffsets, int[] endOffsets, string[] types, int[] posIncrements)
         {
             CheckResetException(a, input);
-            AssertTokenStreamContents(a.TokenStream("dummy", new StringReader(input)), output, startOffsets, endOffsets, types, posIncrements, null, input.Length);
+            AssertTokenStreamContents(a.GetTokenStream("dummy", new StringReader(input)), output, startOffsets, endOffsets, types, posIncrements, null, input.Length);
         }
 
         public static void AssertAnalyzesTo(Analyzer a, string input, string[] output, int[] startOffsets, int[] endOffsets, string[] types, int[] posIncrements, int[] posLengths)
         {
             CheckResetException(a, input);
-            AssertTokenStreamContents(a.TokenStream("dummy", new StringReader(input)), output, startOffsets, endOffsets, types, posIncrements, posLengths, input.Length);
+            AssertTokenStreamContents(a.GetTokenStream("dummy", new StringReader(input)), output, startOffsets, endOffsets, types, posIncrements, posLengths, input.Length);
         }
 
         public static void AssertAnalyzesTo(Analyzer a, string input, string[] output, int[] startOffsets, int[] endOffsets, string[] types, int[] posIncrements, int[] posLengths, bool offsetsAreCorrect)
         {
             CheckResetException(a, input);
-            AssertTokenStreamContents(a.TokenStream("dummy", new StringReader(input)), output, startOffsets, endOffsets, types, posIncrements, posLengths, input.Length, offsetsAreCorrect);
+            AssertTokenStreamContents(a.GetTokenStream("dummy", new StringReader(input)), output, startOffsets, endOffsets, types, posIncrements, posLengths, input.Length, offsetsAreCorrect);
         }
 
         public static void AssertAnalyzesTo(Analyzer a, string input, string[] output)
@@ -477,7 +477,7 @@ namespace Lucene.Net.Analysis
 
         internal static void CheckResetException(Analyzer a, string input)
         {
-            TokenStream ts = a.TokenStream("bogus", new StringReader(input));
+            TokenStream ts = a.GetTokenStream("bogus", new StringReader(input));
             try
             {
                 if (ts.IncrementToken())
@@ -515,7 +515,7 @@ namespace Lucene.Net.Analysis
             }
 
             // check for a missing Close()
-            ts = a.TokenStream("bogus", new StringReader(input));
+            ts = a.GetTokenStream("bogus", new StringReader(input));
             ts.Reset();
             while (ts.IncrementToken())
             {
@@ -523,7 +523,7 @@ namespace Lucene.Net.Analysis
             ts.End();
             try
             {
-                ts = a.TokenStream("bogus", new StringReader(input));
+                ts = a.GetTokenStream("bogus", new StringReader(input));
                 Assert.Fail("didn't get expected exception when Close() not called");
             }
             catch (Exception)
@@ -926,7 +926,7 @@ namespace Lucene.Net.Analysis
             StringReader reader = new StringReader(text);
 
             TokenStream ts;
-            using (ts = a.TokenStream("dummy", useCharFilter ? (TextReader) new MockCharFilter(reader, remainder) : reader))
+            using (ts = a.GetTokenStream("dummy", useCharFilter ? (TextReader) new MockCharFilter(reader, remainder) : reader))
             {
                  termAtt = ts.HasAttribute<ICharTermAttribute>()
                     ? ts.GetAttribute<ICharTermAttribute>()
@@ -1002,7 +1002,7 @@ namespace Lucene.Net.Analysis
                             // currently allow it, so, we must call
                             // a.TokenStream inside the try since we may
                             // hit the exc on init:
-                            ts = a.TokenStream("dummy", useCharFilter ? (TextReader)new MockCharFilter(evilReader, remainder) : evilReader);
+                            ts = a.GetTokenStream("dummy", useCharFilter ? (TextReader)new MockCharFilter(evilReader, remainder) : evilReader);
                             ts.Reset();
                             while (ts.IncrementToken()) ;
                             Assert.Fail("did not hit exception");
@@ -1044,7 +1044,7 @@ namespace Lucene.Net.Analysis
                         }
 
                         reader = new StringReader(text);
-                        ts = a.TokenStream("dummy", useCharFilter ? (TextReader)new MockCharFilter(reader, remainder) : reader);
+                        ts = a.GetTokenStream("dummy", useCharFilter ? (TextReader)new MockCharFilter(reader, remainder) : reader);
                         ts.Reset();
                         for (int tokenCount = 0; tokenCount < numTokensToRead; tokenCount++)
                         {
@@ -1097,7 +1097,7 @@ namespace Lucene.Net.Analysis
                 reader = new MockReaderWrapper(random, text);
             }
 
-            ts = a.TokenStream("dummy", useCharFilter ? (TextReader)new MockCharFilter(reader, remainder) : reader);
+            ts = a.GetTokenStream("dummy", useCharFilter ? (TextReader)new MockCharFilter(reader, remainder) : reader);
             if (typeAtt != null && posIncAtt != null && posLengthAtt != null && offsetAtt != null)
             {
                 // offset + pos + posLength + type
@@ -1150,7 +1150,7 @@ namespace Lucene.Net.Analysis
         protected internal virtual string ToDot(Analyzer a, string inputText)
         {
             StringWriter sw = new StringWriter();
-            TokenStream ts = a.TokenStream("field", new StringReader(inputText));
+            TokenStream ts = a.GetTokenStream("field", new StringReader(inputText));
             ts.Reset();
             (new TokenStreamToDot(inputText, ts, /*new StreamWriter(*/(TextWriter)sw/*)*/)).ToDot();
             return sw.ToString();
@@ -1160,7 +1160,7 @@ namespace Lucene.Net.Analysis
         {
             using (StreamWriter w = new StreamWriter(new FileStream(localFileName, FileMode.Open), Encoding.UTF8))
             {
-                TokenStream ts = a.TokenStream("field", new StringReader(inputText));
+                TokenStream ts = a.GetTokenStream("field", new StringReader(inputText));
                 ts.Reset();
                 (new TokenStreamToDot(inputText, ts,/* new PrintWriter(*/w/*)*/)).ToDot();    
             }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.TestFramework/Analysis/CollationTestBase.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.TestFramework/Analysis/CollationTestBase.cs b/src/Lucene.Net.TestFramework/Analysis/CollationTestBase.cs
index f515319..bae1753 100644
--- a/src/Lucene.Net.TestFramework/Analysis/CollationTestBase.cs
+++ b/src/Lucene.Net.TestFramework/Analysis/CollationTestBase.cs
@@ -175,19 +175,19 @@ namespace Lucene.Net.Analysis
 				doc.Add(new TextField("contents", sortData[i][1], Field.Store.NO));
 				if (sortData[i][2] != null)
 				{
-					doc.Add(new TextField("US", usAnalyzer.TokenStream("US", new StringReader(sortData[i][2]))));
+					doc.Add(new TextField("US", usAnalyzer.GetTokenStream("US", new StringReader(sortData[i][2]))));
 				}
 				if (sortData[i][3] != null)
 				{
-					doc.Add(new TextField("France", franceAnalyzer.TokenStream("France", new StringReader(sortData[i][3]))));
+					doc.Add(new TextField("France", franceAnalyzer.GetTokenStream("France", new StringReader(sortData[i][3]))));
 				}
 				if (sortData[i][4] != null)
 				{
-					doc.Add(new TextField("Sweden", swedenAnalyzer.TokenStream("Sweden", new StringReader(sortData[i][4]))));
+					doc.Add(new TextField("Sweden", swedenAnalyzer.GetTokenStream("Sweden", new StringReader(sortData[i][4]))));
 				}
 				if (sortData[i][5] != null)
 				{
-					doc.Add(new TextField("Denmark", denmarkAnalyzer.TokenStream("Denmark", new StringReader(sortData[i][5]))));
+					doc.Add(new TextField("Denmark", denmarkAnalyzer.GetTokenStream("Denmark", new StringReader(sortData[i][5]))));
 				}
 				writer.AddDocument(doc);
 			}
@@ -248,7 +248,7 @@ namespace Lucene.Net.Analysis
 			{
 				string term = TestUtil.RandomSimpleString(Random());
 				IOException priorException = null;
-				TokenStream ts = analyzer.TokenStream("fake", new StringReader(term));
+				TokenStream ts = analyzer.GetTokenStream("fake", new StringReader(term));
 				try
 				{
 					ITermToBytesRefAttribute termAtt = ts.AddAttribute<ITermToBytesRefAttribute>();
@@ -309,7 +309,7 @@ namespace Lucene.Net.Analysis
 						string term = mapping.Key;
 						BytesRef expected = mapping.Value;
 						IOException priorException = null;
-						TokenStream ts = this.Analyzer.TokenStream("fake", new StringReader(term));
+						TokenStream ts = this.Analyzer.GetTokenStream("fake", new StringReader(term));
 						try
 						{
 							ITermToBytesRefAttribute termAtt = ts.AddAttribute<ITermToBytesRefAttribute>();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestDuelingAnalyzers.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestDuelingAnalyzers.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestDuelingAnalyzers.cs
index 5669873..38e5671 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestDuelingAnalyzers.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestDuelingAnalyzers.cs
@@ -65,7 +65,7 @@ namespace Lucene.Net.Analysis.Core
             for (int i = 0; i < 1000; i++)
             {
                 string s = TestUtil.RandomSimpleString(random);
-                assertEquals(s, left.TokenStream("foo", newStringReader(s)), right.TokenStream("foo", newStringReader(s)));
+                assertEquals(s, left.GetTokenStream("foo", newStringReader(s)), right.GetTokenStream("foo", newStringReader(s)));
             }
         }
 
@@ -98,7 +98,7 @@ namespace Lucene.Net.Analysis.Core
             for (int i = 0; i < numIterations; i++)
             {
                 string s = TestUtil.RandomSimpleString(random, maxLength);
-                assertEquals(s, left.TokenStream("foo", newStringReader(s)), right.TokenStream("foo", newStringReader(s)));
+                assertEquals(s, left.GetTokenStream("foo", newStringReader(s)), right.GetTokenStream("foo", newStringReader(s)));
             }
         }
 
@@ -127,7 +127,7 @@ namespace Lucene.Net.Analysis.Core
             for (int i = 0; i < 1000; i++)
             {
                 string s = TestUtil.RandomHtmlishString(random, 20);
-                assertEquals(s, left.TokenStream("foo", newStringReader(s)), right.TokenStream("foo", newStringReader(s)));
+                assertEquals(s, left.GetTokenStream("foo", newStringReader(s)), right.GetTokenStream("foo", newStringReader(s)));
             }
         }
 
@@ -159,7 +159,7 @@ namespace Lucene.Net.Analysis.Core
             for (int i = 0; i < numIterations; i++)
             {
                 string s = TestUtil.RandomHtmlishString(random, maxLength);
-                assertEquals(s, left.TokenStream("foo", newStringReader(s)), right.TokenStream("foo", newStringReader(s)));
+                assertEquals(s, left.GetTokenStream("foo", newStringReader(s)), right.GetTokenStream("foo", newStringReader(s)));
             }
         }
 
@@ -188,7 +188,7 @@ namespace Lucene.Net.Analysis.Core
             for (int i = 0; i < 1000; i++)
             {
                 string s = TestUtil.RandomUnicodeString(random);
-                assertEquals(s, left.TokenStream("foo", newStringReader(s)), right.TokenStream("foo", newStringReader(s)));
+                assertEquals(s, left.GetTokenStream("foo", newStringReader(s)), right.GetTokenStream("foo", newStringReader(s)));
             }
         }
 
@@ -220,7 +220,7 @@ namespace Lucene.Net.Analysis.Core
             for (int i = 0; i < numIterations; i++)
             {
                 string s = TestUtil.RandomUnicodeString(random, maxLength);
-                assertEquals(s, left.TokenStream("foo", newStringReader(s)), right.TokenStream("foo", newStringReader(s)));
+                assertEquals(s, left.GetTokenStream("foo", newStringReader(s)), right.GetTokenStream("foo", newStringReader(s)));
             }
         }
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestKeywordAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestKeywordAnalyzer.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestKeywordAnalyzer.cs
index 505d160..5860f89 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestKeywordAnalyzer.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestKeywordAnalyzer.cs
@@ -96,7 +96,7 @@ namespace Lucene.Net.Analysis.Core
         [Test]
         public virtual void TestOffsets()
         {
-            TokenStream stream = (new KeywordAnalyzer()).TokenStream("field", new StringReader("abcd"));
+            TokenStream stream = (new KeywordAnalyzer()).GetTokenStream("field", new StringReader("abcd"));
             try
             {
                 IOffsetAttribute offsetAtt = stream.AddAttribute<IOffsetAttribute>();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestStopAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestStopAnalyzer.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestStopAnalyzer.cs
index d15eebd..447bf18 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestStopAnalyzer.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestStopAnalyzer.cs
@@ -44,7 +44,7 @@ namespace Lucene.Net.Analysis.Core
         public virtual void TestDefaults()
         {
             assertTrue(stop != null);
-            TokenStream stream = stop.TokenStream("test", "This is a test of the english stop analyzer");
+            TokenStream stream = stop.GetTokenStream("test", "This is a test of the english stop analyzer");
             try
             {
                 assertTrue(stream != null);
@@ -68,7 +68,7 @@ namespace Lucene.Net.Analysis.Core
         {
             CharArraySet stopWordsSet = new CharArraySet(TEST_VERSION_CURRENT, new string[] { "good", "test", "analyzer" }, false);
             StopAnalyzer newStop = new StopAnalyzer(TEST_VERSION_CURRENT, stopWordsSet);
-            TokenStream stream = newStop.TokenStream("test", "This is a good test of the english stop analyzer");
+            TokenStream stream = newStop.GetTokenStream("test", "This is a good test of the english stop analyzer");
             try
             {
                 assertNotNull(stream);
@@ -95,7 +95,7 @@ namespace Lucene.Net.Analysis.Core
             StopAnalyzer newStop = new StopAnalyzer(TEST_VERSION_CURRENT, stopWordsSet);
             string s = "This is a good test of the english stop analyzer with positions";
             int[] expectedIncr = new int[] { 1, 1, 1, 3, 1, 1, 1, 2, 1 };
-            TokenStream stream = newStop.TokenStream("test", s);
+            TokenStream stream = newStop.GetTokenStream("test", s);
             try
             {
                 assertNotNull(stream);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzerTest.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzerTest.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzerTest.cs
index 6a42d93..495b0e8 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzerTest.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzerTest.cs
@@ -123,11 +123,11 @@ namespace Lucene.Net.Analysis.Miscellaneous
             AssertAnalyzesTo(analyzer, document, expected);
 
             // analysis with a "FastStringReader"
-            TokenStream ts = analyzer.TokenStream("dummy", new PatternAnalyzer.FastStringReader(document));
+            TokenStream ts = analyzer.GetTokenStream("dummy", new PatternAnalyzer.FastStringReader(document));
             AssertTokenStreamContents(ts, expected);
 
             // analysis of a String, uses PatternAnalyzer.tokenStream(String, String)
-            TokenStream ts2 = analyzer.TokenStream("dummy", new StringReader(document));
+            TokenStream ts2 = analyzer.GetTokenStream("dummy", new StringReader(document));
             AssertTokenStreamContents(ts2, expected);
         }
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestLimitTokenCountAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestLimitTokenCountAnalyzer.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestLimitTokenCountAnalyzer.cs
index 6b32b8c..ad3dd10 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestLimitTokenCountAnalyzer.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestLimitTokenCountAnalyzer.cs
@@ -39,14 +39,14 @@ namespace Lucene.Net.Analysis.Miscellaneous
                 Analyzer a = new LimitTokenCountAnalyzer(mock, 2, consumeAll);
 
                 // dont use assertAnalyzesTo here, as the end offset is not the end of the string (unless consumeAll is true, in which case its correct)!
-                AssertTokenStreamContents(a.TokenStream("dummy", "1  2     3  4  5"), new string[] { "1", "2" }, new int[] { 0, 3 }, new int[] { 1, 4 }, consumeAll ? (int?)16 : null);
-                AssertTokenStreamContents(a.TokenStream("dummy", "1 2 3 4 5"), new string[] { "1", "2" }, new int[] { 0, 2 }, new int[] { 1, 3 }, consumeAll ? (int?)9 : null);
+                AssertTokenStreamContents(a.GetTokenStream("dummy", "1  2     3  4  5"), new string[] { "1", "2" }, new int[] { 0, 3 }, new int[] { 1, 4 }, consumeAll ? (int?)16 : null);
+                AssertTokenStreamContents(a.GetTokenStream("dummy", "1 2 3 4 5"), new string[] { "1", "2" }, new int[] { 0, 2 }, new int[] { 1, 3 }, consumeAll ? (int?)9 : null);
 
                 // less than the limit, ensure we behave correctly
-                AssertTokenStreamContents(a.TokenStream("dummy", "1  "), new string[] { "1" }, new int[] { 0 }, new int[] { 1 }, (consumeAll ? (int?)3 : null));
+                AssertTokenStreamContents(a.GetTokenStream("dummy", "1  "), new string[] { "1" }, new int[] { 0 }, new int[] { 1 }, (consumeAll ? (int?)3 : null));
 
                 // equal to limit
-                AssertTokenStreamContents(a.TokenStream("dummy", "1  2  "), new string[] { "1", "2" }, new int[] { 0, 3 }, new int[] { 1, 4 }, consumeAll ? (int?)6 : null);
+                AssertTokenStreamContents(a.GetTokenStream("dummy", "1  2  "), new string[] { "1", "2" }, new int[] { 0, 3 }, new int[] { 1, 4 }, consumeAll ? (int?)6 : null);
             }
         }
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestLimitTokenPositionFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestLimitTokenPositionFilter.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestLimitTokenPositionFilter.cs
index e084275..3b4c014 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestLimitTokenPositionFilter.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestLimitTokenPositionFilter.cs
@@ -35,14 +35,14 @@ namespace Lucene.Net.Analysis.Miscellaneous
                 Analyzer a = new AnalyzerAnonymousInnerClassHelper(consumeAll);
 
                 // don't use assertAnalyzesTo here, as the end offset is not the end of the string (unless consumeAll is true, in which case its correct)!
-                AssertTokenStreamContents(a.TokenStream("dummy", "1  2     3  4  5"), new string[] { "1", "2" }, new int[] { 0, 3 }, new int[] { 1, 4 }, consumeAll ? 16 : (int?)null);
-                AssertTokenStreamContents(a.TokenStream("dummy", new StringReader("1 2 3 4 5")), new string[] { "1", "2" }, new int[] { 0, 2 }, new int[] { 1, 3 }, consumeAll ? 9 : (int?)null);
+                AssertTokenStreamContents(a.GetTokenStream("dummy", "1  2     3  4  5"), new string[] { "1", "2" }, new int[] { 0, 3 }, new int[] { 1, 4 }, consumeAll ? 16 : (int?)null);
+                AssertTokenStreamContents(a.GetTokenStream("dummy", new StringReader("1 2 3 4 5")), new string[] { "1", "2" }, new int[] { 0, 2 }, new int[] { 1, 3 }, consumeAll ? 9 : (int?)null);
 
                 // less than the limit, ensure we behave correctly
-                AssertTokenStreamContents(a.TokenStream("dummy", "1  "), new string[] { "1" }, new int[] { 0 }, new int[] { 1 }, consumeAll ? 3 : (int?)null);
+                AssertTokenStreamContents(a.GetTokenStream("dummy", "1  "), new string[] { "1" }, new int[] { 0 }, new int[] { 1 }, consumeAll ? 3 : (int?)null);
 
                 // equal to limit
-                AssertTokenStreamContents(a.TokenStream("dummy", "1  2  "), new string[] { "1", "2" }, new int[] { 0, 3 }, new int[] { 1, 4 }, consumeAll ? 6 : (int?)null);
+                AssertTokenStreamContents(a.GetTokenStream("dummy", "1  2  "), new string[] { "1", "2" }, new int[] { 0, 3 }, new int[] { 1, 4 }, consumeAll ? 6 : (int?)null);
             }
         }
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestPerFieldAnalyzerWrapper.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestPerFieldAnalyzerWrapper.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestPerFieldAnalyzerWrapper.cs
index 6fd5277..c08c1db 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestPerFieldAnalyzerWrapper.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestPerFieldAnalyzerWrapper.cs
@@ -36,7 +36,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
 
             PerFieldAnalyzerWrapper analyzer = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT), analyzerPerField);
 
-            TokenStream tokenStream = analyzer.TokenStream("field", text);
+            TokenStream tokenStream = analyzer.GetTokenStream("field", text);
             try
             {
                 ICharTermAttribute termAtt = tokenStream.GetAttribute<ICharTermAttribute>();
@@ -52,7 +52,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
                 IOUtils.CloseWhileHandlingException(tokenStream);
             }
 
-            tokenStream = analyzer.TokenStream("special", text);
+            tokenStream = analyzer.GetTokenStream("special", text);
             try
             {
                 ICharTermAttribute termAtt = tokenStream.GetAttribute<ICharTermAttribute>();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Tests.Analysis.Common/Analysis/Query/QueryAutoStopWordAnalyzerTest.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Query/QueryAutoStopWordAnalyzerTest.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Query/QueryAutoStopWordAnalyzerTest.cs
index d062fa0..4de8d09 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Query/QueryAutoStopWordAnalyzerTest.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Query/QueryAutoStopWordAnalyzerTest.cs
@@ -64,10 +64,10 @@ namespace Lucene.Net.Analysis.Query
         {
             // Note: an empty list of fields passed in
             protectedAnalyzer = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, appAnalyzer, reader, Collections.EmptyList<string>(), 1);
-            TokenStream protectedTokenStream = protectedAnalyzer.TokenStream("variedField", "quick");
+            TokenStream protectedTokenStream = protectedAnalyzer.GetTokenStream("variedField", "quick");
             AssertTokenStreamContents(protectedTokenStream, new string[] { "quick" });
 
-            protectedTokenStream = protectedAnalyzer.TokenStream("repetitiveField", "boring");
+            protectedTokenStream = protectedAnalyzer.GetTokenStream("repetitiveField", "boring");
             AssertTokenStreamContents(protectedTokenStream, new string[] { "boring" });
         }
 
@@ -75,7 +75,7 @@ namespace Lucene.Net.Analysis.Query
         public virtual void TestDefaultStopwordsAllFields()
         {
             protectedAnalyzer = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, appAnalyzer, reader);
-            TokenStream protectedTokenStream = protectedAnalyzer.TokenStream("repetitiveField", "boring");
+            TokenStream protectedTokenStream = protectedAnalyzer.GetTokenStream("repetitiveField", "boring");
             AssertTokenStreamContents(protectedTokenStream, new string[0]); // Default stop word filtering will remove boring
         }
 
@@ -84,16 +84,16 @@ namespace Lucene.Net.Analysis.Query
         {
             protectedAnalyzer = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, appAnalyzer, reader, 1f / 2f);
 
-            TokenStream protectedTokenStream = protectedAnalyzer.TokenStream("repetitiveField", "boring");
+            TokenStream protectedTokenStream = protectedAnalyzer.GetTokenStream("repetitiveField", "boring");
             // A filter on terms in > one half of docs remove boring
             AssertTokenStreamContents(protectedTokenStream, new string[0]);
 
-            protectedTokenStream = protectedAnalyzer.TokenStream("repetitiveField", "vaguelyboring");
+            protectedTokenStream = protectedAnalyzer.GetTokenStream("repetitiveField", "vaguelyboring");
             // A filter on terms in > half of docs should not remove vaguelyBoring
             AssertTokenStreamContents(protectedTokenStream, new string[] { "vaguelyboring" });
 
             protectedAnalyzer = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, appAnalyzer, reader, 1f / 4f);
-            protectedTokenStream = protectedAnalyzer.TokenStream("repetitiveField", "vaguelyboring");
+            protectedTokenStream = protectedAnalyzer.GetTokenStream("repetitiveField", "vaguelyboring");
             // A filter on terms in > quarter of docs should remove vaguelyBoring
             AssertTokenStreamContents(protectedTokenStream, new string[0]);
         }
@@ -102,12 +102,12 @@ namespace Lucene.Net.Analysis.Query
         public virtual void TestStopwordsPerFieldMaxPercentDocs()
         {
             protectedAnalyzer = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, appAnalyzer, reader, Arrays.AsList("variedField"), 1f / 2f);
-            TokenStream protectedTokenStream = protectedAnalyzer.TokenStream("repetitiveField", "boring");
+            TokenStream protectedTokenStream = protectedAnalyzer.GetTokenStream("repetitiveField", "boring");
             // A filter on one Field should not affect queries on another
             AssertTokenStreamContents(protectedTokenStream, new string[] { "boring" });
 
             protectedAnalyzer = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, appAnalyzer, reader, Arrays.AsList("variedField", "repetitiveField"), 1f / 2f);
-            protectedTokenStream = protectedAnalyzer.TokenStream("repetitiveField", "boring");
+            protectedTokenStream = protectedAnalyzer.GetTokenStream("repetitiveField", "boring");
             // A filter on the right Field should affect queries on it
             AssertTokenStreamContents(protectedTokenStream, new string[0]);
         }
@@ -129,11 +129,11 @@ namespace Lucene.Net.Analysis.Query
         {
             protectedAnalyzer = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, appAnalyzer, reader, Arrays.AsList("repetitiveField"), 10);
 
-            TokenStream protectedTokenStream = protectedAnalyzer.TokenStream("repetitiveField", "boring");
+            TokenStream protectedTokenStream = protectedAnalyzer.GetTokenStream("repetitiveField", "boring");
             // Check filter set up OK
             AssertTokenStreamContents(protectedTokenStream, new string[0]);
 
-            protectedTokenStream = protectedAnalyzer.TokenStream("variedField", "boring");
+            protectedTokenStream = protectedAnalyzer.GetTokenStream("variedField", "boring");
             // Filter should not prevent stopwords in one field being used in another
             AssertTokenStreamContents(protectedTokenStream, new string[] { "boring" });
         }
@@ -142,7 +142,7 @@ namespace Lucene.Net.Analysis.Query
         public virtual void TestTokenStream()
         {
             QueryAutoStopWordAnalyzer a = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, new MockAnalyzer(Random(), MockTokenizer.WHITESPACE, false), reader, 10);
-            TokenStream ts = a.TokenStream("repetitiveField", "this boring");
+            TokenStream ts = a.GetTokenStream("repetitiveField", "this boring");
             AssertTokenStreamContents(ts, new string[] { "this" });
         }
     }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Tests.Analysis.Common/Analysis/Shingle/ShingleAnalyzerWrapperTest.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Shingle/ShingleAnalyzerWrapperTest.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Shingle/ShingleAnalyzerWrapperTest.cs
index 38e47ec..a711d7d 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Shingle/ShingleAnalyzerWrapperTest.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Shingle/ShingleAnalyzerWrapperTest.cs
@@ -91,7 +91,7 @@ namespace Lucene.Net.Analysis.Shingle
         {
             PhraseQuery q = new PhraseQuery();
 
-            TokenStream ts = analyzer.TokenStream("content", "this sentence");
+            TokenStream ts = analyzer.GetTokenStream("content", "this sentence");
             try
             {
                 int j = -1;
@@ -128,7 +128,7 @@ namespace Lucene.Net.Analysis.Shingle
         {
             BooleanQuery q = new BooleanQuery();
 
-            TokenStream ts = analyzer.TokenStream("content", "test sentence");
+            TokenStream ts = analyzer.GetTokenStream("content", "test sentence");
             try
             {
                 ICharTermAttribute termAtt = ts.AddAttribute<ICharTermAttribute>();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Tests.Analysis.Common/Analysis/Sinks/TestTeeSinkTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Sinks/TestTeeSinkTokenFilter.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Sinks/TestTeeSinkTokenFilter.cs
index 43f4790..01249ba 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Sinks/TestTeeSinkTokenFilter.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Sinks/TestTeeSinkTokenFilter.cs
@@ -99,7 +99,7 @@ namespace Lucene.Net.Analysis.Sinks
             Analyzer analyzer = new MockAnalyzer(Random(), MockTokenizer.WHITESPACE, false);
             IndexWriter w = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
             Document doc = new Document();
-            TokenStream tokenStream = analyzer.TokenStream("field", "abcd   ");
+            TokenStream tokenStream = analyzer.GetTokenStream("field", "abcd   ");
             TeeSinkTokenFilter tee = new TeeSinkTokenFilter(tokenStream);
             TokenStream sink = tee.NewSinkTokenStream();
             FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Tests.Analysis.Common/Analysis/Th/TestThaiAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Th/TestThaiAnalyzer.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Th/TestThaiAnalyzer.cs
index 5ff98e2..0cb17c7 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Th/TestThaiAnalyzer.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Th/TestThaiAnalyzer.cs
@@ -154,10 +154,10 @@ namespace Lucene.Net.Analysis.Th
             ThaiAnalyzer analyzer = new ThaiAnalyzer(LuceneVersion.LUCENE_30);
 #pragma warning restore 612, 618
             // just consume
-            TokenStream ts = analyzer.TokenStream("dummy", "ภาษาไทย");
+            TokenStream ts = analyzer.GetTokenStream("dummy", "ภาษาไทย");
             AssertTokenStreamContents(ts, new string[] { "ภาษา", "ไทย" });
             // this consumer adds flagsAtt, which this analyzer does not use. 
-            ts = analyzer.TokenStream("dummy", "ภาษาไทย");
+            ts = analyzer.GetTokenStream("dummy", "ภาษาไทย");
             ts.AddAttribute<IFlagsAttribute>();
             AssertTokenStreamContents(ts, new string[] { "ภาษา", "ไทย" });
         }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Tests.Analysis.Common/Analysis/Util/TestCharTokenizers.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Util/TestCharTokenizers.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Util/TestCharTokenizers.cs
index 106bafa..f9f935f 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Util/TestCharTokenizers.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Util/TestCharTokenizers.cs
@@ -124,7 +124,7 @@ namespace Lucene.Net.Analysis.Util
             for (var i = 0; i < num; i++)
             {
                 var s = TestUtil.RandomUnicodeString(Random());
-                var ts = analyzer.TokenStream("foo", s);
+                var ts = analyzer.GetTokenStream("foo", s);
                 try
                 {
                     ts.Reset();
@@ -187,7 +187,7 @@ namespace Lucene.Net.Analysis.Util
             for (var i = 0; i < num; i++)
             {
                 var s = TestUtil.RandomUnicodeString(Random());
-                var ts = analyzer.TokenStream("foo", s);
+                var ts = analyzer.GetTokenStream("foo", s);
                 try
                 {
                     ts.Reset();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Tests.Analysis.Stempel/Pl/TestPolishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Analysis.Stempel/Pl/TestPolishAnalyzer.cs b/src/Lucene.Net.Tests.Analysis.Stempel/Pl/TestPolishAnalyzer.cs
index e2e5dea..e09d150 100644
--- a/src/Lucene.Net.Tests.Analysis.Stempel/Pl/TestPolishAnalyzer.cs
+++ b/src/Lucene.Net.Tests.Analysis.Stempel/Pl/TestPolishAnalyzer.cs
@@ -87,7 +87,7 @@ namespace Lucene.Net.Analysis.Pl
             var text = "zyaolz 96619727 p";
             var reader = new StringReader(text);
             int remainder = 2;
-            using (var ts = a.TokenStream("dummy", (TextReader)new MockCharFilter(reader, remainder)))
+            using (var ts = a.GetTokenStream("dummy", (TextReader)new MockCharFilter(reader, remainder)))
             {
                 ts.Reset();
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Tests.Highlighter/Highlight/Custom/HighlightCustomQueryTest.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Highlighter/Highlight/Custom/HighlightCustomQueryTest.cs b/src/Lucene.Net.Tests.Highlighter/Highlight/Custom/HighlightCustomQueryTest.cs
index 1e461d8..481e15a 100644
--- a/src/Lucene.Net.Tests.Highlighter/Highlight/Custom/HighlightCustomQueryTest.cs
+++ b/src/Lucene.Net.Tests.Highlighter/Highlight/Custom/HighlightCustomQueryTest.cs
@@ -78,7 +78,7 @@ namespace Lucene.Net.Search.Highlight.Custom
             String text)
         {
             TokenStream tokenStream = new MockAnalyzer(Random(), MockTokenizer.SIMPLE,
-                true, MockTokenFilter.ENGLISH_STOPSET).TokenStream(fieldName, text);
+                true, MockTokenFilter.ENGLISH_STOPSET).GetTokenStream(fieldName, text);
             // Assuming "<B>", "</B>" used to highlight
             SimpleHTMLFormatter formatter = new SimpleHTMLFormatter();
             MyQueryScorer scorer = new MyQueryScorer(query, fieldName, FIELD_NAME);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Tests.Highlighter/Highlight/HighlighterTest.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Highlighter/Highlight/HighlighterTest.cs b/src/Lucene.Net.Tests.Highlighter/Highlight/HighlighterTest.cs
index 2cc4b14..3bc972e 100644
--- a/src/Lucene.Net.Tests.Highlighter/Highlight/HighlighterTest.cs
+++ b/src/Lucene.Net.Tests.Highlighter/Highlight/HighlighterTest.cs
@@ -232,7 +232,7 @@ namespace Lucene.Net.Search.Highlight
         private String highlightField(Query query, String fieldName, String text)
         {
             TokenStream tokenStream = new MockAnalyzer(Random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET)
-                .TokenStream(fieldName, text);
+                .GetTokenStream(fieldName, text);
             // Assuming "<B>", "</B>" used to highlight
             SimpleHTMLFormatter formatter = new SimpleHTMLFormatter();
             QueryScorer scorer = new QueryScorer(query, fieldName, FIELD_NAME);
@@ -256,7 +256,7 @@ namespace Lucene.Net.Search.Highlight
             for (int i = 0; i < hits.TotalHits; i++)
             {
                 String text = searcher.Doc(hits.ScoreDocs[i].Doc).Get(FIELD_NAME);
-                TokenStream tokenStream = analyzer.TokenStream(FIELD_NAME, text);
+                TokenStream tokenStream = analyzer.GetTokenStream(FIELD_NAME, text);
                 highlighter.TextFragmenter = (new SimpleFragmenter(40));
 
                 String result = highlighter.GetBestFragments(tokenStream, text, maxNumFragmentsRequired,
@@ -339,7 +339,7 @@ namespace Lucene.Net.Search.Highlight
             for (int i = 0; i < hits.TotalHits; i++)
             {
                 String text = searcher.Doc(hits.ScoreDocs[i].Doc).Get(FIELD_NAME);
-                TokenStream tokenStream = analyzer.TokenStream(FIELD_NAME, text);
+                TokenStream tokenStream = analyzer.GetTokenStream(FIELD_NAME, text);
 
                 highlighter.TextFragmenter = (new SimpleFragmenter(40));
 
@@ -369,7 +369,7 @@ namespace Lucene.Net.Search.Highlight
             for (int i = 0; i < hits.TotalHits; i++)
             {
                 String text = searcher.Doc(hits.ScoreDocs[i].Doc).Get(FIELD_NAME);
-                TokenStream tokenStream = analyzer.TokenStream(FIELD_NAME, text);
+                TokenStream tokenStream = analyzer.GetTokenStream(FIELD_NAME, text);
 
                 highlighter.TextFragmenter = (new SimpleFragmenter(40));
 
@@ -399,7 +399,7 @@ namespace Lucene.Net.Search.Highlight
             for (int i = 0; i < hits.TotalHits; i++)
             {
                 String text = searcher.Doc(hits.ScoreDocs[i].Doc).Get(FIELD_NAME);
-                TokenStream tokenStream = analyzer.TokenStream(FIELD_NAME, text);
+                TokenStream tokenStream = analyzer.GetTokenStream(FIELD_NAME, text);
 
                 highlighter.TextFragmenter = (new SimpleFragmenter(40));
 
@@ -427,7 +427,7 @@ namespace Lucene.Net.Search.Highlight
             for (int i = 0; i < hits.TotalHits; i++)
             {
                 String text = searcher.Doc(hits.ScoreDocs[i].Doc).Get(FIELD_NAME);
-                TokenStream tokenStream = analyzer.TokenStream(FIELD_NAME, text);
+                TokenStream tokenStream = analyzer.GetTokenStream(FIELD_NAME, text);
 
                 highlighter.TextFragmenter = (new SimpleFragmenter(40));
 
@@ -455,7 +455,7 @@ namespace Lucene.Net.Search.Highlight
             for (int i = 0; i < hits.TotalHits; i++)
             {
                 String text = searcher.Doc(hits.ScoreDocs[i].Doc).Get(FIELD_NAME);
-                TokenStream tokenStream = analyzer.TokenStream(FIELD_NAME, text);
+                TokenStream tokenStream = analyzer.GetTokenStream(FIELD_NAME, text);
 
                 highlighter.TextFragmenter = (new SimpleFragmenter(40));
 
@@ -483,7 +483,7 @@ namespace Lucene.Net.Search.Highlight
             for (int i = 0; i < hits.TotalHits; i++)
             {
                 String text = searcher.Doc(hits.ScoreDocs[i].Doc).Get(FIELD_NAME);
-                TokenStream tokenStream = analyzer.TokenStream(FIELD_NAME, text);
+                TokenStream tokenStream = analyzer.GetTokenStream(FIELD_NAME, text);
 
                 highlighter.TextFragmenter = (new SimpleFragmenter(40));
 
@@ -513,7 +513,7 @@ namespace Lucene.Net.Search.Highlight
             for (int i = 0; i < hits.TotalHits; i++)
             {
                 String text = searcher.Doc(hits.ScoreDocs[i].Doc).GetField(NUMERIC_FIELD_NAME).GetNumericValue().toString();
-                TokenStream tokenStream = analyzer.TokenStream(FIELD_NAME, text);
+                TokenStream tokenStream = analyzer.GetTokenStream(FIELD_NAME, text);
 
                 highlighter.TextFragmenter = (new SimpleFragmenter(40));
 
@@ -542,7 +542,7 @@ namespace Lucene.Net.Search.Highlight
             for (int i = 0; i < hits.TotalHits; i++)
             {
                 String text = searcher.Doc(hits.ScoreDocs[i].Doc).Get(FIELD_NAME);
-                TokenStream tokenStream = analyzer.TokenStream(FIELD_NAME, text);
+                TokenStream tokenStream = analyzer.GetTokenStream(FIELD_NAME, text);
 
                 String result = highlighter.GetBestFragments(tokenStream, text, maxNumFragmentsRequired,
                     "...");
@@ -567,7 +567,7 @@ namespace Lucene.Net.Search.Highlight
             for (int i = 0; i < hits.TotalHits; i++)
             {
                 String text = searcher.Doc(hits.ScoreDocs[i].Doc).Get(FIELD_NAME);
-                TokenStream tokenStream = analyzer.TokenStream(FIELD_NAME, text);
+                TokenStream tokenStream = analyzer.GetTokenStream(FIELD_NAME, text);
                 QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
                 Highlighter highlighter = new Highlighter(this, scorer);
 
@@ -600,7 +600,7 @@ namespace Lucene.Net.Search.Highlight
             for (int i = 0; i < hits.TotalHits; i++)
             {
                 String text = searcher.Doc(hits.ScoreDocs[i].Doc).Get(FIELD_NAME);
-                TokenStream tokenStream = analyzer.TokenStream(FIELD_NAME, text);
+                TokenStream tokenStream = analyzer.GetTokenStream(FIELD_NAME, text);
 
                 highlighter.TextFragmenter = (new SimpleSpanFragmenter(scorer, 5));
 
@@ -624,7 +624,7 @@ namespace Lucene.Net.Search.Highlight
             for (int i = 0; i < hits.TotalHits; i++)
             {
                 String text = searcher.Doc(hits.ScoreDocs[i].Doc).Get(FIELD_NAME);
-                TokenStream tokenStream = analyzer.TokenStream(FIELD_NAME, text);
+                TokenStream tokenStream = analyzer.GetTokenStream(FIELD_NAME, text);
 
                 highlighter.TextFragmenter = (new SimpleSpanFragmenter(scorer, 20));
 
@@ -658,7 +658,7 @@ namespace Lucene.Net.Search.Highlight
             for (int i = 0; i < hits.TotalHits; i++)
             {
                 String text = searcher.Doc(hits.ScoreDocs[i].Doc).Get(FIELD_NAME);
-                TokenStream tokenStream = analyzer.TokenStream(FIELD_NAME, text);
+                TokenStream tokenStream = analyzer.GetTokenStream(FIELD_NAME, text);
 
                 highlighter.TextFragmenter = (new SimpleFragmenter(40));
 
@@ -735,7 +735,7 @@ namespace Lucene.Net.Search.Highlight
             for (int i = 0; i < hits.TotalHits; i++)
             {
                 String text = searcher.Doc(hits.ScoreDocs[i].Doc).Get(FIELD_NAME);
-                TokenStream tokenStream = analyzer.TokenStream(FIELD_NAME, text);
+                TokenStream tokenStream = analyzer.GetTokenStream(FIELD_NAME, text);
 
                 String result = highlighter.GetBestFragments(tokenStream, text, maxNumFragmentsRequired,
                     "...");
@@ -933,7 +933,7 @@ namespace Lucene.Net.Search.Highlight
                 int maxNumFragmentsRequired = 2;
                 String fragmentSeparator = "...";
                 QueryScorer scorer = new QueryScorer(query, HighlighterTest.FIELD_NAME);
-                TokenStream tokenStream = analyzer.TokenStream(HighlighterTest.FIELD_NAME, text);
+                TokenStream tokenStream = analyzer.GetTokenStream(HighlighterTest.FIELD_NAME, text);
 
                 Highlighter highlighter = new Highlighter(this, scorer);
 
@@ -958,7 +958,7 @@ namespace Lucene.Net.Search.Highlight
                 int maxNumFragmentsRequired = 2;
                 String fragmentSeparator = "...";
                 QueryScorer scorer = new QueryScorer(query, null);
-                TokenStream tokenStream = analyzer.TokenStream(HighlighterTest.FIELD_NAME, text);
+                TokenStream tokenStream = analyzer.GetTokenStream(HighlighterTest.FIELD_NAME, text);
 
                 Highlighter highlighter = new Highlighter(this, scorer);
 
@@ -983,7 +983,7 @@ namespace Lucene.Net.Search.Highlight
                 int maxNumFragmentsRequired = 2;
                 String fragmentSeparator = "...";
                 QueryScorer scorer = new QueryScorer(query, "random_field", HighlighterTest.FIELD_NAME);
-                TokenStream tokenStream = analyzer.TokenStream(HighlighterTest.FIELD_NAME, text);
+                TokenStream tokenStream = analyzer.GetTokenStream(HighlighterTest.FIELD_NAME, text);
 
                 Highlighter highlighter = new Highlighter(this, scorer);
 
@@ -1149,7 +1149,7 @@ namespace Lucene.Net.Search.Highlight
                 for (int i = 0; i < hits.TotalHits; i++)
                 {
                     String text = searcher.Doc(hits.ScoreDocs[i].Doc).Get(FIELD_NAME);
-                    TokenStream tokenStream = analyzer.TokenStream(FIELD_NAME, text);
+                    TokenStream tokenStream = analyzer.GetTokenStream(FIELD_NAME, text);
 
                     Highlighter highlighter = instance.GetHighlighter(query, FIELD_NAME,
                         this);
@@ -1209,7 +1209,7 @@ namespace Lucene.Net.Search.Highlight
                 Highlighter highlighter = instance.GetHighlighter(wTerms, this);// new
                                                                                 // Highlighter(new
                                                                                 // QueryTermScorer(wTerms));
-                TokenStream tokenStream = analyzer.TokenStream(FIELD_NAME, texts[0]);
+                TokenStream tokenStream = analyzer.GetTokenStream(FIELD_NAME, texts[0]);
                 highlighter.TextFragmenter = (new SimpleFragmenter(2));
 
                 String result = highlighter.GetBestFragment(tokenStream, texts[0]).Trim();
@@ -1218,7 +1218,7 @@ namespace Lucene.Net.Search.Highlight
 
                 // readjust weights
                 wTerms[1].Weight = (50f);
-                tokenStream = analyzer.TokenStream(FIELD_NAME, texts[0]);
+                tokenStream = analyzer.GetTokenStream(FIELD_NAME, texts[0]);
                 highlighter = instance.GetHighlighter(wTerms, this);
                 highlighter.TextFragmenter = (new SimpleFragmenter(2));
 
@@ -1252,7 +1252,7 @@ namespace Lucene.Net.Search.Highlight
                 Highlighter highlighter = instance.GetHighlighter(query, null, this);
 
                 // Get 3 best fragments and separate with a "..."
-                TokenStream tokenStream = analyzer.TokenStream(null, s);
+                TokenStream tokenStream = analyzer.GetTokenStream(null, s);
 
                 String result = highlighter.GetBestFragments(tokenStream, s, 3, "...");
                 String expectedResult = "<B>football</B>-<B>soccer</B> in the euro 2004 <B>footie</B> competition";
@@ -1276,7 +1276,7 @@ namespace Lucene.Net.Search.Highlight
                 for (int i = 0; i < hits.TotalHits; i++)
                 {
                     String text = searcher.Doc(hits.ScoreDocs[i].Doc).Get(FIELD_NAME);
-                    TokenStream tokenStream = analyzer.TokenStream(FIELD_NAME, text);
+                    TokenStream tokenStream = analyzer.GetTokenStream(FIELD_NAME, text);
                     Highlighter highlighter = instance.GetHighlighter(query, FIELD_NAME,
                         this);
                     String result = highlighter.GetBestFragment(tokenStream, text);
@@ -1299,7 +1299,7 @@ namespace Lucene.Net.Search.Highlight
                 for (int i = 0; i < hits.TotalHits; i++)
                 {
                     String text = searcher.Doc(hits.ScoreDocs[i].Doc).Get(FIELD_NAME);
-                    TokenStream tokenStream = analyzer.TokenStream(FIELD_NAME, text);
+                    TokenStream tokenStream = analyzer.GetTokenStream(FIELD_NAME, text);
 
                     Highlighter highlighter = instance.GetHighlighter(query, FIELD_NAME,
                         this);// new Highlighter(this, new
@@ -1307,7 +1307,7 @@ namespace Lucene.Net.Search.Highlight
                     highlighter.TextFragmenter = (new SimpleFragmenter(20));
                     String[] stringResults = highlighter.GetBestFragments(tokenStream, text, 10);
 
-                    tokenStream = analyzer.TokenStream(FIELD_NAME, text);
+                    tokenStream = analyzer.GetTokenStream(FIELD_NAME, text);
                     TextFragment[] fragmentResults = highlighter.GetBestTextFragments(tokenStream, text,
                         true, 10);
 
@@ -1338,7 +1338,7 @@ namespace Lucene.Net.Search.Highlight
             {
                 numHighlights = 0;
                 doSearching(new TermQuery(new Term(FIELD_NAME, "meat")));
-                TokenStream tokenStream = analyzer.TokenStream(FIELD_NAME, texts[0]);
+                TokenStream tokenStream = analyzer.GetTokenStream(FIELD_NAME, texts[0]);
                 Highlighter highlighter = instance.GetHighlighter(query, FIELD_NAME,
                     this);// new Highlighter(this, new
                           // QueryTermScorer(query));
@@ -1453,7 +1453,7 @@ namespace Lucene.Net.Search.Highlight
                 for (int i = 0; i < hits.TotalHits; i++)
                 {
                     String text = searcher.Doc(hits.ScoreDocs[i].Doc).Get(FIELD_NAME);
-                    TokenStream tokenStream = analyzer.TokenStream(FIELD_NAME, text);
+                    TokenStream tokenStream = analyzer.GetTokenStream(FIELD_NAME, text);
                     Highlighter highlighter = instance.GetHighlighter(query, FIELD_NAME, this, false);
 
                     highlighter.TextFragmenter = (new SimpleFragmenter(40));
@@ -1482,7 +1482,7 @@ namespace Lucene.Net.Search.Highlight
 
                 foreach (String text in texts)
                 {
-                    TokenStream tokenStream = analyzer.TokenStream(FIELD_NAME, text);
+                    TokenStream tokenStream = analyzer.GetTokenStream(FIELD_NAME, text);
                     Highlighter highlighter = instance.GetHighlighter(query, FIELD_NAME,
                         this);
                     String result = highlighter.GetBestFragment(tokenStream, text);
@@ -1508,7 +1508,7 @@ namespace Lucene.Net.Search.Highlight
             Highlighter highlighter = new Highlighter(this, new SimpleHTMLEncoder(), new TestEncodingScorerAnonymousHelper(this));
 
             highlighter.TextFragmenter = (new SimpleFragmenter(2000));
-            TokenStream tokenStream = analyzer.TokenStream(FIELD_NAME, rawDocContent);
+            TokenStream tokenStream = analyzer.GetTokenStream(FIELD_NAME, rawDocContent);
 
             String encodedSnippet = highlighter.GetBestFragments(tokenStream, rawDocContent, 1, "");
             // An ugly bit of XML creation:
@@ -1940,7 +1940,7 @@ namespace Lucene.Net.Search.Highlight
             for (int i = 0; i < hits.TotalHits; i++)
             {
                 String text = searcher.Doc(hits.ScoreDocs[i].Doc).Get(FIELD_NAME);
-                TokenStream tokenStream = analyzer.TokenStream(FIELD_NAME, text);
+                TokenStream tokenStream = analyzer.GetTokenStream(FIELD_NAME, text);
                 QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
                 Highlighter highlighter = new Highlighter(this, scorer);
 
@@ -2249,7 +2249,7 @@ namespace Lucene.Net.Search.Highlight
                 int maxNumFragmentsRequired = 2;
                 String fragmentSeparator = "...";
                 IScorer scorer = null;
-                TokenStream tokenStream = analyzer.TokenStream(HighlighterTest.FIELD_NAME, text);
+                TokenStream tokenStream = analyzer.GetTokenStream(HighlighterTest.FIELD_NAME, text);
                 if (mode == QUERY)
                 {
                     scorer = new QueryScorer(query);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Tests.Highlighter/VectorHighlight/AbstractTestCase.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Highlighter/VectorHighlight/AbstractTestCase.cs b/src/Lucene.Net.Tests.Highlighter/VectorHighlight/AbstractTestCase.cs
index 4d9a52c..1203f2f 100644
--- a/src/Lucene.Net.Tests.Highlighter/VectorHighlight/AbstractTestCase.cs
+++ b/src/Lucene.Net.Tests.Highlighter/VectorHighlight/AbstractTestCase.cs
@@ -171,7 +171,7 @@ namespace Lucene.Net.Search.VectorHighlight
         {
             List<BytesRef> bytesRefs = new List<BytesRef>();
 
-            TokenStream tokenStream = analyzer.TokenStream(field, text);
+            TokenStream tokenStream = analyzer.GetTokenStream(field, text);
             try
             {
                 ITermToBytesRefAttribute termAttribute = tokenStream.GetAttribute<ITermToBytesRefAttribute>();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Tests.Memory/Index/Memory/MemoryIndexTest.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Memory/Index/Memory/MemoryIndexTest.cs b/src/Lucene.Net.Tests.Memory/Index/Memory/MemoryIndexTest.cs
index fef76e2..637a6e1 100644
--- a/src/Lucene.Net.Tests.Memory/Index/Memory/MemoryIndexTest.cs
+++ b/src/Lucene.Net.Tests.Memory/Index/Memory/MemoryIndexTest.cs
@@ -388,7 +388,7 @@ namespace Lucene.Net.Index.Memory
             SpanQuery wrappedquery = new SpanMultiTermQueryWrapper<RegexpQuery>(regex);
 
             MemoryIndex mindex = new MemoryIndex(Random().nextBoolean(), Random().nextInt(50) * 1024 * 1024);
-            mindex.AddField("field", new MockAnalyzer(Random()).TokenStream("field", "hello there"));
+            mindex.AddField("field", new MockAnalyzer(Random()).GetTokenStream("field", "hello there"));
 
             // This throws an NPE
             assertEquals(0, mindex.Search(wrappedquery), 0.00001f);
@@ -402,7 +402,7 @@ namespace Lucene.Net.Index.Memory
             SpanQuery wrappedquery = new SpanOrQuery(new SpanMultiTermQueryWrapper<RegexpQuery>(regex));
 
             MemoryIndex mindex = new MemoryIndex(Random().nextBoolean(), Random().nextInt(50) * 1024 * 1024);
-            mindex.AddField("field", new MockAnalyzer(Random()).TokenStream("field", "hello there"));
+            mindex.AddField("field", new MockAnalyzer(Random()).GetTokenStream("field", "hello there"));
 
             // This passes though
             assertEquals(0, mindex.Search(wrappedquery), 0.00001f);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b5cae3f3/src/Lucene.Net.Tests.Suggest/Suggest/Analyzing/AnalyzingInfixSuggesterTest.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Suggest/Suggest/Analyzing/AnalyzingInfixSuggesterTest.cs b/src/Lucene.Net.Tests.Suggest/Suggest/Analyzing/AnalyzingInfixSuggesterTest.cs
index e1c6b31..bcc362d 100644
--- a/src/Lucene.Net.Tests.Suggest/Suggest/Analyzing/AnalyzingInfixSuggesterTest.cs
+++ b/src/Lucene.Net.Tests.Suggest/Suggest/Analyzing/AnalyzingInfixSuggesterTest.cs
@@ -140,7 +140,7 @@ namespace Lucene.Net.Search.Suggest.Analyzing
 
             protected internal override object Highlight(string text, IEnumerable<string> matchedTokens, string prefixToken)
             {
-                TokenStream ts = m_queryAnalyzer.TokenStream("text", new StringReader(text));
+                TokenStream ts = m_queryAnalyzer.GetTokenStream("text", new StringReader(text));
                 try
                 {
                     ICharTermAttribute termAtt = ts.AddAttribute<ICharTermAttribute>();

