lucenenet-commits mailing list archives

From nightowl...@apache.org
Subject [08/27] lucenenet git commit: Lucene.Net.Core.Analysis: Cleaned up documentation comments
Date Thu, 02 Mar 2017 01:10:51 GMT
Lucene.Net.Core.Analysis: Cleaned up documentation comments


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/37c78c2e
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/37c78c2e
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/37c78c2e

Branch: refs/heads/api-work
Commit: 37c78c2e775b76a913bffbf0b0cbaeba180392fd
Parents: 33b65d2
Author: Shad Storhaug <shad@shadstorhaug.com>
Authored: Wed Mar 1 17:34:28 2017 +0700
Committer: Shad Storhaug <shad@shadstorhaug.com>
Committed: Thu Mar 2 08:08:41 2017 +0700

----------------------------------------------------------------------
 src/Lucene.Net.Core/Analysis/Analyzer.cs        | 269 +++++++------
 src/Lucene.Net.Core/Analysis/AnalyzerWrapper.cs |  59 ++-
 .../Analysis/CachingTokenFilter.cs              |  20 +-
 src/Lucene.Net.Core/Analysis/CharFilter.cs      |  54 +--
 .../Analysis/NumericTokenStream.cs              | 126 +++---
 .../Analysis/ReusableStringReader.cs            |   3 +-
 src/Lucene.Net.Core/Analysis/Token.cs           | 381 ++++++++++---------
 .../TokenAttributes/CharTermAttribute.cs        |   9 +-
 .../Analysis/TokenAttributes/FlagsAttribute.cs  |   2 +-
 .../TokenAttributes/ICharTermAttribute.cs       |   4 +-
 .../Analysis/TokenAttributes/IFlagsAttribute.cs |  16 +-
 .../TokenAttributes/IKeywordAttribute.cs        |  15 +-
 .../TokenAttributes/IOffsetAttribute.cs         |  35 +-
 .../TokenAttributes/IPayloadAttribute.cs        |  14 +-
 .../IPositionIncrementAttribute.cs              |  29 +-
 .../TokenAttributes/IPositionLengthAttribute.cs |  30 +-
 .../Analysis/TokenAttributes/ITermAttribute.cs  |   3 +-
 .../TokenAttributes/ITermToBytesRefAttribute.cs |  43 ++-
 .../Analysis/TokenAttributes/ITypeAttribute.cs  |  11 +-
 .../TokenAttributes/KeywordAttribute.cs         |   2 +-
 .../Analysis/TokenAttributes/OffsetAttribute.cs |   2 +-
 .../TokenAttributes/PayloadAttribute.cs         |   2 +-
 .../PositionIncrementAttribute.cs               |   2 +-
 .../TokenAttributes/PositionLengthAttribute.cs  |   2 +-
 .../Analysis/TokenAttributes/TypeAttribute.cs   |   7 +-
 src/Lucene.Net.Core/Analysis/TokenFilter.cs     |  40 +-
 src/Lucene.Net.Core/Analysis/TokenStream.cs     | 124 +++---
 .../Analysis/TokenStreamToAutomaton.cs          |  29 +-
 src/Lucene.Net.Core/Analysis/Tokenizer.cs       |  48 ++-
 29 files changed, 733 insertions(+), 648 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/Analyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/Analyzer.cs b/src/Lucene.Net.Core/Analysis/Analyzer.cs
index 65c164e..bb31036 100644
--- a/src/Lucene.Net.Core/Analysis/Analyzer.cs
+++ b/src/Lucene.Net.Core/Analysis/Analyzer.cs
@@ -25,56 +25,55 @@ namespace Lucene.Net.Analysis
     using AlreadyClosedException = Lucene.Net.Store.AlreadyClosedException;
 
     /// <summary>
-    /// An Analyzer builds TokenStreams, which analyze text.  It thus represents a
+    /// An <see cref="Analyzer"/> builds <see cref="Analysis.TokenStream"/>s, which analyze text.  It thus represents a
     /// policy for extracting index terms from text.
-    /// <p>
+    /// <para/>
     /// In order to define what analysis is done, subclasses must define their
-    /// <seealso cref="TokenStreamComponents TokenStreamComponents"/> in <seealso cref="#createComponents(String, Reader)"/>.
-    /// The components are then reused in each call to <seealso cref="#tokenStream(String, Reader)"/>.
-    /// <p>
+    /// <see cref="TokenStreamComponents"/> in <see cref="CreateComponents(string, TextReader)"/>.
+    /// The components are then reused in each call to <see cref="TokenStream(string, TextReader)"/>.
+    /// <para/>
     /// Simple example:
-    /// <pre class="prettyprint">
-    /// Analyzer analyzer = new Analyzer() {
-    ///   protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader) {
+    /// <code>
+    /// Analyzer analyzer = Analyzer.NewAnonymous((fieldName, reader) =>
+    /// {
     ///     Tokenizer source = new FooTokenizer(reader);
     ///     TokenStream filter = new FooFilter(source);
     ///     filter = new BarFilter(filter);
     ///     return new TokenStreamComponents(source, filter);
-    ///   }
-    /// };
-    /// </pre>
-    /// For more examples, see the <seealso cref="Lucene.Net.Analysis Analysis package documentation"/>.
-    /// <p>
+    /// });
+    /// </code>
+    /// For more examples, see the <see cref="Lucene.Net.Analysis"/> namespace documentation.
+    /// <para/>
     /// For some concrete implementations bundled with Lucene, look in the analysis modules:
-    /// <ul>
-    ///   <li><a href="{@docRoot}/../analyzers-common/overview-summary.html">Common</a>:
-    ///       Analyzers for indexing content in different languages and domains.
-    ///   <li><a href="{@docRoot}/../analyzers-icu/overview-summary.html">ICU</a>:
-    ///       Exposes functionality from ICU to Apache Lucene.
-    ///   <li><a href="{@docRoot}/../analyzers-kuromoji/overview-summary.html">Kuromoji</a>:
-    ///       Morphological analyzer for Japanese text.
-    ///   <li><a href="{@docRoot}/../analyzers-morfologik/overview-summary.html">Morfologik</a>:
-    ///       Dictionary-driven lemmatization for the Polish language.
-    ///   <li><a href="{@docRoot}/../analyzers-phonetic/overview-summary.html">Phonetic</a>:
-    ///       Analysis for indexing phonetic signatures (for sounds-alike search).
-    ///   <li><a href="{@docRoot}/../analyzers-smartcn/overview-summary.html">Smart Chinese</a>:
-    ///       Analyzer for Simplified Chinese, which indexes words.
-    ///   <li><a href="{@docRoot}/../analyzers-stempel/overview-summary.html">Stempel</a>:
-    ///       Algorithmic Stemmer for the Polish Language.
-    ///   <li><a href="{@docRoot}/../analyzers-uima/overview-summary.html">UIMA</a>:
-    ///       Analysis integration with Apache UIMA.
-    /// </ul>
+    /// <list type="bullet">
+    ///   <item>Common:
+    ///       Analyzers for indexing content in different languages and domains.</item>
+    ///   <item>ICU:
+    ///       Exposes functionality from ICU to Apache Lucene.</item>
+    ///   <item>Kuromoji:
+    ///       Morphological analyzer for Japanese text.</item>
+    ///   <item>Morfologik:
+    ///       Dictionary-driven lemmatization for the Polish language.</item>
+    ///   <item>Phonetic:
+    ///       Analysis for indexing phonetic signatures (for sounds-alike search).</item>
+    ///   <item>Smart Chinese:
+    ///       Analyzer for Simplified Chinese, which indexes words.</item>
+    ///   <item>Stempel:
+    ///       Algorithmic Stemmer for the Polish Language.</item>
+    ///   <item>UIMA:
+    ///       Analysis integration with Apache UIMA.</item>
+    /// </list>
     /// </summary>
     public abstract class Analyzer : IDisposable
     {
         private readonly ReuseStrategy reuseStrategy;
 
-        // non final as it gets nulled if closed; pkg private for access by ReuseStrategy's final helper methods:
+    // non readonly as it gets nulled if closed; internal for access by ReuseStrategy's helper methods:
         internal DisposableThreadLocal<object> storedValue = new DisposableThreadLocal<object>();
 
         /// <summary>
-        /// Create a new Analyzer, reusing the same set of components per-thread
-        /// across calls to <seealso cref="#tokenStream(String, Reader)"/>.
+        /// Create a new <see cref="Analyzer"/>, reusing the same set of components per-thread
+        /// across calls to <see cref="TokenStream(string, TextReader)"/>.
         /// </summary>
         public Analyzer()
             : this(GLOBAL_REUSE_STRATEGY)
@@ -82,12 +81,12 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Expert: create a new Analyzer with a custom <seealso cref="ReuseStrategy"/>.
-        /// <p>
+        /// Expert: create a new Analyzer with a custom <see cref="ReuseStrategy"/>.
+        /// <para/>
         /// NOTE: if you just want to reuse on a per-field basis, its easier to
-        /// use a subclass of <seealso cref="AnalyzerWrapper"/> such as
-        /// <a href="{@docRoot}/../analyzers-common/Lucene.Net.Analysis/miscellaneous/PerFieldAnalyzerWrapper.html">
-        /// PerFieldAnalyerWrapper</a> instead.
+        /// use a subclass of <see cref="AnalyzerWrapper"/> such as
+        /// <c>Lucene.Net.Analysis.Miscellaneous.PerFieldAnalyzerWrapper</c>
+        /// instead.
         /// </summary>
         public Analyzer(ReuseStrategy reuseStrategy)
         {
@@ -150,38 +149,38 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Creates a new <seealso cref="TokenStreamComponents"/> instance for this analyzer.
+        /// Creates a new <see cref="TokenStreamComponents"/> instance for this analyzer.
         /// </summary>
         /// <param name="fieldName">
         ///          the name of the fields content passed to the
-        ///          <seealso cref="TokenStreamComponents"/> sink as a reader </param>
+        ///          <see cref="TokenStreamComponents"/> sink as a reader </param>
         /// <param name="reader">
-        ///          the reader passed to the <seealso cref="Tokenizer"/> constructor </param>
-        /// <returns> the <seealso cref="TokenStreamComponents"/> for this analyzer. </returns>
+        ///          the reader passed to the <see cref="Tokenizer"/> constructor </param>
+        /// <returns> the <see cref="TokenStreamComponents"/> for this analyzer. </returns>
         protected internal abstract TokenStreamComponents CreateComponents(string fieldName, TextReader reader);
 
         /// <summary>
-        /// Returns a TokenStream suitable for <code>fieldName</code>, tokenizing
-        /// the contents of <code>text</code>.
-        /// <p>
-        /// this method uses <seealso cref="#createComponents(String, Reader)"/> to obtain an
-        /// instance of <seealso cref="TokenStreamComponents"/>. It returns the sink of the
+        /// Returns a <see cref="TokenStream"/> suitable for <paramref name="fieldName"/>, tokenizing
+        /// the contents of <c>text</c>.
+        /// <para/>
+        /// This method uses <see cref="CreateComponents(string, TextReader)"/> to obtain an
+        /// instance of <see cref="TokenStreamComponents"/>. It returns the sink of the
         /// components and stores the components internally. Subsequent calls to this
         /// method will reuse the previously stored components after resetting them
-        /// through <seealso cref="TokenStreamComponents#setReader(Reader)"/>.
-        /// <p>
+        /// through <see cref="TokenStreamComponents.SetReader(TextReader)"/>.
+        /// <para/>
         /// <b>NOTE:</b> After calling this method, the consumer must follow the
-        /// workflow described in <seealso cref="TokenStream"/> to properly consume its contents.
-        /// See the <seealso cref="Lucene.Net.Analysis Analysis package documentation"/> for
+        /// workflow described in <see cref="Analysis.TokenStream"/> to properly consume its contents.
+        /// See the <see cref="Lucene.Net.Analysis"/> namespace documentation for
         /// some examples demonstrating this.
         /// </summary>
-        /// <param name="fieldName"> the name of the field the created TokenStream is used for </param>
-        /// <param name="text"> the String the streams source reads from </param>
-        /// <returns> TokenStream for iterating the analyzed content of <code>reader</code> </returns>
-        /// <exception cref="AlreadyClosedException"> if the Analyzer is closed. </exception>
+        /// <param name="fieldName"> the name of the field the created <see cref="Analysis.TokenStream"/> is used for </param>
+        /// <param name="reader"> the reader the streams source reads from </param>
+        /// <returns> <see cref="Analysis.TokenStream"/> for iterating the analyzed content of <see cref="TextReader"/> </returns>
+        /// <exception cref="AlreadyClosedException"> if the Analyzer is disposed. </exception>
         /// <exception cref="IOException"> if an i/o error occurs (may rarely happen for strings). </exception>
-        /// <seealso cref= #tokenStream(String, Reader) </seealso>
-        public TokenStream TokenStream(string fieldName, TextReader reader)
+        /// <seealso cref="TokenStream(string, string)"/>
+        public TokenStream TokenStream(string fieldName, TextReader reader) // LUCENENET TODO: Rename GetTokenStream ?
         {
             TokenStreamComponents components = reuseStrategy.GetReusableComponents(this, fieldName);
             TextReader r = InitReader(fieldName, reader);
@@ -197,7 +196,28 @@ namespace Lucene.Net.Analysis
             return components.TokenStream;
         }
 
-        public TokenStream TokenStream(string fieldName, string text)
+        /// <summary>
+        /// Returns a <see cref="Analysis.TokenStream"/> suitable for <paramref name="fieldName"/>, tokenizing
+        /// the contents of <paramref name="text"/>.
+        /// <para/>
+        /// This method uses <see cref="CreateComponents(string, TextReader)"/> to obtain an
+        /// instance of <see cref="TokenStreamComponents"/>. It returns the sink of the
+        /// components and stores the components internally. Subsequent calls to this
+        /// method will reuse the previously stored components after resetting them
+        /// through <see cref="TokenStreamComponents.SetReader(TextReader)"/>.
+        /// <para/>
+        /// <b>NOTE:</b> After calling this method, the consumer must follow the 
+        /// workflow described in <see cref="Analysis.TokenStream"/> to properly consume its contents.
+        /// See the <see cref="Lucene.Net.Analysis"/> namespace documentation for
+        /// some examples demonstrating this.
+        /// </summary>
+        /// <param name="fieldName">the name of the field the created <see cref="Analysis.TokenStream"/> is used for</param>
+        /// <param name="text">the <see cref="string"/> the streams source reads from </param>
+        /// <returns><see cref="Analysis.TokenStream"/> for iterating the analyzed content of <paramref name="reader"/></returns>
+        /// <exception cref="AlreadyClosedException"> if the Analyzer is disposed. </exception>
+        /// <exception cref="IOException"> if an i/o error occurs (may rarely happen for strings). </exception>
+        /// <seealso cref="TokenStream(string, TextReader)"/>
+        public TokenStream TokenStream(string fieldName, string text) // LUCENENET TODO: Rename GetTokenStream ?
         {
             TokenStreamComponents components = reuseStrategy.GetReusableComponents(this, fieldName);
             ReusableStringReader strReader =
@@ -220,53 +240,53 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Override this if you want to add a CharFilter chain.
-        /// <p>
-        /// The default implementation returns <code>reader</code>
+        /// Override this if you want to add a <see cref="CharFilter"/> chain.
+        /// <para/>
+        /// The default implementation returns <paramref name="reader"/>
         /// unchanged.
         /// </summary>
-        /// <param name="fieldName"> IndexableField name being indexed </param>
-        /// <param name="reader"> original TextReader </param>
-        /// <returns> reader, optionally decorated with CharFilter(s) </returns>
+        /// <param name="fieldName"> <see cref="Index.IIndexableField"/> name being indexed </param>
+        /// <param name="reader"> original <see cref="TextReader"/> </param>
+        /// <returns> reader, optionally decorated with <see cref="CharFilter"/>(s) </returns>
         protected internal virtual TextReader InitReader(string fieldName, TextReader reader)
         {
             return reader;
         }
 
         /// <summary>
-        /// Invoked before indexing a IndexableField instance if
-        /// terms have already been added to that field.  this allows custom
+        /// Invoked before indexing a <see cref="Index.IIndexableField"/> instance if
+        /// terms have already been added to that field.  This allows custom
         /// analyzers to place an automatic position increment gap between
-        /// IndexbleField instances using the same field name.  The default value
+        /// <see cref="Index.IIndexableField"/> instances using the same field name.  The default value
         /// position increment gap is 0.  With a 0 position increment gap and
         /// the typical default token position increment of 1, all terms in a field,
-        /// including across IndexableField instances, are in successive positions, allowing
-        /// exact PhraseQuery matches, for instance, across IndexableField instance boundaries.
+        /// including across <see cref="Index.IIndexableField"/> instances, are in successive positions, allowing
+        /// exact <see cref="Search.PhraseQuery"/> matches, for instance, across <see cref="Index.IIndexableField"/> instance boundaries.
         /// </summary>
-        /// <param name="fieldName"> IndexableField name being indexed. </param>
-        /// <returns> position increment gap, added to the next token emitted from <seealso cref="#tokenStream(String,Reader)"/>.
-        ///         this value must be {@code >= 0}. </returns>
+        /// <param name="fieldName"> <see cref="Index.IIndexableField"/> name being indexed. </param>
+        /// <returns> position increment gap, added to the next token emitted from <see cref="TokenStream(string, TextReader)"/>.
+        ///         This value must be <c>&gt;= 0</c>.</returns>
         public virtual int GetPositionIncrementGap(string fieldName)
         {
             return 0;
         }
 
         /// <summary>
-        /// Just like <seealso cref="#getPositionIncrementGap"/>, except for
-        /// Token offsets instead.  By default this returns 1.
+        /// Just like <see cref="GetPositionIncrementGap"/>, except for
+        /// <see cref="Token"/> offsets instead.  By default this returns 1.
         /// this method is only called if the field
         /// produced at least one token for indexing.
         /// </summary>
         /// <param name="fieldName"> the field just indexed </param>
-        /// <returns> offset gap, added to the next token emitted from <seealso cref="#tokenStream(String,Reader)"/>.
-        ///         this value must be {@code >= 0}. </returns>
+        /// <returns> offset gap, added to the next token emitted from <see cref="TokenStream(string, TextReader)"/>.
+        ///         This value must be <c>&gt;= 0</c>. </returns>
         public virtual int GetOffsetGap(string fieldName)
         {
             return 1;
         }
 
         /// <summary>
-        /// Returns the used <seealso cref="ReuseStrategy"/>.
+        /// Returns the used <see cref="ReuseStrategy"/>.
         /// </summary>
         public ReuseStrategy Strategy
         {
@@ -277,7 +297,7 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Frees persistent resources used by this Analyzer </summary>
+        /// Frees persistent resources used by this <see cref="Analyzer"/> </summary>
         public virtual void Dispose()
         {
             if (storedValue != null)
@@ -288,11 +308,11 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// this class encapsulates the outer components of a token stream. It provides
-        /// access to the source (<seealso cref="Tokenizer"/>) and the outer end (sink), an
-        /// instance of <seealso cref="TokenFilter"/> which also serves as the
-        /// <seealso cref="TokenStream"/> returned by
-        /// <seealso cref="Analyzer#tokenStream(String, Reader)"/>.
+        /// This class encapsulates the outer components of a token stream. It provides
+        /// access to the source (<see cref="Analysis.Tokenizer"/>) and the outer end (sink), an
+        /// instance of <see cref="TokenFilter"/> which also serves as the
+        /// <see cref="Analysis.TokenStream"/> returned by
+        /// <see cref="Analyzer.TokenStream(string, TextReader)"/>.
         /// </summary>
         public class TokenStreamComponents
         {
@@ -303,16 +323,16 @@ namespace Lucene.Net.Analysis
 
             /// <summary>
             /// Sink tokenstream, such as the outer tokenfilter decorating
-            /// the chain. this can be the source if there are no filters.
+            /// the chain. This can be the source if there are no filters.
             /// </summary>
             protected readonly TokenStream m_sink;
 
             /// <summary>
-            /// Internal cache only used by <seealso cref="Analyzer#tokenStream(String, String)"/>. </summary>
+            /// Internal cache only used by <see cref="Analyzer.TokenStream(string, string)"/>. </summary>
             internal ReusableStringReader reusableStringReader;
 
             /// <summary>
-            /// Creates a new <seealso cref="TokenStreamComponents"/> instance.
+            /// Creates a new <see cref="TokenStreamComponents"/> instance.
             /// </summary>
             /// <param name="source">
             ///          the analyzer's tokenizer </param>
@@ -325,7 +345,7 @@ namespace Lucene.Net.Analysis
             }
 
             /// <summary>
-            /// Creates a new <seealso cref="TokenStreamComponents"/> instance.
+            /// Creates a new <see cref="TokenStreamComponents"/> instance.
             /// </summary>
             /// <param name="source">
             ///          the analyzer's tokenizer </param>
@@ -349,9 +369,9 @@ namespace Lucene.Net.Analysis
             }
 
             /// <summary>
-            /// Returns the sink <seealso cref="TokenStream"/>
+            /// Returns the sink <see cref="Analysis.TokenStream"/>
             /// </summary>
-            /// <returns> the sink <seealso cref="TokenStream"/> </returns>
+            /// <returns> the sink <see cref="Analysis.TokenStream"/> </returns>
             public virtual TokenStream TokenStream
             {
                 get
@@ -361,9 +381,9 @@ namespace Lucene.Net.Analysis
             }
 
             /// <summary>
-            /// Returns the component's <seealso cref="Tokenizer"/>
+            /// Returns the component's <see cref="Analysis.Tokenizer"/>
             /// </summary>
-            /// <returns> Component's <seealso cref="Tokenizer"/> </returns>
+            /// <returns> Component's <see cref="Analysis.Tokenizer"/> </returns>
             public virtual Tokenizer Tokenizer
             {
                 get
@@ -374,36 +394,36 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Strategy defining how TokenStreamComponents are reused per call to
-        /// <seealso cref="Analyzer#tokenStream(String, java.io.Reader)"/>.
+        /// Strategy defining how <see cref="TokenStreamComponents"/> are reused per call to
+        /// <see cref="Analyzer.TokenStream(string, TextReader)"/>.
         /// </summary>
         public abstract class ReuseStrategy
         {
             /// <summary>
-            /// Gets the reusable TokenStreamComponents for the field with the given name.
+            /// Gets the reusable <see cref="TokenStreamComponents"/> for the field with the given name.
             /// </summary>
-            /// <param name="analyzer"> Analyzer from which to get the reused components. Use
-            ///        <seealso cref="#getStoredValue(Analyzer)"/> and <seealso cref="#setStoredValue(Analyzer, Object)"/>
-            ///        to access the data on the Analyzer. </param>
-            /// <param name="fieldName"> Name of the field whose reusable TokenStreamComponents
+            /// <param name="analyzer"> <see cref="Analyzer"/> from which to get the reused components. Use
+            ///        <see cref="GetStoredValue(Analyzer)"/> and <see cref="SetStoredValue(Analyzer, object)"/>
+            ///        to access the data on the <see cref="Analyzer"/>. </param>
+            /// <param name="fieldName"> Name of the field whose reusable <see cref="TokenStreamComponents"/>
             ///        are to be retrieved </param>
-            /// <returns> Reusable TokenStreamComponents for the field, or {@code null}
+            /// <returns> Reusable <see cref="TokenStreamComponents"/> for the field, or <c>null</c>
             ///         if there was no previous components for the field </returns>
             public abstract TokenStreamComponents GetReusableComponents(Analyzer analyzer, string fieldName);
 
             /// <summary>
-            /// Stores the given TokenStreamComponents as the reusable components for the
+            /// Stores the given <see cref="TokenStreamComponents"/> as the reusable components for the
             /// field with the give name.
             /// </summary>
-            /// <param name="fieldName"> Name of the field whose TokenStreamComponents are being set </param>
-            /// <param name="components"> TokenStreamComponents which are to be reused for the field </param>
+            /// <param name="fieldName"> Name of the field whose <see cref="TokenStreamComponents"/> are being set </param>
+            /// <param name="components"> <see cref="TokenStreamComponents"/> which are to be reused for the field </param>
             public abstract void SetReusableComponents(Analyzer analyzer, string fieldName, TokenStreamComponents components);
 
             /// <summary>
             /// Returns the currently stored value.
             /// </summary>
-            /// <returns> Currently stored value or {@code null} if no value is stored </returns>
-            /// <exception cref="AlreadyClosedException"> if the Analyzer is closed. </exception>
+            /// <returns> Currently stored value or <c>null</c> if no value is stored </returns>
+            /// <exception cref="AlreadyClosedException"> if the <see cref="Analyzer"/> is disposed. </exception>
             protected internal object GetStoredValue(Analyzer analyzer)
             {
                 if (analyzer.storedValue == null)
@@ -417,7 +437,7 @@ namespace Lucene.Net.Analysis
             /// Sets the stored value.
             /// </summary>
             /// <param name="storedValue"> Value to store </param>
-            /// <exception cref="AlreadyClosedException"> if the Analyzer is closed. </exception>
+            /// <exception cref="AlreadyClosedException"> if the <see cref="Analyzer"/> is disposed. </exception>
             protected internal void SetStoredValue(Analyzer analyzer, object storedValue)
             {
                 if (analyzer.storedValue == null)
@@ -429,7 +449,7 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// A predefined <seealso cref="ReuseStrategy"/>  that reuses the same components for
+        /// A predefined <see cref="ReuseStrategy"/>  that reuses the same components for
         /// every field.
         /// </summary>
         public static readonly ReuseStrategy GLOBAL_REUSE_STRATEGY =
@@ -438,16 +458,17 @@ namespace Lucene.Net.Analysis
 #pragma warning restore 612, 618
 
         /// <summary>
-        /// Implementation of <seealso cref="ReuseStrategy"/> that reuses the same components for
+        /// Implementation of <see cref="ReuseStrategy"/> that reuses the same components for
         /// every field. </summary>
-        /// @deprecated this implementation class will be hidden in Lucene 5.0.
-        ///   Use <seealso cref="Analyzer#GLOBAL_REUSE_STRATEGY"/> instead!
-        [Obsolete("this implementation class will be hidden in Lucene 5.0.")]
+        [Obsolete("this implementation class will be hidden in Lucene 5.0. Use Analyzer.GLOBAL_REUSE_STRATEGY instead!")]
         public sealed class GlobalReuseStrategy : ReuseStrategy
-        /// <summary>
-        /// Sole constructor. (For invocation by subclass constructors, typically implicit.) </summary>
-        /// @deprecated Don't create instances of this class, use <seealso cref="Analyzer#GLOBAL_REUSE_STRATEGY"/>
         {
+            /// <summary>
+            /// Sole constructor. (For invocation by subclass constructors, typically implicit.) </summary>
+            [Obsolete("Don't create instances of this class, use Analyzer.GLOBAL_REUSE_STRATEGY")]
+            public GlobalReuseStrategy()
+            { }
+
             public override TokenStreamComponents GetReusableComponents(Analyzer analyzer, string fieldName)
             {
                 return (TokenStreamComponents)GetStoredValue(analyzer);
@@ -460,8 +481,8 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// A predefined <seealso cref="ReuseStrategy"/> that reuses components per-field by
-        /// maintaining a Map of TokenStreamComponent per field name.
+        /// A predefined <see cref="ReuseStrategy"/> that reuses components per-field by
+        /// maintaining a Map of <see cref="TokenStreamComponents"/> per field name.
         /// </summary>
         public static readonly ReuseStrategy PER_FIELD_REUSE_STRATEGY =
 #pragma warning disable 612, 618
@@ -469,17 +490,17 @@ namespace Lucene.Net.Analysis
 #pragma warning restore 612, 618
 
         /// <summary>
-        /// Implementation of <seealso cref="ReuseStrategy"/> that reuses components per-field by
-        /// maintaining a Map of TokenStreamComponent per field name. </summary>
-        /// @deprecated this implementation class will be hidden in Lucene 5.0.
-        ///   Use <seealso cref="Analyzer#PER_FIELD_REUSE_STRATEGY"/> instead!
-        [Obsolete("this implementation class will be hidden in Lucene 5.0.")]
+        /// Implementation of <see cref="ReuseStrategy"/> that reuses components per-field by
+        /// maintaining a Map of <see cref="TokenStreamComponents"/> per field name.
+        /// </summary>
+        [Obsolete("this implementation class will be hidden in Lucene 5.0. Use Analyzer.PER_FIELD_REUSE_STRATEGY instead!")]
         public class PerFieldReuseStrategy : ReuseStrategy
+
         /// <summary>
-        /// Sole constructor. (For invocation by subclass constructors, typically implicit.) </summary>
-        /// @deprecated Don't create instances of this class, use <seealso cref="Analyzer#PER_FIELD_REUSE_STRATEGY"/>
+        /// Sole constructor. (For invocation by subclass constructors, typically implicit.)
+        /// </summary>
         {
-            [Obsolete]
+            [Obsolete("Don't create instances of this class, use Analyzer.PER_FIELD_REUSE_STRATEGY")]
             public PerFieldReuseStrategy()
             {
             }
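
As a minimal sketch of the CreateComponents/TokenStream contract these cleaned-up
docs describe (WhitespaceTokenizer, LowerCaseFilter, and LuceneVersion.LUCENE_48
are assumed to be available from the common analysis module; they are not part of
this commit), a concrete subclass and the documented consume workflow might read:

    using System;
    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Core;            // assumed: WhitespaceTokenizer, LowerCaseFilter
    using Lucene.Net.Analysis.TokenAttributes;
    using Lucene.Net.Util;

    public sealed class SimpleLowerCaseAnalyzer : Analyzer
    {
        protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
        {
            // The tokenizer is the source; the outer filter is the sink that
            // TokenStream(string, TextReader) hands back to callers.
            Tokenizer source = new WhitespaceTokenizer(LuceneVersion.LUCENE_48, reader);
            TokenStream filter = new LowerCaseFilter(LuceneVersion.LUCENE_48, source);
            return new TokenStreamComponents(source, filter);
        }
    }

    public static class Demo
    {
        public static void Main()
        {
            // Consume per the workflow the docs reference:
            // Reset(), IncrementToken() loop, End(), then Dispose() (via using).
            Analyzer analyzer = new SimpleLowerCaseAnalyzer();
            using (TokenStream ts = analyzer.TokenStream("body", new StringReader("Hello Lucene.NET")))
            {
                ICharTermAttribute term = ts.AddAttribute<ICharTermAttribute>();
                ts.Reset();
                while (ts.IncrementToken())
                {
                    Console.WriteLine(term.ToString());
                }
                ts.End();
            }
        }
    }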

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/AnalyzerWrapper.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/AnalyzerWrapper.cs b/src/Lucene.Net.Core/Analysis/AnalyzerWrapper.cs
index 17308e7..de40185 100644
--- a/src/Lucene.Net.Core/Analysis/AnalyzerWrapper.cs
+++ b/src/Lucene.Net.Core/Analysis/AnalyzerWrapper.cs
@@ -21,70 +21,69 @@ namespace Lucene.Net.Analysis
      */
 
     /// <summary>
-    /// Extension to <seealso cref="Analyzer"/> suitable for Analyzers which wrap
-    /// other Analyzers.
-    /// <p/>
-    /// <seealso cref="#getWrappedAnalyzer(String)"/> allows the Analyzer
-    /// to wrap multiple Analyzers which are selected on a per field basis.
-    /// <p/>
-    /// <seealso cref="#wrapComponents(String, Analyzer.TokenStreamComponents)"/> allows the
-    /// TokenStreamComponents of the wrapped Analyzer to then be wrapped
-    /// (such as adding a new <seealso cref="TokenFilter"/> to form new TokenStreamComponents.
+    /// Extension to <see cref="Analyzer"/> suitable for <see cref="Analyzer"/>s which wrap
+    /// other <see cref="Analyzer"/>s.
+    /// <para/>
+    /// <see cref="GetWrappedAnalyzer(string)"/> allows the <see cref="Analyzer"/>
+    /// to wrap multiple <see cref="Analyzer"/>s which are selected on a per field basis.
+    /// <para/>
+    /// <see cref="WrapComponents(string, Analyzer.TokenStreamComponents)"/> allows the
+    /// <see cref="Analyzer.TokenStreamComponents"/> of the wrapped <see cref="Analyzer"/> to then be wrapped
+    /// (such as adding a new <see cref="TokenFilter"/> to form new <see cref="Analyzer.TokenStreamComponents"/>).
     /// </summary>
     public abstract class AnalyzerWrapper : Analyzer
     {
         /// <summary>
-        /// Creates a new AnalyzerWrapper.  Since the <seealso cref="Analyzer.ReuseStrategy"/> of
-        /// the wrapped Analyzers are unknown, <seealso cref="#PER_FIELD_REUSE_STRATEGY"/> is assumed. </summary>
-        /// @deprecated Use <seealso cref="#AnalyzerWrapper(Analyzer.ReuseStrategy)"/>
-        /// and specify a valid <seealso cref="Analyzer.ReuseStrategy"/>, probably retrieved from the
-        /// wrapped analyzer using <seealso cref="#getReuseStrategy()"/>.
-        [Obsolete]
+        /// Creates a new <see cref="AnalyzerWrapper"/>.  Since the <see cref="Analyzer.ReuseStrategy"/> of
+        /// the wrapped <see cref="Analyzer"/>s are unknown, <see cref="Analyzer.PER_FIELD_REUSE_STRATEGY"/> is assumed.
+        /// </summary>
+        [Obsolete("Use AnalyzerWrapper(Analyzer.ReuseStrategy) and specify a valid Analyzer.ReuseStrategy, probably retrieved from the wrapped analyzer using Analyzer.Strategy.")]
         protected internal AnalyzerWrapper()
             : this(PER_FIELD_REUSE_STRATEGY)
         {
         }
 
         /// <summary>
-        /// Creates a new AnalyzerWrapper with the given reuse strategy.
-        /// <p>If you want to wrap a single delegate Analyzer you can probably
+        /// Creates a new <see cref="AnalyzerWrapper"/> with the given reuse strategy.
+        /// <para/>If you want to wrap a single delegate <see cref="Analyzer"/> you can probably
         /// reuse its strategy when instantiating this subclass:
-        /// {@code super(delegate.getReuseStrategy());}.
-        /// <p>If you choose different analyzers per field, use
-        /// <seealso cref="#PER_FIELD_REUSE_STRATEGY"/>. </summary>
-        /// <seealso cref= #getReuseStrategy() </seealso>
+        /// <c>base(innerAnalyzer.Strategy)</c>.
+        /// <para/>If you choose different analyzers per field, use
+        /// <see cref="Analyzer.PER_FIELD_REUSE_STRATEGY"/>.
+        /// </summary>
+        /// <seealso cref="Analyzer.Strategy"/>
         protected internal AnalyzerWrapper(ReuseStrategy reuseStrategy)
             : base(reuseStrategy)
         {
         }
 
         /// <summary>
-        /// Retrieves the wrapped Analyzer appropriate for analyzing the field with
+        /// Retrieves the wrapped <see cref="Analyzer"/> appropriate for analyzing the field with
         /// the given name
         /// </summary>
         /// <param name="fieldName"> Name of the field which is to be analyzed </param>
-        /// <returns> Analyzer for the field with the given name.  Assumed to be non-null </returns>
+        /// <returns> <see cref="Analyzer"/> for the field with the given name.  Assumed to be non-null </returns>
         protected abstract Analyzer GetWrappedAnalyzer(string fieldName);
 
         /// <summary>
-        /// Wraps / alters the given TokenStreamComponents, taken from the wrapped
-        /// Analyzer, to form new components. It is through this method that new
-        /// TokenFilters can be added by AnalyzerWrappers. By default, the given
+        /// Wraps / alters the given <see cref="Analyzer.TokenStreamComponents"/>, taken from the wrapped
+        /// <see cref="Analyzer"/>, to form new components. It is through this method that new
+        /// <see cref="TokenFilter"/>s can be added by <see cref="AnalyzerWrapper"/>s. By default, the given
         /// components are returned.
         /// </summary>
         /// <param name="fieldName">
         ///          Name of the field which is to be analyzed </param>
         /// <param name="components">
-        ///          TokenStreamComponents taken from the wrapped Analyzer </param>
-        /// <returns> Wrapped / altered TokenStreamComponents. </returns>
+        ///          <see cref="Analyzer.TokenStreamComponents"/> taken from the wrapped <see cref="Analyzer"/> </param>
+        /// <returns> Wrapped / altered <see cref="Analyzer.TokenStreamComponents"/>. </returns>
         protected virtual TokenStreamComponents WrapComponents(string fieldName, TokenStreamComponents components)
         {
             return components;
         }
 
         /// <summary>
-        /// Wraps / alters the given TextReader. Through this method AnalyzerWrappers can
-        /// implement <seealso cref="#initReader(String, Reader)"/>. By default, the given reader
+        /// Wraps / alters the given <see cref="TextReader"/>. Through this method <see cref="AnalyzerWrapper"/>s can
+        /// implement <see cref="InitReader(string, TextReader)"/>. By default, the given reader
         /// is returned.
         /// </summary>
         /// <param name="fieldName">

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/CachingTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/CachingTokenFilter.cs b/src/Lucene.Net.Core/Analysis/CachingTokenFilter.cs
index 5f868af..7fed679 100644
--- a/src/Lucene.Net.Core/Analysis/CachingTokenFilter.cs
+++ b/src/Lucene.Net.Core/Analysis/CachingTokenFilter.cs
@@ -22,13 +22,13 @@ namespace Lucene.Net.Analysis
     using AttributeSource = Lucene.Net.Util.AttributeSource;
 
     /// <summary>
-    /// this class can be used if the token attributes of a TokenStream
+    /// This class can be used if the token attributes of a <see cref="TokenStream"/>
     /// are intended to be consumed more than once. It caches
     /// all token attribute states locally in a List.
     ///
-    /// <P>CachingTokenFilter implements the optional method
-    /// <seealso cref="TokenStream#reset()"/>, which repositions the
-    /// stream to the first Token.
+    /// <para/><see cref="CachingTokenFilter"/> implements the optional method
+    /// <see cref="TokenStream.Reset()"/>, which repositions the
+    /// stream to the first <see cref="Token"/>.
     /// </summary>
     public sealed class CachingTokenFilter : TokenFilter
     {
@@ -37,9 +37,9 @@ namespace Lucene.Net.Analysis
         private AttributeSource.State finalState;
 
         /// <summary>
-        /// Create a new CachingTokenFilter around <code>input</code>,
+        /// Create a new <see cref="CachingTokenFilter"/> around <paramref name="input"/>,
         /// caching its token attributes, which can be replayed again
-        /// after a call to <seealso cref="#reset()"/>.
+        /// after a call to <see cref="Reset()"/>.
         /// </summary>
         public CachingTokenFilter(TokenStream input)
             : base(input)
@@ -76,10 +76,10 @@ namespace Lucene.Net.Analysis
 
         /// <summary>
         /// Rewinds the iterator to the beginning of the cached list.
-        /// <p>
-        /// Note that this does not call reset() on the wrapped tokenstream ever, even
-        /// the first time. You should reset() the inner tokenstream before wrapping
-        /// it with CachingTokenFilter.
+        /// <para/>
+        /// Note that this does not call <see cref="Reset()"/> on the wrapped tokenstream ever, even
+        /// the first time. You should <see cref="Reset()"/> the inner tokenstream before wrapping
+        /// it with <see cref="CachingTokenFilter"/>.
         /// </summary>
         public override void Reset()
         {
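
To make the Reset() note above concrete, a usage fragment (here "analyzer" and
"text" stand in for any Analyzer and input string): reset the inner stream
before wrapping, consume once to fill the cache, then Reset() the filter to
replay it.

    TokenStream inner = analyzer.TokenStream("body", text);
    inner.Reset(); // reset the inner stream first; CachingTokenFilter never will
    CachingTokenFilter cached = new CachingTokenFilter(inner);

    while (cached.IncrementToken()) { /* first pass fills the cache */ }
    cached.End();

    cached.Reset(); // rewinds to the beginning of the cached states
    while (cached.IncrementToken()) { /* second pass replays from the cache */ }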

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/CharFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/CharFilter.cs b/src/Lucene.Net.Core/Analysis/CharFilter.cs
index 23c10af..460fb79 100644
--- a/src/Lucene.Net.Core/Analysis/CharFilter.cs
+++ b/src/Lucene.Net.Core/Analysis/CharFilter.cs
@@ -21,21 +21,20 @@ namespace Lucene.Net.Analysis
      */
 
     /// <summary>
-    /// Subclasses of CharFilter can be chained to filter a TextReader
-    /// They can be used as <seealso cref="TextReader"/> with additional offset
-    /// correction. <seealso cref="Tokenizer"/>s will automatically use <seealso cref="#correctOffset"/>
-    /// if a CharFilter subclass is used.
-    /// <p>
-    /// this class is abstract: at a minimum you must implement <seealso cref="#read(char[], int, int)"/>,
-    /// transforming the input in some way from <seealso cref="#input"/>, and <seealso cref="#correct(int)"/>
+    /// Subclasses of <see cref="CharFilter"/> can be chained to filter a <see cref="TextReader"/>.
+    /// They can be used as <see cref="TextReader"/> with additional offset
+    /// correction. <see cref="Tokenizer"/>s will automatically use <see cref="CorrectOffset"/>
+    /// if a <see cref="CharFilter"/> subclass is used.
+    /// <para/>
+    /// This class is abstract: at a minimum you must implement <see cref="TextReader.Read(char[], int, int)"/>,
+    /// transforming the input in some way from <see cref="m_input"/>, and <see cref="Correct(int)"/>
     /// to adjust the offsets to match the originals.
-    /// <p>
+    /// <para/>
     /// You can optionally provide more efficient implementations of additional methods
-    /// like <seealso cref="#read()"/>, <seealso cref="#read(char[])"/>, <seealso cref="#read(java.nio.CharBuffer)"/>,
-    /// but this is not required.
-    /// <p>
-    /// For examples and integration with <seealso cref="Analyzer"/>, see the
-    /// <seealso cref="Lucene.Net.Analysis Analysis package documentation"/>.
+    /// like <see cref="TextReader.Read()"/>, but this is not required.
+    /// <para/>
+    /// For examples and integration with <see cref="Analyzer"/>, see the
+    /// <see cref="Lucene.Net.Analysis"/> namespace documentation.
     /// </summary>
     // the way java.io.FilterReader should work!
     public abstract class CharFilter : TextReader
@@ -46,8 +45,8 @@ namespace Lucene.Net.Analysis
         protected internal readonly TextReader m_input;
 
         /// <summary>
-        /// Create a new CharFilter wrapping the provided reader. </summary>
-        /// <param name="input"> a TextReader, can also be a CharFilter for chaining. </param>
+        /// Create a new <see cref="CharFilter"/> wrapping the provided reader. </summary>
+        /// <param name="input"> a <see cref="TextReader"/>, can also be a <see cref="CharFilter"/> for chaining. </param>
         public CharFilter(TextReader input)
         {
             this.m_input = input;
@@ -55,10 +54,10 @@ namespace Lucene.Net.Analysis
 
         /// <summary>
         /// Closes the underlying input stream.
-        /// <p>
+        /// <para/>
         /// <b>NOTE:</b>
-        /// The default implementation closes the input TextReader, so
-        /// be sure to call <code>super.Dispose()</code> when overriding this method.
+        /// The default implementation closes the input <see cref="TextReader"/>, so
+        /// be sure to call <c>base.Dispose()</c> when overriding this method.
         /// </summary>
         protected override void Dispose(bool disposing)
         {
@@ -75,7 +74,7 @@ namespace Lucene.Net.Analysis
 
         /// <summary>
         /// Chains the corrected offset through the input
-        /// CharFilter(s).
+        /// <see cref="CharFilter"/>(s).
         /// </summary>
         public int CorrectOffset(int currentOff)
         {
@@ -110,9 +109,10 @@ namespace Lucene.Net.Analysis
 
         /// <summary>
         /// Tells whether this stream is ready to be read.
-        /// 
-        /// True if the next read() is guaranteed not to block for input, false otherwise. Note that returning false does not guarantee that the next read will block.
-        /// 
+        /// <para/>
+        /// True if the next <see cref="TextReader.Read()"/> is guaranteed not to block for input, false otherwise. Note 
+        /// that returning false does not guarantee that the next read will block.
+        /// <para/>
         /// LUCENENET specific. Moved here from the Java Reader class so it can be overridden to provide reader buffering.
         /// </summary>
         public virtual bool Ready()
@@ -121,8 +121,9 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Tells whether this stream supports the mark() operation. The default implementation always returns false. Subclasses should override this method.
-        /// 
+        /// Tells whether this stream supports the <see cref="Mark(int)"/> operation. The default implementation always 
+        /// returns false. Subclasses should override this method.
+        /// <para/>
         /// LUCENENET specific. Moved here from the Java Reader class so it can be overridden to provide reader buffering.
         /// </summary>
         /// <returns>true if and only if this stream supports the mark operation.</returns>
@@ -132,8 +133,9 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Marks the present position in the stream. Subsequent calls to reset() will attempt to reposition the stream to this point. Not all character-input streams support the mark() operation.
-        /// 
+        /// Marks the present position in the stream. Subsequent calls to <see cref="Reset"/> will attempt to 
+        /// reposition the stream to this point. Not all character-input streams support the <see cref="Mark(int)"/> operation.
+        /// <para/>
         /// LUCENENET specific. Moved here from the Java Reader class so it can be overridden to provide reader buffering.
         /// </summary>
         /// <param name="readAheadLimit">Limit on the number of characters that may be read while still preserving the mark. After 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/NumericTokenStream.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/NumericTokenStream.cs b/src/Lucene.Net.Core/Analysis/NumericTokenStream.cs
index 039c171..8615738 100644
--- a/src/Lucene.Net.Core/Analysis/NumericTokenStream.cs
+++ b/src/Lucene.Net.Core/Analysis/NumericTokenStream.cs
@@ -24,58 +24,62 @@ namespace Lucene.Net.Analysis
      */
 
     /// <summary>
-    /// <b>Expert:</b> this class provides a <seealso cref="TokenStream"/>
-    /// for indexing numeric values that can be used by {@link
-    /// NumericRangeQuery} or <seealso cref="NumericRangeFilter"/>.
+    /// <b>Expert:</b> this class provides a <see cref="TokenStream"/>
+    /// for indexing numeric values that can be used by <see cref="Search.NumericRangeQuery"/>
+    /// or <see cref="Search.NumericRangeFilter"/>.
     ///
-    /// <p>Note that for simple usage, <seealso cref="IntField"/>, {@link
-    /// LongField}, <seealso cref="FloatField"/> or <seealso cref="DoubleField"/> is
+    /// <para/>Note that for simple usage, <see cref="Documents.Int32Field"/>, <see cref="Documents.Int64Field"/>, 
+    /// <see cref="Documents.SingleField"/> or <see cref="Documents.DoubleField"/> is
     /// recommended.  These fields disable norms and
     /// term freqs, as they are not usually needed during
     /// searching.  If you need to change these settings, you
     /// should use this class.
     ///
-    /// <p>Here's an example usage, for an <code>int</code> field:
+    /// <para/>Here's an example usage, for an <see cref="int"/> field:
     ///
-    /// <pre class="prettyprint">
-    ///  FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED);
-    ///  fieldType.setOmitNorms(true);
-    ///  fieldType.setIndexOptions(IndexOptions.DOCS_ONLY);
-    ///  Field field = new Field(name, new NumericTokenStream(precisionStep).setIntValue(value), fieldType);
-    ///  document.add(field);
-    /// </pre>
+    /// <code>
+    ///     FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED)
+    ///     {
+    ///         OmitNorms = true,
+    ///         IndexOptions = IndexOptions.DOCS_ONLY
+    ///     };
+    ///     Field field = new Field(name, new NumericTokenStream(precisionStep).SetInt32Value(value), fieldType);
+    ///     document.Add(field);
+    /// </code>
     ///
-    /// <p>For optimal performance, re-use the TokenStream and Field instance
+    /// <para/>For optimal performance, re-use the <see cref="TokenStream"/> and <see cref="Documents.Field"/> instance
     /// for more than one document:
     ///
-    /// <pre class="prettyprint">
-    ///  NumericTokenStream stream = new NumericTokenStream(precisionStep);
-    ///  FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED);
-    ///  fieldType.setOmitNorms(true);
-    ///  fieldType.setIndexOptions(IndexOptions.DOCS_ONLY);
-    ///  Field field = new Field(name, stream, fieldType);
-    ///  Document document = new Document();
-    ///  document.add(field);
+    /// <code>
+    ///     NumericTokenStream stream = new NumericTokenStream(precisionStep);
+    ///     FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED)
+    ///     {
+    ///         OmitNorms = true,
+    ///         IndexOptions = IndexOptions.DOCS_ONLY
+    ///     };
+    ///     Field field = new Field(name, stream, fieldType);
+    ///     Document document = new Document();
+    ///     document.Add(field);
     ///
-    ///  for(all documents) {
-    ///    stream.setIntValue(value)
-    ///    writer.addDocument(document);
-    ///  }
-    /// </pre>
+    ///     for(all documents) 
+    ///     {
+    ///         stream.SetInt32Value(value)
+    ///         writer.AddDocument(document);
+    ///     }
+    /// </code>
     ///
-    /// <p>this stream is not intended to be used in analyzers;
+    /// <para>This stream is not intended to be used in analyzers;
     /// it's more for iterating the different precisions during
-    /// indexing a specific numeric value.</p>
+    /// indexing a specific numeric value.</para>
     ///
-    /// <p><b>NOTE</b>: as token streams are only consumed once
+    /// <para><b>NOTE</b>: as token streams are only consumed once
     /// the document is added to the index, if you index more
-    /// than one numeric field, use a separate <code>NumericTokenStream</code>
-    /// instance for each.</p>
+    /// than one numeric field, use a separate <see cref="NumericTokenStream"/>
+    /// instance for each.</para>
     ///
-    /// <p>See <seealso cref="NumericRangeQuery"/> for more details on the
-    /// <a
-    /// href="../search/NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
-    /// parameter as well as how numeric fields work under the hood.</p>
+    /// <para>See <see cref="Search.NumericRangeQuery"/> for more details on the
+    /// <c>precisionStep</c> parameter as well as how numeric fields work under the hood.
+    /// </para>
     ///
     /// @since 2.9
     /// </summary>
@@ -108,11 +112,11 @@ namespace Lucene.Net.Analysis
             int Shift { get; set; }
 
             /// <summary>
-            /// Returns current token's raw value as {@code long} with all <seealso cref="#getShift"/> applied, undefined before first token </summary>
+            /// Returns current token's raw value as <see cref="long"/> with all <see cref="Shift"/> applied, undefined before first token </summary>
             long RawValue { get; }
 
             /// <summary>
-            /// Returns value size in bits (32 for {@code float}, {@code int}; 64 for {@code double}, {@code long}) </summary>
+            /// Returns value size in bits (32 for <see cref="float"/>, <see cref="int"/>; 64 for <see cref="double"/>, <see cref="long"/>) </summary>
             int ValueSize { get; }
 
             /// <summary>
@@ -150,7 +154,7 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Implementation of <seealso cref="NumericTermAttribute"/>.
+        /// Implementation of <see cref="INumericTermAttribute"/>.
         /// @lucene.internal
         /// @since 4.0
         /// </summary>
@@ -160,6 +164,10 @@ namespace Lucene.Net.Analysis
             private int _precisionStep = 0;
             private readonly BytesRef _bytes = new BytesRef();
 
+            /// <summary>
+            /// Creates, but does not yet initialize this attribute instance
+            /// </summary>
+            /// <seealso cref="Init(long, int, int, int)"/>
             public NumericTermAttribute()
             {
                 ValueSize = 0;
@@ -234,9 +242,9 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Creates a token stream for numeric values using the default <code>precisionStep</code>
-        /// <seealso cref="NumericUtils#PRECISION_STEP_DEFAULT"/> (4). The stream is not yet initialized,
-        /// before using set a value using the various set<em>???</em>Value() methods.
+        /// Creates a token stream for numeric values using the default <c>precisionStep</c>
+        /// <see cref="NumericUtils.PRECISION_STEP_DEFAULT"/> (4). The stream is not yet initialized,
+        /// before using, set a value using the various Set<em>???</em>Value() methods.
         /// </summary>
         public NumericTokenStream()
             : this(AttributeSource.AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, NumericUtils.PRECISION_STEP_DEFAULT)
@@ -246,8 +254,8 @@ namespace Lucene.Net.Analysis
 
         /// <summary>
         /// Creates a token stream for numeric values with the specified
-        /// <code>precisionStep</code>. The stream is not yet initialized,
-        /// before using set a value using the various set<em>???</em>Value() methods.
+        /// <paramref name="precisionStep"/>. The stream is not yet initialized,
+        /// before using, set a value using the various Set<em>???</em>Value() methods.
         /// </summary>
         public NumericTokenStream(int precisionStep)
             : this(AttributeSource.AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, precisionStep)
@@ -257,10 +265,10 @@ namespace Lucene.Net.Analysis
 
         /// <summary>
         /// Expert: Creates a token stream for numeric values with the specified
-        /// <code>precisionStep</code> using the given
-        /// <seealso cref="Lucene.Net.Util.AttributeSource.AttributeFactory"/>.
+        /// <paramref name="precisionStep"/> using the given
+        /// <see cref="Lucene.Net.Util.AttributeSource.AttributeFactory"/>.
         /// The stream is not yet initialized,
-        /// before using set a value using the various set<em>???</em>Value() methods.
+        /// before using, set a value using the various Set<em>???</em>Value() methods.
         /// </summary>
         public NumericTokenStream(AttributeSource.AttributeFactory factory, int precisionStep)
             : base(new NumericAttributeFactory(factory))
@@ -275,13 +283,13 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Initializes the token stream with the supplied <code>long</code> value. 
+        /// Initializes the token stream with the supplied <see cref="long"/> value. 
         /// <para/>
         /// NOTE: This was setLongValue() in Lucene
         /// </summary>
-        /// <param name="value"> the value, for which this TokenStream should enumerate tokens. </param>
+        /// <param name="value"> the value, for which this <see cref=""TokenStream/> should enumerate tokens. </param>
         /// <returns> this instance, because of this you can use it the following way:
-        /// <code>new Field(name, new NumericTokenStream(precisionStep).setLongValue(value))</code> </returns>
+        /// <code>new Field(name, new NumericTokenStream(precisionStep).SetInt64Value(value))</code> </returns>
         public NumericTokenStream SetInt64Value(long value)
         {
             numericAtt.Init(value, valSize = 64, precisionStep, -precisionStep);
@@ -289,13 +297,13 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Initializes the token stream with the supplied <code>int</code> value.
+        /// Initializes the token stream with the supplied <see cref="int"/> value.
         /// <para/>
         /// NOTE: This was setIntValue() in Lucene
         /// </summary>
-        /// <param name="value"> the value, for which this TokenStream should enumerate tokens. </param>
+        /// <param name="value"> the value, for which this <see cref="TokenStream"/> should enumerate tokens. </param>
         /// <returns> this instance, because of this you can use it the following way:
-        /// <code>new Field(name, new NumericTokenStream(precisionStep).setIntValue(value))</code> </returns>
+        /// <code>new Field(name, new NumericTokenStream(precisionStep).SetInt32Value(value))</code> </returns>
         public NumericTokenStream SetInt32Value(int value)
         {
             numericAtt.Init(value, valSize = 32, precisionStep, -precisionStep);
@@ -303,10 +311,10 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Initializes the token stream with the supplied <code>double</code> value. </summary>
-        /// <param name="value"> the value, for which this TokenStream should enumerate tokens. </param>
+        /// Initializes the token stream with the supplied <see cref="double"/> value. </summary>
+        /// <param name="value"> the value, for which this <see cref="TokenStream"/> should enumerate tokens. </param>
         /// <returns> this instance, because of this you can use it the following way:
-        /// <code>new Field(name, new NumericTokenStream(precisionStep).setDoubleValue(value))</code> </returns>
+        /// <code>new Field(name, new NumericTokenStream(precisionStep).SetDoubleValue(value))</code> </returns>
         public NumericTokenStream SetDoubleValue(double value)
         {
             numericAtt.Init(NumericUtils.DoubleToSortableInt64(value), valSize = 64, precisionStep, -precisionStep);
@@ -314,13 +322,13 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Initializes the token stream with the supplied <code>float</code> value. 
+        /// Initializes the token stream with the supplied <see cref="float"/> value. 
         /// <para/>
         /// NOTE: This was setFloatValue() in Lucene
         /// </summary>
-        /// <param name="value"> the value, for which this TokenStream should enumerate tokens. </param>
+        /// <param name="value"> the value, for which this <see cref="TokenStream"/> should enumerate tokens. </param>
         /// <returns> this instance, because of this you can use it the following way:
-        /// <code>new Field(name, new NumericTokenStream(precisionStep).setFloatValue(value))</code> </returns>
+        /// <code>new Field(name, new NumericTokenStream(precisionStep).SetSingleValue(value))</code> </returns>
         public NumericTokenStream SetSingleValue(float value)
         {
             numericAtt.Init(NumericUtils.SingleToSortableInt32(value), valSize = 32, precisionStep, -precisionStep);
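
The reuse example in the class summary above is pseudocode ("for(all documents)");
a compilable rendering with the renamed setters, assuming an IndexWriter "writer",
an IEnumerable<int> "values", and an illustrative "price" field, might read:

    using Lucene.Net.Analysis;
    using Lucene.Net.Documents;
    using Lucene.Net.Index;
    using Lucene.Net.Util;

    NumericTokenStream stream = new NumericTokenStream(NumericUtils.PRECISION_STEP_DEFAULT);
    FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED)
    {
        OmitNorms = true,
        IndexOptions = IndexOptions.DOCS_ONLY
    };
    Field field = new Field("price", stream, fieldType);
    Document document = new Document();
    document.Add(field);

    foreach (int value in values)
    {
        stream.SetInt32Value(value); // was setIntValue() in Lucene
        writer.AddDocument(document);
    }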

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/ReusableStringReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/ReusableStringReader.cs b/src/Lucene.Net.Core/Analysis/ReusableStringReader.cs
index 0f10364..e764ab1 100644
--- a/src/Lucene.Net.Core/Analysis/ReusableStringReader.cs
+++ b/src/Lucene.Net.Core/Analysis/ReusableStringReader.cs
@@ -20,7 +20,8 @@ namespace Lucene.Net.Analysis
      */
 
     /// <summary>
-    /// Internal class to enable reuse of the string reader by <seealso cref="Analyzer#tokenStream(String,String)"/> </summary>
+    /// Internal class to enable reuse of the string reader by <see cref="Analyzer.TokenStream(string, string)"/>
+    /// </summary>
     public sealed class ReusableStringReader : System.IO.TextReader
     {
         private int pos = 0, size = 0;

