lucenenet-commits mailing list archives

From nightowl...@apache.org
Subject [07/27] lucenenet git commit: Lucene.Net.Core.Analysis: Cleaned up documentation comments
Date Thu, 02 Mar 2017 01:10:50 GMT
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/Token.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/Token.cs b/src/Lucene.Net.Core/Analysis/Token.cs
index da3906f..71ef711 100644
--- a/src/Lucene.Net.Core/Analysis/Token.cs
+++ b/src/Lucene.Net.Core/Analysis/Token.cs
@@ -28,92 +28,93 @@ namespace Lucene.Net.Analysis
     using IAttributeReflector = Lucene.Net.Util.IAttributeReflector;
 
     /// <summary>
-    ///  A Token is an occurrence of a term from the text of a field.  It consists of
-    ///  a term's text, the start and end offset of the term in the text of the field,
-    ///  and a type string.
-    ///  <p>
-    ///  The start and end offsets permit applications to re-associate a token with
-    ///  its source text, e.g., to display highlighted query terms in a document
-    ///  browser, or to show matching text fragments in a <abbr title="KeyWord In Context">KWIC</abbr>
-    ///  display, etc.
-    ///  <p>
-    ///  The type is a string, assigned by a lexical analyzer
-    ///  (a.k.a. tokenizer), naming the lexical or syntactic class that the token
-    ///  belongs to.  For example an end of sentence marker token might be implemented
-    ///  with type "eos".  The default token type is "word".
-    ///  <p>
-    ///  A Token can optionally have metadata (a.k.a. payload) in the form of a variable
-    ///  length byte array. Use <seealso cref="DocsAndPositionsEnum#getPayload()"/> to retrieve the
-    ///  payloads from the index.
+    /// A <see cref="Token"/> is an occurrence of a term from the text of a field.  It consists of
+    /// a term's text, the start and end offset of the term in the text of the field,
+    /// and a type string.
+    /// <para/>
+    /// The start and end offsets permit applications to re-associate a token with
+    /// its source text, e.g., to display highlighted query terms in a document
+    /// browser, or to show matching text fragments in a KWIC (KeyWord In Context)
+    /// display, etc.
+    /// <para/>
+    /// The type is a string, assigned by a lexical analyzer
+    /// (a.k.a. tokenizer), naming the lexical or syntactic class that the token
+    /// belongs to.  For example, an end-of-sentence marker token might be implemented
+    /// with type "eos".  The default token type is "word".
+    /// <para/>
+    /// A Token can optionally have metadata (a.k.a. payload) in the form of a variable
+    /// length byte array. Use <see cref="Index.DocsAndPositionsEnum.GetPayload()"/> to retrieve the
+    /// payloads from the index.
     ///
-    ///  <br><br>
+    /// <para/>
     ///
-    ///  <p><b>NOTE:</b> As of 2.9, Token implements all <seealso cref="Attribute"/> interfaces
-    ///  that are part of core Lucene and can be found in the {@code tokenattributes} subpackage.
-    ///  Even though it is not necessary to use Token anymore, with the new TokenStream API it can
-    ///  be used as convenience class that implements all <seealso cref="Attribute"/>s, which is especially useful
-    ///  to easily switch from the old to the new TokenStream API.
+    /// <para/><b>NOTE:</b> As of 2.9, Token implements all <see cref="IAttribute"/> interfaces
+    /// that are part of core Lucene and can be found in the <see cref="TokenAttributes"/> namespace.
+    /// Even though it is not necessary to use <see cref="Token"/> anymore, with the new <see cref="TokenStream"/> API it can
+    /// be used as a convenience class that implements all <see cref="IAttribute"/>s, which is especially useful
+    /// to easily switch from the old to the new <see cref="TokenStream"/> API.
     ///
-    ///  <br><br>
+    /// <para/>
     ///
-    ///  <p>Tokenizers and TokenFilters should try to re-use a Token
-    ///  instance when possible for best performance, by
-    ///  implementing the <seealso cref="TokenStream#IncrementToken()"/> API.
-    ///  Failing that, to create a new Token you should first use
-    ///  one of the constructors that starts with null text.  To load
-    ///  the token from a char[] use <seealso cref="#copyBuffer(char[], int, int)"/>.
-    ///  To load from a String use <seealso cref="#SetEmpty()"/> followed by <seealso cref="#append(CharSequence)"/> or <seealso cref="#append(CharSequence, int, int)"/>.
-    ///  Alternatively you can get the Token's termBuffer by calling either <seealso cref="#buffer()"/>,
-    ///  if you know that your text is shorter than the capacity of the termBuffer
-    ///  or <seealso cref="#resizeBuffer(int)"/>, if there is any possibility
-    ///  that you may need to grow the buffer. Fill in the characters of your term into this
-    ///  buffer, with <seealso cref="string#getChars(int, int, char[], int)"/> if loading from a string,
-    ///  or with <seealso cref="System#arraycopy(Object, int, Object, int, int)"/>, and finally call <seealso cref="#setLength(int)"/> to
-    ///  set the length of the term text.  See <a target="_top"
-    ///  href="https://issues.apache.org/jira/browse/LUCENE-969">LUCENE-969</a>
-    ///  for details.</p>
-    ///  <p>Typical Token reuse patterns:
-    ///  <ul>
-    ///  <li> Copying text from a string (type is reset to <seealso cref="#DEFAULT_TYPE"/> if not specified):<br/>
-    ///  <pre class="prettyprint">
-    ///    return reusableToken.reinit(string, startOffset, endOffset[, type]);
-    ///  </pre>
-    ///  </li>
-    ///  <li> Copying some text from a string (type is reset to <seealso cref="#DEFAULT_TYPE"/> if not specified):<br/>
-    ///  <pre class="prettyprint">
-    ///    return reusableToken.reinit(string, 0, string.length(), startOffset, endOffset[, type]);
-    ///  </pre>
-    ///  </li>
-    ///  </li>
-    ///  <li> Copying text from char[] buffer (type is reset to <seealso cref="#DEFAULT_TYPE"/> if not specified):<br/>
-    ///  <pre class="prettyprint">
-    ///    return reusableToken.reinit(buffer, 0, buffer.length, startOffset, endOffset[, type]);
-    ///  </pre>
-    ///  </li>
-    ///  <li> Copying some text from a char[] buffer (type is reset to <seealso cref="#DEFAULT_TYPE"/> if not specified):<br/>
-    ///  <pre class="prettyprint">
-    ///    return reusableToken.reinit(buffer, start, end - start, startOffset, endOffset[, type]);
-    ///  </pre>
-    ///  </li>
-    ///  <li> Copying from one one Token to another (type is reset to <seealso cref="#DEFAULT_TYPE"/> if not specified):<br/>
-    ///  <pre class="prettyprint">
-    ///    return reusableToken.reinit(source.buffer(), 0, source.length(), source.StartOffset, source.EndOffset[, source.type()]);
-    ///  </pre>
-    ///  </li>
-    ///  </ul>
-    ///  A few things to note:
-    ///  <ul>
-    ///  <li>clear() initializes all of the fields to default values. this was changed in contrast to Lucene 2.4, but should affect no one.</li>
-    ///  <li>Because <code>TokenStreams</code> can be chained, one cannot assume that the <code>Token's</code> current type is correct.</li>
-    ///  <li>The startOffset and endOffset represent the start and offset in the source text, so be careful in adjusting them.</li>
-    ///  <li>When caching a reusable token, clone it. When injecting a cached token into a stream that can be reset, clone it again.</li>
-    ///  </ul>
-    ///  </p>
-    ///  <p>
-    ///  <b>Please note:</b> With Lucene 3.1, the <code><seealso cref="#toString toString()"/></code> method had to be changed to match the
-    ///  <seealso cref="CharSequence"/> interface introduced by the interface <seealso cref="Lucene.Net.Analysis.tokenattributes.CharTermAttribute"/>.
-    ///  this method now only prints the term text, no additional information anymore.
-    ///  </p>
+    /// <para><see cref="Tokenizer"/>s and <see cref="TokenFilter"/>s should try to re-use a <see cref="Token"/>
+    /// instance when possible for best performance, by
+    /// implementing the <see cref="TokenStream.IncrementToken()"/> API.
+    /// Failing that, to create a new <see cref="Token"/> you should first use
+    /// one of the constructors that starts with null text.  To load
+    /// the token from a char[] use <see cref="ICharTermAttribute.CopyBuffer(char[], int, int)"/>.
+    /// To load from a <see cref="string"/> use <see cref="ICharTermAttribute.SetEmpty()"/> followed by 
+    /// <see cref="ICharTermAttribute.Append(string)"/> or <see cref="ICharTermAttribute.Append(string, int, int)"/>.
+    /// Alternatively you can get the <see cref="Token"/>'s termBuffer by calling either <see cref="ICharTermAttribute.Buffer"/>,
+    /// if you know that your text is shorter than the capacity of the termBuffer
+    /// or <see cref="ICharTermAttribute.ResizeBuffer(int)"/>, if there is any possibility
+    /// that you may need to grow the buffer. Fill in the characters of your term into this
+    /// buffer, with <see cref="string.ToCharArray(int, int)"/> if loading from a string,
+    /// or with <see cref="System.Array.Copy(System.Array, int, System.Array, int, int)"/>, 
+    /// and finally call <see cref="ICharTermAttribute.SetLength(int)"/> to
+    /// set the length of the term text.  See <a target="_top"
+    /// href="https://issues.apache.org/jira/browse/LUCENE-969">LUCENE-969</a>
+    /// for details.</para>
+    /// <para>Typical Token reuse patterns:
+    /// <list type="bullet">
+    ///     <item> Copying text from a string (type is reset to <see cref="TypeAttribute_Fields.DEFAULT_TYPE"/> if not specified):
+    ///     <code>
+    ///         return reusableToken.Reinit(string, startOffset, endOffset[, type]);
+    ///     </code>
+    ///     </item>
+    ///     <item> Copying some text from a string (type is reset to <see cref="TypeAttribute_Fields.DEFAULT_TYPE"/> if not specified):
+    ///     <code>
+    ///         return reusableToken.Reinit(string, 0, string.Length, startOffset, endOffset[, type]);
+    ///     </code>
+    ///     </item>
+    ///     <item> Copying text from char[] buffer (type is reset to <see cref="TypeAttribute_Fields.DEFAULT_TYPE"/> if not specified):
+    ///     <code>
+    ///         return reusableToken.Reinit(buffer, 0, buffer.Length, startOffset, endOffset[, type]);
+    ///     </code>
+    ///     </item>
+    ///     <item> Copying some text from a char[] buffer (type is reset to <see cref="TypeAttribute_Fields.DEFAULT_TYPE"/> if not specified):
+    ///     <code>
+    ///         return reusableToken.Reinit(buffer, start, end - start, startOffset, endOffset[, type]);
+    ///     </code>
+    ///     </item>
+    ///     <item> Copying from one <see cref="Token"/> to another (type is reset to <see cref="TypeAttribute_Fields.DEFAULT_TYPE"/> if not specified):
+    ///     <code>
+    ///         return reusableToken.Reinit(source.Buffer, 0, source.Length, source.StartOffset, source.EndOffset[, source.Type]);
+    ///     </code>
+    ///     </item>
+    /// </list>
+    /// A few things to note:
+    /// <list type="bullet">
+    ///     <item><see cref="Clear()"/> initializes all of the fields to default values. this was changed in contrast to Lucene 2.4, but should affect no one.</item>
+    ///     <item>Because <see cref="TokenStream"/>s can be chained, one cannot assume that the <see cref="Token"/>'s current type is correct.</item>
+    ///     <item>The startOffset and endOffset represent the start and end offsets in the source text, so be careful in adjusting them.</item>
+    ///     <item>When caching a reusable token, clone it. When injecting a cached token into a stream that can be reset, clone it again.</item>
+    /// </list>
+    /// </para>
+    /// <para>
+    /// <b>Please note:</b> With Lucene 3.1, the <see cref="CharTermAttribute.ToString()"/> method had to be changed to match the
+    /// <see cref="Support.ICharSequence"/> interface introduced by the interface <see cref="ICharTermAttribute"/>.
+    /// This method now only prints the term text, with no additional information.
+    /// </para>
     /// </summary>
     public class Token : CharTermAttribute, ITypeAttribute, IPositionIncrementAttribute, IFlagsAttribute, IOffsetAttribute, IPayloadAttribute, IPositionLengthAttribute
     {
@@ -125,16 +126,18 @@ namespace Lucene.Net.Analysis
         private int positionLength = 1;
 
         /// <summary>
-        /// Constructs a Token will null text. </summary>
+        /// Constructs a <see cref="Token"/> will null text. </summary>
         public Token()
         {
         }
 
         /// <summary>
-        /// Constructs a Token with null text and start & end
-        ///  offsets. </summary>
-        ///  <param name="start"> start offset in the source text </param>
-        ///  <param name="end"> end offset in the source text  </param>
+        /// Constructs a <see cref="Token"/> with null text and start &amp; end
+        /// offsets. </summary>
+        /// <param name="start"> start offset in the source text </param>
+        /// <param name="end"> end offset in the source text  </param>
         public Token(int start, int end)
         {
             CheckOffsets(start, end);
@@ -143,11 +146,11 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Constructs a Token with null text and start & end
-        ///  offsets plus the Token type. </summary>
-        ///  <param name="start"> start offset in the source text </param>
-        ///  <param name="end"> end offset in the source text </param>
-        ///  <param name="typ"> the lexical type of this Token  </param>
+        /// Constructs a <see cref="Token"/> with null text and start &amp; end
+        /// offsets plus the <see cref="Token"/> type. </summary>
+        /// <param name="start"> start offset in the source text </param>
+        /// <param name="end"> end offset in the source text </param>
+        /// <param name="typ"> the lexical type of this <see cref="Token"/>  </param>
         public Token(int start, int end, string typ)
         {
             CheckOffsets(start, end);
@@ -157,11 +160,11 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Constructs a Token with null text and start & end
-        ///  offsets plus flags. NOTE: flags is EXPERIMENTAL. </summary>
-        ///  <param name="start"> start offset in the source text </param>
-        ///  <param name="end"> end offset in the source text </param>
-        ///  <param name="flags"> The bits to set for this token </param>
+        /// Constructs a <see cref="Token"/> with null text and start &amp; end
+        /// offsets plus flags. NOTE: flags is EXPERIMENTAL. </summary>
+        /// <param name="start"> start offset in the source text </param>
+        /// <param name="end"> end offset in the source text </param>
+        /// <param name="flags"> The bits to set for this token </param>
         public Token(int start, int end, int flags)
         {
             CheckOffsets(start, end);
@@ -171,14 +174,14 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Constructs a Token with the given term text, and start
-        ///  & end offsets.  The type defaults to "word."
-        ///  <b>NOTE:</b> for better indexing speed you should
-        ///  instead use the char[] termBuffer methods to set the
-        ///  term text. </summary>
-        ///  <param name="text"> term text </param>
-        ///  <param name="start"> start offset in the source text </param>
-        ///  <param name="end"> end offset in the source text </param>
+        /// Constructs a <see cref="Token"/> with the given term text, and start
+        /// &amp; end offsets.  The type defaults to "word."
+        /// <b>NOTE:</b> for better indexing speed you should
+        /// instead use the char[] termBuffer methods to set the
+        /// term text. </summary>
+        /// <param name="text"> term text </param>
+        /// <param name="start"> start offset in the source text </param>
+        /// <param name="end"> end offset in the source text </param>
         public Token(string text, int start, int end)
         {
             CheckOffsets(start, end);
@@ -188,14 +191,14 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Constructs a Token with the given text, start and end
-        ///  offsets, & type.  <b>NOTE:</b> for better indexing
-        ///  speed you should instead use the char[] termBuffer
-        ///  methods to set the term text. </summary>
-        ///  <param name="text"> term text </param>
-        ///  <param name="start"> start offset in the source text </param>
-        ///  <param name="end"> end offset in the source text </param>
-        ///  <param name="typ"> token type </param>
+        /// Constructs a <see cref="Token"/> with the given text, start and end
+        /// offsets, &amp; type.  <b>NOTE:</b> for better indexing
+        /// speed you should instead use the char[] termBuffer
+        /// methods to set the term text. </summary>
+        /// <param name="text"> term text </param>
+        /// <param name="start"> start offset in the source text </param>
+        /// <param name="end"> end offset in the source text </param>
+        /// <param name="typ"> token type </param>
         public Token(string text, int start, int end, string typ)
         {
             CheckOffsets(start, end);
@@ -206,10 +209,10 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        ///  Constructs a Token with the given text, start and end
-        ///  offsets, & type.  <b>NOTE:</b> for better indexing
-        ///  speed you should instead use the char[] termBuffer
-        ///  methods to set the term text. </summary>
+        /// Constructs a <see cref="Token"/> with the given text, start and end
+        /// offsets, &amp; type.  <b>NOTE:</b> for better indexing
+        /// speed you should instead use the char[] termBuffer
+        /// methods to set the term text. </summary>
         /// <param name="text"> term text </param>
         /// <param name="start"> start offset in the source text </param>
         /// <param name="end"> end offset in the source text </param>
@@ -224,9 +227,9 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        ///  Constructs a Token with the given term buffer (offset
-        ///  & length), start and end
-        ///  offsets </summary>
+        /// Constructs a <see cref="Token"/> with the given term buffer (offset
+        /// &amp; length), start and end offsets
+        /// </summary>
         /// <param name="startTermBuffer"> buffer containing term text </param>
         /// <param name="termBufferOffset"> the index in the buffer of the first character </param>
         /// <param name="termBufferLength"> number of valid characters in the buffer </param>
@@ -241,8 +244,10 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// {@inheritDoc} </summary>
-        /// <seealso cref= PositionIncrementAttribute </seealso>
+        /// Gets or Sets the position increment (the distance from the prior term). The default value is one.
+        /// </summary>
+        /// <exception cref="System.ArgumentException"> if value is set to a negative value. </exception>
+        /// <seealso cref="IPositionIncrementAttribute"/>
         public virtual int PositionIncrement
         {
             set
@@ -260,8 +265,14 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// {@inheritDoc} </summary>
-        /// <seealso cref= PositionLengthAttribute </seealso>
+        /// Gets or Sets the position length of this <see cref="Token"/> (how many positions this token
+        /// spans).
+        /// <para/>
+        /// The default value is one.
+        /// </summary>
+        /// <exception cref="System.ArgumentException"> if value
+        ///         is set to zero or negative. </exception>
+        /// <seealso cref="IPositionLengthAttribute"/>
         public virtual int PositionLength
         {
             set
@@ -275,24 +286,41 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// {@inheritDoc} </summary>
-        /// <seealso cref= OffsetAttribute </seealso>
+        /// Returns this <see cref="Token"/>'s starting offset, the position of the first character
+        /// corresponding to this token in the source text.
+        /// <para/>
+        /// Note that the difference between <see cref="EndOffset"/> and <see cref="StartOffset"/>
+        /// may not be equal to termText.Length, as the term text may have been altered by a
+        /// stemmer or some other filter.
+        /// </summary>
+        /// <seealso cref="SetOffset(int, int)"/>
+        /// <seealso cref="IOffsetAttribute"/>
         public int StartOffset
         {
             get { return startOffset; }
         }
 
         /// <summary>
-        /// {@inheritDoc} </summary>
-        /// <seealso cref= OffsetAttribute </seealso>
+        /// Returns this <see cref="Token"/>'s ending offset, one greater than the position of the
+        /// last character corresponding to this token in the source text. The length
+        /// of the token in the source text is (<see cref="EndOffset"/> - <see cref="StartOffset"/>).
+        /// </summary>
+        /// <seealso cref="SetOffset(int, int)"/>
+        /// <seealso cref="IOffsetAttribute"/>
         public int EndOffset
         {
             get { return endOffset; }
         }
 
         /// <summary>
-        /// {@inheritDoc} </summary>
-        /// <seealso cref= OffsetAttribute </seealso>
+        /// Set the starting and ending offset.
+        /// </summary>
+        /// <exception cref="System.ArgumentException"> If <paramref name="startOffset"/> or <paramref name="endOffset"/>
+        ///         are negative, or if <paramref name="startOffset"/> is greater than
+        ///         <paramref name="endOffset"/> </exception>
+        /// <seealso cref="StartOffset"/>
+        /// <seealso cref="EndOffset"/>
+        /// <seealso cref="IOffsetAttribute"/>
         public virtual void SetOffset(int startOffset, int endOffset)
         {
             CheckOffsets(startOffset, endOffset);
@@ -300,7 +328,7 @@ namespace Lucene.Net.Analysis
             this.endOffset = endOffset;
         }
 
-        /// <summary>Returns this Token's lexical type.  Defaults to "word". </summary>
+        /// <summary>Gets or Sets this <see cref="Token"/>'s lexical type.  Defaults to "word". </summary>
         public string Type
         {
             get { return type; }
@@ -308,8 +336,12 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// {@inheritDoc} </summary>
-        /// <seealso cref= FlagsAttribute </seealso>
+        /// Get the bitset for any bits that have been set.
+        /// <para/>
+        /// This is completely distinct from <see cref="ITypeAttribute.Type" />, although they do share similar purposes.
+        /// The flags can be used to encode information about the token for use by other <see cref="Lucene.Net.Analysis.TokenFilter" />s.
+        /// </summary>
+        /// <seealso cref="IFlagsAttribute"/>
         public virtual int Flags
         {
             get
@@ -323,8 +355,9 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// {@inheritDoc} </summary>
-        /// <seealso cref= PayloadAttribute </seealso>
+        /// Gets or Sets this <see cref="Token"/>'s payload.
+        /// </summary>
+        /// <seealso cref="IPayloadAttribute"/>
         public virtual BytesRef Payload
         {
             get
@@ -363,10 +396,10 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Makes a clone, but replaces the term buffer &
-        /// start/end offset in the process.  this is more
+        /// Makes a clone, but replaces the term buffer &amp;
+        /// start/end offset in the process.  This is more
         /// efficient than doing a full clone (and then calling
-        /// <seealso cref="#copyBuffer"/>) because it saves a wasted copy of the old
+        /// <see cref="ICharTermAttribute.CopyBuffer"/>) because it saves a wasted copy of the old
         /// termBuffer.
         /// </summary>
         public virtual Token Clone(char[] newTermBuffer, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset)
@@ -431,11 +464,11 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Shorthand for calling <seealso cref="#clear"/>,
-        ///  <seealso cref="#copyBuffer(char[], int, int)"/>,
-        ///  <seealso cref="#setOffset"/>,
-        ///  <seealso cref="#setType"/> </summary>
-        ///  <returns> this Token instance  </returns>
+        /// Shorthand for calling <see cref="Clear"/>,
+        /// <see cref="CopyBuffer(char[], int, int)"/>,
+        /// <see cref="SetOffset"/>,
+        /// <see cref="Type"/> (set) </summary>
+        /// <returns> this <see cref="Token"/> instance  </returns>
         public virtual Token Reinit(char[] newTermBuffer, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset, string newType)
         {
             CheckOffsets(newStartOffset, newEndOffset);
@@ -450,11 +483,11 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Shorthand for calling <seealso cref="#clear"/>,
-        ///  <seealso cref="#copyBuffer(char[], int, int)"/>,
-        ///  <seealso cref="#setOffset"/>,
-        ///  <seealso cref="#setType"/> on Token.DEFAULT_TYPE </summary>
-        ///  <returns> this Token instance  </returns>
+        /// Shorthand for calling <see cref="Clear"/>,
+        /// <see cref="CopyBuffer(char[], int, int)"/>,
+        /// <see cref="SetOffset"/>,
+        /// <see cref="Type"/> (set) on <see cref="TypeAttribute_Fields.DEFAULT_TYPE"/> </summary>
+        /// <returns> this <see cref="Token"/> instance  </returns>
         public virtual Token Reinit(char[] newTermBuffer, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset)
         {
             CheckOffsets(newStartOffset, newEndOffset);
@@ -467,11 +500,11 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Shorthand for calling <seealso cref="#clear"/>,
-        ///  <seealso cref="#append(CharSequence)"/>,
-        ///  <seealso cref="#setOffset"/>,
-        ///  <seealso cref="#setType"/> </summary>
-        ///  <returns> this Token instance  </returns>
+        /// Shorthand for calling <see cref="Clear"/>,
+        /// <see cref="Append(string)"/>,
+        /// <see cref="SetOffset"/>,
+        /// <see cref="Type"/> (set) </summary>
+        /// <returns> this <see cref="Token"/> instance  </returns>
         public virtual Token Reinit(string newTerm, int newStartOffset, int newEndOffset, string newType)
         {
             CheckOffsets(newStartOffset, newEndOffset);
@@ -484,11 +517,11 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Shorthand for calling <seealso cref="#clear"/>,
-        ///  <seealso cref="#append(CharSequence, int, int)"/>,
-        ///  <seealso cref="#setOffset"/>,
-        ///  <seealso cref="#setType"/> </summary>
-        ///  <returns> this Token instance  </returns>
+        /// Shorthand for calling <see cref="Clear"/>,
+        /// <see cref="Append(string, int, int)"/>,
+        /// <see cref="SetOffset"/>,
+        /// <see cref="Type"/> (set) </summary>
+        /// <returns> this <see cref="Token"/> instance  </returns>
         public virtual Token Reinit(string newTerm, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset, string newType)
         {
             CheckOffsets(newStartOffset, newEndOffset);
@@ -501,11 +534,11 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Shorthand for calling <seealso cref="#clear"/>,
-        ///  <seealso cref="#append(CharSequence)"/>,
-        ///  <seealso cref="#setOffset"/>,
-        ///  <seealso cref="#setType"/> on Token.DEFAULT_TYPE </summary>
-        ///  <returns> this Token instance  </returns>
+        /// Shorthand for calling <see cref="Clear"/>,
+        /// <see cref="Append(string)"/>,
+        /// <see cref="SetOffset"/>,
+        /// <see cref="Type"/> (set) on <see cref="TypeAttribute_Fields.DEFAULT_TYPE"/> </summary>
+        /// <returns> this <see cref="Token"/> instance  </returns>
         public virtual Token Reinit(string newTerm, int newStartOffset, int newEndOffset)
         {
             CheckOffsets(newStartOffset, newEndOffset);
@@ -518,11 +551,11 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Shorthand for calling <seealso cref="#clear"/>,
-        ///  <seealso cref="#append(CharSequence, int, int)"/>,
-        ///  <seealso cref="#setOffset"/>,
-        ///  <seealso cref="#setType"/> on Token.DEFAULT_TYPE </summary>
-        ///  <returns> this Token instance  </returns>
+        /// Shorthand for calling <see cref="Clear"/>,
+        /// <see cref="Append(string, int, int)"/>,
+        /// <see cref="SetOffset"/>,
+        /// <see cref="Type"/> (set) on <see cref="TypeAttribute_Fields.DEFAULT_TYPE"/> </summary>
+        /// <returns> this <see cref="Token"/> instance  </returns>
         public virtual Token Reinit(string newTerm, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset)
         {
             CheckOffsets(newStartOffset, newEndOffset);
@@ -536,7 +569,7 @@ namespace Lucene.Net.Analysis
 
         /// <summary>
         /// Copy the prototype token's fields into this one. Note: Payloads are shared. </summary>
-        /// <param name="prototype"> source Token to copy fields from </param>
+        /// <param name="prototype"> source <see cref="Token"/> to copy fields from </param>
         public virtual void Reinit(Token prototype)
         {
             CopyBuffer(prototype.Buffer, 0, prototype.Length);
@@ -550,7 +583,7 @@ namespace Lucene.Net.Analysis
 
         /// <summary>
         /// Copy the prototype token's fields into this one, with a different term. Note: Payloads are shared. </summary>
-        /// <param name="prototype"> existing Token </param>
+        /// <param name="prototype"> existing <see cref="Token"/> </param>
         /// <param name="newTerm"> new term text </param>
         public virtual void Reinit(Token prototype, string newTerm)
         {
@@ -565,7 +598,7 @@ namespace Lucene.Net.Analysis
 
         /// <summary>
         /// Copy the prototype token's fields into this one, with a different term. Note: Payloads are shared. </summary>
-        /// <param name="prototype"> existing Token </param>
+        /// <param name="prototype"> existing <see cref="Token"/> </param>
         /// <param name="newTermBuffer"> buffer containing new term text </param>
         /// <param name="offset"> the index in the buffer of the first character </param>
         /// <param name="length"> number of valid characters in the buffer </param>
@@ -623,7 +656,7 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Convenience factory that returns <code>Token</code> as implementation for the basic
+        /// Convenience factory that returns <see cref="Token"/> as implementation for the basic
         /// attributes and return the default impl (with &quot;Impl&quot; appended) for all other
         /// attributes.
         /// @since 3.0
@@ -631,7 +664,7 @@ namespace Lucene.Net.Analysis
         public static readonly AttributeSource.AttributeFactory TOKEN_ATTRIBUTE_FACTORY = new TokenAttributeFactory(AttributeSource.AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY);
 
         /// <summary>
-        /// <b>Expert:</b> Creates a TokenAttributeFactory returning <seealso cref="Token"/> as instance for the basic attributes
+        /// <b>Expert:</b> Creates a <see cref="TokenAttributeFactory"/> returning <see cref="Token"/> as instance for the basic attributes
         /// and for all other attributes calls the given delegate factory.
         /// @since 3.0
         /// </summary>
@@ -640,7 +673,7 @@ namespace Lucene.Net.Analysis
             internal readonly AttributeSource.AttributeFactory @delegate;
 
             /// <summary>
-            /// <b>Expert</b>: Creates an AttributeFactory returning <seealso cref="Token"/> as instance for the basic attributes
+            /// <b>Expert</b>: Creates an <see cref="AttributeSource.AttributeFactory"/> returning <see cref="Token"/> as instance for the basic attributes
             /// and for all other attributes calls the given delegate factory.
             /// </summary>
             public TokenAttributeFactory(AttributeSource.AttributeFactory @delegate)

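As a quick illustration of the reuse pattern documented in the Token summary above — a minimal sketch, assuming only the Reinit(string, int, int) overload shown in this diff; the tokenizer plumbing is omitted and the helper name is made up:

using Lucene.Net.Analysis;

public static class TokenReuseExample
{
    // Re-fills an existing Token rather than allocating a new one per term.
    // Reinit clears all fields, copies the new term text, and sets the
    // offsets; the type resets to DEFAULT_TYPE when not specified.
    public static Token Emit(Token reusableToken, string term, int start, int end)
    {
        return reusableToken.Reinit(term, start, end);
    }
}
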
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/TokenAttributes/CharTermAttribute.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/TokenAttributes/CharTermAttribute.cs b/src/Lucene.Net.Core/Analysis/TokenAttributes/CharTermAttribute.cs
index 8b37993..e1e73a9 100644
--- a/src/Lucene.Net.Core/Analysis/TokenAttributes/CharTermAttribute.cs
+++ b/src/Lucene.Net.Core/Analysis/TokenAttributes/CharTermAttribute.cs
@@ -32,7 +32,7 @@ namespace Lucene.Net.Analysis.TokenAttributes
     using UnicodeUtil = Lucene.Net.Util.UnicodeUtil;
 
     /// <summary>
-    /// Default implementation of <seealso cref="CharTermAttribute"/>. </summary>
+    /// Default implementation of <see cref="ICharTermAttribute"/>. </summary>
     public class CharTermAttribute : Attribute, ICharTermAttribute, ITermToBytesRefAttribute
 #if FEATURE_CLONEABLE
         , ICloneable
@@ -374,12 +374,13 @@ namespace Lucene.Net.Analysis.TokenAttributes
 
         /// <summary>
         /// Returns solely the term text as specified by the
-        /// <seealso cref="CharSequence"/> interface.
-        /// <p>this method changed the behavior with Lucene 3.1,
+        /// <see cref="ICharSequence"/> interface.
+        /// <para/>
+        /// This method changed the behavior with Lucene 3.1;
         /// before it returned a String representation of the whole
         /// term with all attributes.
         /// this affects especially the
-        /// <seealso cref="Lucene.Net.Analysis.Token"/> subclass.
+        /// <see cref="Lucene.Net.Analysis.Token"/> subclass.
         /// </summary>
         public override string ToString()
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/TokenAttributes/FlagsAttribute.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/TokenAttributes/FlagsAttribute.cs b/src/Lucene.Net.Core/Analysis/TokenAttributes/FlagsAttribute.cs
index abae50b..1f2f7b6 100644
--- a/src/Lucene.Net.Core/Analysis/TokenAttributes/FlagsAttribute.cs
+++ b/src/Lucene.Net.Core/Analysis/TokenAttributes/FlagsAttribute.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Analysis.TokenAttributes
     using IAttribute = Lucene.Net.Util.IAttribute;
 
     /// <summary>
-    /// Default implementation of <seealso cref="FlagsAttribute"/>. </summary>
+    /// Default implementation of <see cref="IFlagsAttribute"/>. </summary>
     public class FlagsAttribute : Attribute, IFlagsAttribute
 #if FEATURE_CLONEABLE
         , ICloneable

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/TokenAttributes/ICharTermAttribute.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/TokenAttributes/ICharTermAttribute.cs b/src/Lucene.Net.Core/Analysis/TokenAttributes/ICharTermAttribute.cs
index 7ee5bac..7985e55 100644
--- a/src/Lucene.Net.Core/Analysis/TokenAttributes/ICharTermAttribute.cs
+++ b/src/Lucene.Net.Core/Analysis/TokenAttributes/ICharTermAttribute.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Analysis.TokenAttributes
     */
 
     /// <summary>
-    /// The term text of a Token.
+    /// The term text of a <see cref="Token"/>.
     /// </summary>
     public interface ICharTermAttribute : IAttribute, ICharSequence
 #if FEATURE_CLONEABLE
@@ -54,7 +54,7 @@ namespace Lucene.Net.Analysis.TokenAttributes
         char[] Buffer { get; }
 
         /// <summary>
-        /// Grows the termBuffer to at least size newSize, preserving the
+        /// Grows the termBuffer to at least size <paramref name="newSize"/>, preserving the
         /// existing content. </summary>
         /// <param name="newSize"> minimum size of the new termBuffer </param>
         /// <returns> newly created termBuffer with length >= newSize </returns>

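A minimal sketch of the Buffer/ResizeBuffer contract documented above, using only the members shown in this diff (ResizeBuffer, SetLength); LoadTerm is a hypothetical helper:

using Lucene.Net.Analysis.TokenAttributes;

public static class TermBufferExample
{
    // Copies term text into the attribute's internal buffer.
    public static void LoadTerm(ICharTermAttribute termAtt, string term)
    {
        char[] buf = termAtt.ResizeBuffer(term.Length); // grows if needed, preserving content
        term.CopyTo(0, buf, 0, term.Length);            // fill in the characters of the term
        termAtt.SetLength(term.Length);                 // record the term length
    }
}
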
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/TokenAttributes/IFlagsAttribute.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/TokenAttributes/IFlagsAttribute.cs b/src/Lucene.Net.Core/Analysis/TokenAttributes/IFlagsAttribute.cs
index 9068d70..2aa5cf1 100644
--- a/src/Lucene.Net.Core/Analysis/TokenAttributes/IFlagsAttribute.cs
+++ b/src/Lucene.Net.Core/Analysis/TokenAttributes/IFlagsAttribute.cs
@@ -21,18 +21,20 @@ namespace Lucene.Net.Analysis.TokenAttributes
 
     /// <summary> This attribute can be used to pass different flags down the <see cref="Tokenizer" /> chain,
     /// eg from one TokenFilter to another one.
+    /// <para/>
+    /// This is completely distinct from <see cref="TypeAttribute"/>, although they do share similar purposes.
+    /// The flags can be used to encode information about the token for use by other 
+    /// <see cref="TokenFilter"/>s.
+    /// @lucene.experimental While we think this is here to stay, we may want to change it to be a long.
     /// </summary>
     public interface IFlagsAttribute : IAttribute
     {
-        /// <summary> EXPERIMENTAL:  While we think this is here to stay, we may want to change it to be a long.
-        /// <p/>
-        ///
-        /// Get the bitset for any bits that have been set.  This is completely distinct from <see cref="ITypeAttribute.Type()" />, although they do share similar purposes.
+        /// <summary>
+        /// Get the bitset for any bits that have been set.
+        /// <para/>
+        /// This is completely distinct from <see cref="ITypeAttribute.Type" />, although they do share similar purposes.
         /// The flags can be used to encode information about the token for use by other <see cref="Lucene.Net.Analysis.TokenFilter" />s.
-        ///
-        ///
         /// </summary>
-        /// <value> The bits </value>
         int Flags { get; set; }
     }
 }
\ No newline at end of file

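For example, a filter might use the attribute like this — a sketch; the flag bit and helper name are purely illustrative:

using Lucene.Net.Analysis.TokenAttributes;

public static class FlagsExample
{
    // Hypothetical application-defined flag bit.
    public const int IsAcronymFlag = 1 << 0;

    // Sets the bit so a later TokenFilter in the chain can test for it.
    public static void MarkAcronym(IFlagsAttribute flagsAtt)
    {
        flagsAtt.Flags |= IsAcronymFlag;
    }
}
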
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/TokenAttributes/IKeywordAttribute.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/TokenAttributes/IKeywordAttribute.cs b/src/Lucene.Net.Core/Analysis/TokenAttributes/IKeywordAttribute.cs
index 958eb66..0528f02 100644
--- a/src/Lucene.Net.Core/Analysis/TokenAttributes/IKeywordAttribute.cs
+++ b/src/Lucene.Net.Core/Analysis/TokenAttributes/IKeywordAttribute.cs
@@ -20,21 +20,18 @@ namespace Lucene.Net.Analysis.TokenAttributes
      */
 
     /// <summary>
-    /// this attribute can be used to mark a token as a keyword. Keyword aware
-    /// <seealso cref="TokenStream"/>s can decide to modify a token based on the return value
-    /// of <seealso cref="#isKeyword()"/> if the token is modified. Stemming filters for
+    /// This attribute can be used to mark a token as a keyword. Keyword aware
+    /// <see cref="TokenStream"/>s can decide to modify a token based on the return value
+    /// of <see cref="IsKeyword"/> if the token is modified. Stemming filters for
     /// instance can use this attribute to conditionally skip a term if
-    /// <seealso cref="#isKeyword()"/> returns <code>true</code>.
+    /// <see cref="IsKeyword"/> returns <c>true</c>.
     /// </summary>
     public interface IKeywordAttribute : IAttribute
     {
         /// <summary>
-        /// Returns <code>true</code> if the current token is a keyword, otherwise
-        /// <code>false</code>
+        /// Gets or Sets whether the current token is a keyword: <c>true</c>
+        /// if it is a keyword, otherwise <c>false</c>.
         /// </summary>
-        /// <returns> <code>true</code> if the current token is a keyword, otherwise
-        ///         <code>false</code> </returns>
-        /// <seealso cref= #setKeyword(boolean) </seealso>
         bool IsKeyword { get; set; }
     }
 }
\ No newline at end of file

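The keyword-aware pattern described above, as a minimal sketch — the toy Stem helper stands in for a real stemming filter:

using Lucene.Net.Analysis.TokenAttributes;

public static class KeywordExample
{
    // Stems the term only when the token was not marked as a keyword.
    public static string StemUnlessKeyword(IKeywordAttribute keywordAtt, string term)
    {
        return keywordAtt.IsKeyword ? term : Stem(term);
    }

    private static string Stem(string term) => term.TrimEnd('s'); // toy stand-in
}
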
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/TokenAttributes/IOffsetAttribute.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/TokenAttributes/IOffsetAttribute.cs b/src/Lucene.Net.Core/Analysis/TokenAttributes/IOffsetAttribute.cs
index 00aa0ad..590f2f0 100644
--- a/src/Lucene.Net.Core/Analysis/TokenAttributes/IOffsetAttribute.cs
+++ b/src/Lucene.Net.Core/Analysis/TokenAttributes/IOffsetAttribute.cs
@@ -20,34 +20,37 @@ namespace Lucene.Net.Analysis.TokenAttributes
      */
 
     /// <summary>
-    /// The start and end character offset of a Token.
+    /// The start and end character offset of a <see cref="Token"/>.
     /// </summary>
     public interface IOffsetAttribute : IAttribute
     {
         /// <summary>
-        /// Returns this Token's starting offset, the position of the first character
+        /// Returns this <see cref="Token"/>'s starting offset, the position of the first character
         /// corresponding to this token in the source text.
-        /// <p>
-        /// Note that the difference between <seealso cref="#EndOffset()"/> and <code>StartOffset()</code>
-        /// may not be equal to termText.length(), as the term text may have been altered by a
-        /// stemmer or some other filter. </summary>
-        /// <seealso cref= #SetOffset(int, int)  </seealso>
+        /// <para/>
+        /// Note that the difference between <see cref="EndOffset"/> and <see cref="StartOffset"/>
+        /// may not be equal to termText.Length, as the term text may have been altered by a
+        /// stemmer or some other filter.
+        /// </summary>
+        /// <seealso cref="SetOffset(int, int)"/>
         int StartOffset { get; }
 
         /// <summary>
-        /// Set the starting and ending offset. </summary>
-        /// <exception cref="IllegalArgumentException"> If <code>startOffset</code> or <code>endOffset</code>
-        ///         are negative, or if <code>startOffset</code> is greater than
-        ///         <code>endOffset</code> </exception>
-        /// <seealso cref= #StartOffset() </seealso>
-        /// <seealso cref= #EndOffset() </seealso>
+        /// Set the starting and ending offset.
+        /// </summary>
+        /// <exception cref="System.ArgumentException"> If <paramref name="startOffset"/> or <paramref name="endOffset"/>
+        ///         are negative, or if <paramref name="startOffset"/> is greater than
+        ///         <paramref name="endOffset"/> </exception>
+        /// <seealso cref="StartOffset"/>
+        /// <seealso cref="EndOffset"/>
         void SetOffset(int startOffset, int endOffset);
 
         /// <summary>
-        /// Returns this Token's ending offset, one greater than the position of the
+        /// Returns this <see cref="Token"/>'s ending offset, one greater than the position of the
         /// last character corresponding to this token in the source text. The length
-        /// of the token in the source text is (<code>EndOffset()</code> - <seealso cref="#StartOffset()"/>). </summary>
-        /// <seealso cref= #SetOffset(int, int) </seealso>
+        /// of the token in the source text is (<see cref="EndOffset"/> - <see cref="StartOffset"/>).
+        /// </summary>
+        /// <seealso cref="SetOffset(int, int)"/>
         int EndOffset { get; }
     }
 }
\ No newline at end of file

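A sketch of the re-association use case the offsets exist for (e.g. highlighting); Fragment is a hypothetical helper:

using Lucene.Net.Analysis.TokenAttributes;

public static class OffsetExample
{
    // Slices the matching fragment out of the original source text.
    // EndOffset is one past the last character, so the length is end - start.
    public static string Fragment(string sourceText, IOffsetAttribute offsetAtt)
    {
        int start = offsetAtt.StartOffset;
        int end = offsetAtt.EndOffset;
        return sourceText.Substring(start, end - start);
    }
}
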
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/TokenAttributes/IPayloadAttribute.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/TokenAttributes/IPayloadAttribute.cs b/src/Lucene.Net.Core/Analysis/TokenAttributes/IPayloadAttribute.cs
index fa644c0..bd9efac 100644
--- a/src/Lucene.Net.Core/Analysis/TokenAttributes/IPayloadAttribute.cs
+++ b/src/Lucene.Net.Core/Analysis/TokenAttributes/IPayloadAttribute.cs
@@ -24,22 +24,22 @@ namespace Lucene.Net.Analysis.TokenAttributes
 
     /// <summary>
     /// The payload of a Token.
-    /// <p>
+    /// <para/>
     /// The payload is stored in the index at each position, and can
     /// be used to influence scoring when using Payload-based queries
-    /// in the <seealso cref="Lucene.Net.Search.Payloads"/> and
-    /// <seealso cref="Lucene.Net.Search.Spans"/> packages.
-    /// <p>
+    /// in the <see cref="Lucene.Net.Search.Payloads"/> and
+    /// <see cref="Lucene.Net.Search.Spans"/> namespaces.
+    /// <para/>
    /// NOTE: because the payload will be stored at each position, it's usually
     /// best to use the minimum number of bytes necessary. Some codec implementations
     /// may optimize payload storage when all payloads have the same length.
     /// </summary>
-    /// <seealso cref= DocsAndPositionsEnum </seealso>
+    /// <seealso cref="Index.DocsAndPositionsEnum"/>
     public interface IPayloadAttribute : IAttribute
     {
         /// <summary>
-        /// Returns this Token's payload. </summary>
-        /// <seealso cref= #setPayload(BytesRef) </seealso>
+        /// Gets or Sets this <see cref="Token"/>'s payload.
+        /// </summary>
         BytesRef Payload { get; set; }
     }
 }
\ No newline at end of file

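Following the "minimum number of bytes" advice above, a one-byte payload might be set like so — the weight semantics here are purely illustrative:

using Lucene.Net.Analysis.TokenAttributes;
using Lucene.Net.Util;

public static class PayloadExample
{
    // Stores a single application-defined byte (here, a token weight)
    // at this token's position.
    public static void SetWeight(IPayloadAttribute payloadAtt, byte weight)
    {
        payloadAtt.Payload = new BytesRef(new[] { weight });
    }
}
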
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/TokenAttributes/IPositionIncrementAttribute.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/TokenAttributes/IPositionIncrementAttribute.cs b/src/Lucene.Net.Core/Analysis/TokenAttributes/IPositionIncrementAttribute.cs
index 62848aa..3d47b7d 100644
--- a/src/Lucene.Net.Core/Analysis/TokenAttributes/IPositionIncrementAttribute.cs
+++ b/src/Lucene.Net.Core/Analysis/TokenAttributes/IPositionIncrementAttribute.cs
@@ -21,39 +21,36 @@ namespace Lucene.Net.Analysis.TokenAttributes
 
     /// <summary>
     /// Determines the position of this token
-    /// relative to the previous Token in a TokenStream, used in phrase
+    /// relative to the previous <see cref="Token"/> in a <see cref="TokenStream"/>, used in phrase
     /// searching.
     ///
-    /// <p>The default value is one.
+    /// <para/>The default value is one.
     ///
-    /// <p>Some common uses for this are:<ul>
-    ///
-    /// <li>Set it to zero to put multiple terms in the same position.  this is
+    /// <para/>Some common uses for this are:
+    /// 
+    /// <list type="bullet">
+    /// <item>Set it to zero to put multiple terms in the same position.  This is
     /// useful if, e.g., a word has multiple stems.  Searches for phrases
     /// including either stem will match.  In this case, all but the first stem's
     /// increment should be set to zero: the increment of the first instance
     /// should be one.  Repeating a token with an increment of zero can also be
-    /// used to boost the scores of matches on that token.
+    /// used to boost the scores of matches on that token.</item>
     ///
-    /// <li>Set it to values greater than one to inhibit exact phrase matches.
+    /// <item>Set it to values greater than one to inhibit exact phrase matches.
     /// If, for example, one does not want phrases to match across removed stop
     /// words, then one could build a stop word filter that removes stop words and
     /// also sets the increment to the number of stop words removed before each
     /// non-stop word.  Then exact phrase queries will only match when the terms
-    /// occur with no intervening stop words.
-    ///
-    /// </ul>
+    /// occur with no intervening stop words.</item>
+    /// </list>
     /// </summary>
-    /// <seealso cref= Lucene.Net.Index.DocsAndPositionsEnum </seealso>
+    /// <seealso cref="Lucene.Net.Index.DocsAndPositionsEnum"/>
     public interface IPositionIncrementAttribute : IAttribute
     {
         /// <summary>
-        /// Set the position increment. The default value is one.
+        /// Gets or Sets the position increment (the distance from the prior term). The default value is one.
         /// </summary>
-        /// <param name="positionIncrement"> the distance from the prior term </param>
-        /// <exception cref="IllegalArgumentException"> if <code>positionIncrement</code>
-        ///         is negative. </exception>
-        /// <seealso cref= #getPositionIncrement() </seealso>
+        /// <exception cref="System.ArgumentException"> if value is set to a negative value. </exception>
         int PositionIncrement { set; get; }
     }
 }
\ No newline at end of file

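The first bullet above (stacking stems or synonyms at one position) boils down to the following sketch:

using Lucene.Net.Analysis.TokenAttributes;

public static class PositionIncrementExample
{
    // All but the first term at a position get an increment of zero,
    // so phrase searches match either variant.
    public static void EmitStackedTerm(IPositionIncrementAttribute posIncrAtt)
    {
        posIncrAtt.PositionIncrement = 0;
    }
}
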
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/TokenAttributes/IPositionLengthAttribute.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/TokenAttributes/IPositionLengthAttribute.cs b/src/Lucene.Net.Core/Analysis/TokenAttributes/IPositionLengthAttribute.cs
index 7b9edfc..07758f2 100644
--- a/src/Lucene.Net.Core/Analysis/TokenAttributes/IPositionLengthAttribute.cs
+++ b/src/Lucene.Net.Core/Analysis/TokenAttributes/IPositionLengthAttribute.cs
@@ -21,27 +21,25 @@ namespace Lucene.Net.Analysis.TokenAttributes
 
     /// <summary>
     /// Determines how many positions this
-    ///  token spans.  Very few analyzer components actually
-    ///  produce this attribute, and indexing ignores it, but
-    ///  it's useful to express the graph structure naturally
-    ///  produced by decompounding, word splitting/joining,
-    ///  synonym filtering, etc.
+    /// token spans.  Very few analyzer components actually
+    /// produce this attribute, and indexing ignores it, but
+    /// it's useful to express the graph structure naturally
+    /// produced by decompounding, word splitting/joining,
+    /// synonym filtering, etc.
     ///
-    /// <p>NOTE: this is optional, and most analyzers
-    ///  don't change the default value (1).
+    /// <para/>NOTE: this is optional, and most analyzers
+    /// don't change the default value (1).
     /// </summary>
-
     public interface IPositionLengthAttribute : IAttribute
     {
         /// <summary>
-        /// Set the position length of this Token.
-        /// <p>
-        /// The default value is one. </summary>
-        /// <param name="positionLength"> how many positions this token
-        ///  spans. </param>
-        /// <exception cref="IllegalArgumentException"> if <code>positionLength</code>
-        ///         is zero or negative. </exception>
-        /// <seealso cref= #getPositionLength() </seealso>
+        /// Gets or Sets the position length of this <see cref="Token"/> (how many positions this token
+        /// spans).
+        /// <para/>
+        /// The default value is one.
+        /// </summary>
+        /// <exception cref="System.ArgumentException"> if value
+        ///         is set to zero or negative. </exception>
         int PositionLength { set; get; }
     }
 }
\ No newline at end of file

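For instance, a filter that joins "wi" and "fi" into a single "wifi" token would record that the output spans two positions — a sketch:

using Lucene.Net.Analysis.TokenAttributes;

public static class PositionLengthExample
{
    // The joined token occupies the positions of both source tokens.
    public static void EmitJoinedToken(IPositionLengthAttribute posLenAtt)
    {
        posLenAtt.PositionLength = 2;
    }
}
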
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/TokenAttributes/ITermAttribute.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/TokenAttributes/ITermAttribute.cs b/src/Lucene.Net.Core/Analysis/TokenAttributes/ITermAttribute.cs
index 58f7550..940e6ba 100644
--- a/src/Lucene.Net.Core/Analysis/TokenAttributes/ITermAttribute.cs
+++ b/src/Lucene.Net.Core/Analysis/TokenAttributes/ITermAttribute.cs
@@ -18,7 +18,8 @@ namespace Lucene.Net.Analysis.TokenAttributes
      * See the License for the specific language governing permissions and
      * limitations under the License.
      */
-
+    
+    // LUCENENET TODO: Remove this type (it doesn't exist in Lucene 4.8.0)
     /// <summary> The term text of a Token.</summary>
     public interface ITermAttribute : IAttribute
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/TokenAttributes/ITermToBytesRefAttribute.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/TokenAttributes/ITermToBytesRefAttribute.cs b/src/Lucene.Net.Core/Analysis/TokenAttributes/ITermToBytesRefAttribute.cs
index b48294c..a8964c1 100644
--- a/src/Lucene.Net.Core/Analysis/TokenAttributes/ITermToBytesRefAttribute.cs
+++ b/src/Lucene.Net.Core/Analysis/TokenAttributes/ITermToBytesRefAttribute.cs
@@ -22,48 +22,49 @@ namespace Lucene.Net.Analysis.TokenAttributes
     using BytesRef = Lucene.Net.Util.BytesRef;
 
     /// <summary>
-    /// this attribute is requested by TermsHashPerField to index the contents.
-    /// this attribute can be used to customize the final byte[] encoding of terms.
-    /// <p>
-    /// Consumers of this attribute call <seealso cref="#getBytesRef()"/> up-front, and then
-    /// invoke <seealso cref="#fillBytesRef()"/> for each term. Example:
-    /// <pre class="prettyprint">
-    ///   final TermToBytesRefAttribute termAtt = tokenStream.getAttribute(TermToBytesRefAttribute.class);
-    ///   final BytesRef bytes = termAtt.getBytesRef();
+    /// This attribute is requested by TermsHashPerField to index the contents.
+    /// This attribute can be used to customize the final byte[] encoding of terms.
+    /// <para/>
+    /// Consumers of this attribute retrieve the <see cref="BytesRef"/> property up-front, and then
+    /// invoke <see cref="FillBytesRef()"/> for each term. Example:
+    /// <code>
+    ///   ITermToBytesRefAttribute termAtt = tokenStream.GetAttribute&lt;ITermToBytesRefAttribute&gt;();
+    ///   BytesRef bytes = termAtt.BytesRef;
     ///
-    ///   while (tokenStream.IncrementToken() {
-    ///
-    ///     // you must call termAtt.fillBytesRef() before doing something with the bytes.
+    ///   while (tokenStream.IncrementToken())
+    ///   {
+    ///     // you must call termAtt.FillBytesRef() before doing something with the bytes.
     ///     // this encodes the term value (internally it might be a char[], etc) into the bytes.
-    ///     int hashCode = termAtt.fillBytesRef();
-    ///
-    ///     if (isInteresting(bytes)) {
+    ///     termAtt.FillBytesRef();
     ///
+    ///     if (IsInteresting(bytes))
+    ///     {
     ///       // because the bytes are reused by the attribute (like CharTermAttribute's char[] buffer),
     ///       // you should make a copy if you need persistent access to the bytes, otherwise they will
     ///       // be rewritten across calls to IncrementToken()
     ///
-    ///       doSomethingWith(new BytesRef(bytes));
+    ///       DoSomethingWith(new BytesRef(bytes));
     ///     }
     ///   }
     ///   ...
-    /// </pre>
+    /// </code>
+    /// @lucene.experimental This is a very expert API; please use
-    /// <seealso cref="CharTermAttributeImpl"/> and its implementation of this method
+    /// <see cref="CharTermAttribute"/> and its implementation of this method
     /// for UTF-8 terms.
     /// </summary>
     public interface ITermToBytesRefAttribute : IAttribute
     {
         /// <summary>
-        /// Updates the bytes <seealso cref="#getBytesRef()"/> to contain this term's
+        /// Updates the bytes in <see cref="BytesRef"/> to contain this term's
         /// final encoding.
         /// </summary>
         void FillBytesRef();
 
         /// <summary>
-        /// Retrieve this attribute's BytesRef. The bytes are updated
-        /// from the current term when the consumer calls <seealso cref="#fillBytesRef()"/>. </summary>
-        /// <returns> this Attributes internal BytesRef. </returns>
+        /// Retrieve this attribute's <see cref="Util.BytesRef"/>. The bytes are updated
+        /// from the current term when the consumer calls <see cref="FillBytesRef()"/>.
+        /// </summary>
+        /// <returns> this <see cref="Util.IAttribute"/>'s internal <see cref="Util.BytesRef"/>. </returns>
         BytesRef BytesRef { get; }
     }
 }
\ No newline at end of file

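For reference, the consumer pattern described in the doc comment above, written out as a compilable C# sketch. This is not part of the commit: IsInteresting and DoSomethingWith are hypothetical placeholders carried over from the doc comment, and BytesRef.DeepCopyOf supplies the persistent copy the comment calls for.

    using System;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.TokenAttributes;
    using Lucene.Net.Util;

    public static class TermBytesConsumer
    {
        // Consumes an already-constructed token stream, term by term.
        public static void Consume(TokenStream tokenStream)
        {
            // Request the attribute and its BytesRef once, up-front.
            ITermToBytesRefAttribute termAtt = tokenStream.GetAttribute<ITermToBytesRefAttribute>();
            BytesRef bytes = termAtt.BytesRef;

            tokenStream.Reset();
            while (tokenStream.IncrementToken())
            {
                // FillBytesRef() encodes the current term into the shared BytesRef.
                termAtt.FillBytesRef();

                if (IsInteresting(bytes))
                {
                    // The bytes are reused across tokens, so take a deep copy
                    // for any persistent access.
                    DoSomethingWith(BytesRef.DeepCopyOf(bytes));
                }
            }
            tokenStream.End();
        }

        // Hypothetical placeholders standing in for caller logic.
        private static bool IsInteresting(BytesRef bytes) => bytes.Length > 0;
        private static void DoSomethingWith(BytesRef bytes) => Console.WriteLine(bytes);
    }
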
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/TokenAttributes/ITypeAttribute.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/TokenAttributes/ITypeAttribute.cs b/src/Lucene.Net.Core/Analysis/TokenAttributes/ITypeAttribute.cs
index 295032b..fd5af3e 100644
--- a/src/Lucene.Net.Core/Analysis/TokenAttributes/ITypeAttribute.cs
+++ b/src/Lucene.Net.Core/Analysis/TokenAttributes/ITypeAttribute.cs
@@ -20,21 +20,20 @@ namespace Lucene.Net.Analysis.TokenAttributes
      */
 
     /// <summary>
-    /// A Token's lexical type. The Default value is "word".
+    /// A <see cref="Token"/>'s lexical type. The Default value is "word".
     /// </summary>
     public interface ITypeAttribute : IAttribute
     {
         /// <summary>
-        /// the default type </summary>
-
-        /// <summary>
-        /// Set the lexical type. </summary>
-        /// <seealso cref= #type()  </seealso>
+        /// Gets or sets the lexical type. </summary>
         string Type { get; set; }
     }
 
     public static class TypeAttribute_Fields
     {
+        /// <summary>
+        /// The default type.
+        /// </summary>
         public const string DEFAULT_TYPE = "word";
     }
 }
\ No newline at end of file

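As a sketch of how ITypeAttribute is typically read (not part of this commit; it assumes StandardAnalyzer from the Lucene.Net.Analysis.Common package, which tags tokens with types such as "<ALPHANUM>" and "<NUM>"):

    using System;
    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Analysis.TokenAttributes;
    using Lucene.Net.Util;

    public static class TypeAttributeDemo
    {
        public static void Main()
        {
            Analyzer analyzer = new StandardAnalyzer(LuceneVersion.LUCENE_48);
            using (TokenStream ts = analyzer.GetTokenStream("body", new StringReader("Lucene 4.8")))
            {
                ICharTermAttribute termAtt = ts.AddAttribute<ICharTermAttribute>();
                ITypeAttribute typeAtt = ts.AddAttribute<ITypeAttribute>();

                ts.Reset();
                while (ts.IncrementToken())
                {
                    // Tokens keep TypeAttribute_Fields.DEFAULT_TYPE ("word")
                    // unless the tokenizer assigns something more specific.
                    Console.WriteLine("{0} : {1}", termAtt, typeAtt.Type);
                }
                ts.End();
            }
        }
    }
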
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/TokenAttributes/KeywordAttribute.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/TokenAttributes/KeywordAttribute.cs b/src/Lucene.Net.Core/Analysis/TokenAttributes/KeywordAttribute.cs
index 03348cd..48db835 100644
--- a/src/Lucene.Net.Core/Analysis/TokenAttributes/KeywordAttribute.cs
+++ b/src/Lucene.Net.Core/Analysis/TokenAttributes/KeywordAttribute.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Analysis.TokenAttributes
     using IAttribute = Lucene.Net.Util.IAttribute;
 
     /// <summary>
-    /// Default implementation of <seealso cref="KeywordAttribute"/>. </summary>
+    /// Default implementation of <see cref="IKeywordAttribute"/>. </summary>
     public sealed class KeywordAttribute : Attribute, IKeywordAttribute
     {
         private bool keyword;

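A minimal sketch of this attribute's typical use (not part of the commit): a filter that flags protected terms so that keyword-aware stemmers further down the chain leave them untouched. The filter name and term set are illustrative; member names follow the Lucene.NET 4.8 sources.

    using System.Collections.Generic;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.TokenAttributes;

    public sealed class ProtectedTermFilter : TokenFilter
    {
        private readonly ISet<string> protectedTerms;
        private readonly ICharTermAttribute termAtt;
        private readonly IKeywordAttribute keywordAtt;

        public ProtectedTermFilter(TokenStream input, ISet<string> protectedTerms)
            : base(input)
        {
            this.protectedTerms = protectedTerms;
            termAtt = AddAttribute<ICharTermAttribute>();
            keywordAtt = AddAttribute<IKeywordAttribute>();
        }

        public override bool IncrementToken()
        {
            if (!m_input.IncrementToken())
                return false;

            // Keyword-aware filters (e.g. stemmers) skip tokens flagged here.
            if (protectedTerms.Contains(termAtt.ToString()))
                keywordAtt.IsKeyword = true;

            return true;
        }
    }
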
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/TokenAttributes/OffsetAttribute.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/TokenAttributes/OffsetAttribute.cs b/src/Lucene.Net.Core/Analysis/TokenAttributes/OffsetAttribute.cs
index b5627a5..20185af 100644
--- a/src/Lucene.Net.Core/Analysis/TokenAttributes/OffsetAttribute.cs
+++ b/src/Lucene.Net.Core/Analysis/TokenAttributes/OffsetAttribute.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Analysis.TokenAttributes
     using IAttribute = Lucene.Net.Util.IAttribute;
 
     /// <summary>
-    /// Default implementation of <seealso cref="OffsetAttribute"/>. </summary>
+    /// Default implementation of <see cref="IOffsetAttribute"/>. </summary>
     public class OffsetAttribute : Attribute, IOffsetAttribute
     {
         private int startOffset;

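A sketch of reading offsets to re-associate tokens with their source text, e.g. for highlighting (not part of the commit; WhitespaceAnalyzer comes from the Lucene.Net.Analysis.Common package):

    using System;
    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.TokenAttributes;
    using Lucene.Net.Util;

    public static class OffsetDemo
    {
        public static void Main()
        {
            string text = "quick brown fox";
            Analyzer analyzer = new WhitespaceAnalyzer(LuceneVersion.LUCENE_48);
            using (TokenStream ts = analyzer.GetTokenStream("f", new StringReader(text)))
            {
                IOffsetAttribute offsetAtt = ts.AddAttribute<IOffsetAttribute>();
                ts.Reset();
                while (ts.IncrementToken())
                {
                    // Start/end offsets index into the original text, so each
                    // token can be sliced back out, e.g. to wrap it in markup.
                    Console.WriteLine(text.Substring(offsetAtt.StartOffset,
                        offsetAtt.EndOffset - offsetAtt.StartOffset));
                }
                ts.End();
            }
        }
    }
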
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/TokenAttributes/PayloadAttribute.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/TokenAttributes/PayloadAttribute.cs b/src/Lucene.Net.Core/Analysis/TokenAttributes/PayloadAttribute.cs
index 9a9d7a0..4ba1e5a 100644
--- a/src/Lucene.Net.Core/Analysis/TokenAttributes/PayloadAttribute.cs
+++ b/src/Lucene.Net.Core/Analysis/TokenAttributes/PayloadAttribute.cs
@@ -22,7 +22,7 @@ namespace Lucene.Net.Analysis.TokenAttributes
     using BytesRef = Lucene.Net.Util.BytesRef;
 
     /// <summary>
-    /// Default implementation of <seealso cref="PayloadAttribute"/>. </summary>
+    /// Default implementation of <see cref="IPayloadAttribute"/>. </summary>
     public class PayloadAttribute : Attribute, IPayloadAttribute
     {
         private BytesRef payload;

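A sketch of a filter attaching a payload to every token (not part of the commit); the single "weight" byte is purely illustrative. Payloads written this way are what DocsAndPositionsEnum.GetPayload() reads back at search time.

    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.TokenAttributes;
    using Lucene.Net.Util;

    public sealed class ConstantPayloadFilter : TokenFilter
    {
        private readonly IPayloadAttribute payloadAtt;

        public ConstantPayloadFilter(TokenStream input)
            : base(input)
        {
            payloadAtt = AddAttribute<IPayloadAttribute>();
        }

        public override bool IncrementToken()
        {
            if (!m_input.IncrementToken())
                return false;

            // Payloads are stored per position; here every token gets the
            // same one-byte value.
            payloadAtt.Payload = new BytesRef(new byte[] { 1 });
            return true;
        }
    }
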
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/TokenAttributes/PositionIncrementAttribute.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/TokenAttributes/PositionIncrementAttribute.cs b/src/Lucene.Net.Core/Analysis/TokenAttributes/PositionIncrementAttribute.cs
index 0f6a41b..00a82ba 100644
--- a/src/Lucene.Net.Core/Analysis/TokenAttributes/PositionIncrementAttribute.cs
+++ b/src/Lucene.Net.Core/Analysis/TokenAttributes/PositionIncrementAttribute.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Analysis.TokenAttributes
     using IAttribute = Lucene.Net.Util.IAttribute;
 
     /// <summary>
-    /// Default implementation of <seealso cref="PositionIncrementAttribute"/>. </summary>
+    /// Default implementation of <see cref="IPositionIncrementAttribute"/>. </summary>
     public class PositionIncrementAttribute : Attribute, IPositionIncrementAttribute
     {
         private int positionIncrement = 1;

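A consumer-side sketch (not from the commit): accumulating increments recovers absolute token positions, and a gap larger than one reveals where a stop filter removed tokens.

    using System;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.TokenAttributes;

    public static class PositionDemo
    {
        // ts is any analysis chain, e.g. one that includes a stop filter;
        // the caller is assumed to dispose the stream.
        public static void PrintPositions(TokenStream ts)
        {
            ICharTermAttribute termAtt = ts.AddAttribute<ICharTermAttribute>();
            IPositionIncrementAttribute posIncAtt = ts.AddAttribute<IPositionIncrementAttribute>();

            int position = -1;
            ts.Reset();
            while (ts.IncrementToken())
            {
                // The default increment is 1; a value of 2 or more means
                // positions were skipped (e.g. removed stop words).
                position += posIncAtt.PositionIncrement;
                Console.WriteLine("{0} @ {1}", termAtt, position);
            }
            ts.End();
        }
    }
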
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/TokenAttributes/PositionLengthAttribute.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/TokenAttributes/PositionLengthAttribute.cs b/src/Lucene.Net.Core/Analysis/TokenAttributes/PositionLengthAttribute.cs
index ea51a7d..de2896d 100644
--- a/src/Lucene.Net.Core/Analysis/TokenAttributes/PositionLengthAttribute.cs
+++ b/src/Lucene.Net.Core/Analysis/TokenAttributes/PositionLengthAttribute.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Analysis.TokenAttributes
     using IAttribute = Lucene.Net.Util.IAttribute;
 
     /// <summary>
-    /// Default implementation of <seealso cref="PositionLengthAttribute"/>. </summary>
+    /// Default implementation of <see cref="IPositionLengthAttribute"/>. </summary>
     public class PositionLengthAttribute : Attribute, IPositionLengthAttribute
     {
         private int positionLength = 1;

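A fragment (not from the commit) showing this attribute's role in token graphs: a single-token synonym injected over a multi-token phrase spans several positions. The surrounding synonym filter is hypothetical.

    using Lucene.Net.Analysis.TokenAttributes;

    public static class PositionLengthDemo
    {
        // Inside a hypothetical synonym filter's IncrementToken(): emit the
        // single token "wifi" aligned with the two-token phrase
        // "wireless fidelity".
        public static void EmitSynonym(IPositionIncrementAttribute posIncAtt,
                                       IPositionLengthAttribute posLenAtt)
        {
            posIncAtt.PositionIncrement = 0; // starts at the same position as "wireless"
            posLenAtt.PositionLength = 2;    // spans both underlying positions
        }
    }
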
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/TokenAttributes/TypeAttribute.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/TokenAttributes/TypeAttribute.cs b/src/Lucene.Net.Core/Analysis/TokenAttributes/TypeAttribute.cs
index 956759e..abf77b4 100644
--- a/src/Lucene.Net.Core/Analysis/TokenAttributes/TypeAttribute.cs
+++ b/src/Lucene.Net.Core/Analysis/TokenAttributes/TypeAttribute.cs
@@ -23,8 +23,7 @@ namespace Lucene.Net.Analysis.TokenAttributes
     using IAttribute = Lucene.Net.Util.IAttribute;
 
     /// <summary>
-    /// Default implementation of <seealso cref="TypeAttribute"/>. </summary>
-
+    /// Default implementation of <see cref="ITypeAttribute"/>. </summary>
     public class TypeAttribute : Attribute, ITypeAttribute
 #if FEATURE_CLONEABLE
         , ICloneable
@@ -33,14 +32,14 @@ namespace Lucene.Net.Analysis.TokenAttributes
         private string type;
 
         /// <summary>
-        /// Initialize this attribute with <seealso cref="TypeAttribute#DEFAULT_TYPE"/> </summary>
+        /// Initialize this attribute with <see cref="TypeAttribute_Fields.DEFAULT_TYPE"/> </summary>
         public TypeAttribute()
             : this(TypeAttribute_Fields.DEFAULT_TYPE)
         {
         }
 
         /// <summary>
-        /// Initialize this attribute with <code>type</code> </summary>
+        /// Initialize this attribute with <paramref name="type"/> </summary>
         public TypeAttribute(string type)
         {
             this.type = type;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/TokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/TokenFilter.cs b/src/Lucene.Net.Core/Analysis/TokenFilter.cs
index 9b5957e..9a6c715 100644
--- a/src/Lucene.Net.Core/Analysis/TokenFilter.cs
+++ b/src/Lucene.Net.Core/Analysis/TokenFilter.cs
@@ -18,10 +18,11 @@ namespace Lucene.Net.Analysis
      */
 
     /// <summary>
-    /// A TokenFilter is a TokenStream whose input is another TokenStream.
-    ///  <p>
-    ///  this is an abstract class; subclasses must override <seealso cref="#IncrementToken()"/>. </summary>
-    ///  <seealso cref= TokenStream </seealso>
+    /// A <see cref="TokenFilter"/> is a <see cref="TokenStream"/> whose input is another <see cref="TokenStream"/>.
+    /// <para/>
+    /// This is an abstract class; subclasses must override <see cref="TokenStream.IncrementToken()"/>.
+    /// </summary>
+    /// <seealso cref="TokenStream"/>
     public abstract class TokenFilter : TokenStream
     {
         /// <summary>
@@ -37,23 +38,40 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// {@inheritDoc}
-        /// <p>
+        /// This method is called by the consumer after the last token has been
+        /// consumed, after <see cref="IncrementToken()"/> returned <c>false</c>
+        /// (using the new <see cref="TokenStream"/> API). Streams implementing the old API
+        /// should upgrade to use this feature.
+        /// <para/>
+        /// This method can be used to perform any end-of-stream operations, such as
+        /// setting the final offset of a stream. The final offset of a stream might
+        /// differ from the offset of the last token, e.g., when one or more whitespace
+        /// characters follow the last token and a WhitespaceTokenizer was used.
+        /// <para/>
+        /// Additionally, any skipped positions (such as those removed by a stop filter)
+        /// can be applied to the position increment, or any adjustment of other
+        /// attributes where the end-of-stream value may be important.
+        /// <para/>
         /// <b>NOTE:</b>
         /// The default implementation chains the call to the input TokenStream, so
-        /// be sure to call <code>super.end()</code> first when overriding this method.
+        /// be sure to call <c>base.End()</c> first when overriding this method.
         /// </summary>
+        /// <exception cref="System.IO.IOException"> If an I/O error occurs </exception>
         public override void End()
         {
             m_input.End();
         }
 
         /// <summary>
-        /// {@inheritDoc}
-        /// <p>
+        /// Releases resources associated with this stream.
+        /// <para/>
+        /// If you override this method, always call <c>base.Dispose()</c>, otherwise
+        /// some internal state will not be correctly reset (e.g., <see cref="Tokenizer"/> will
+        /// throw <see cref="InvalidOperationException"/> on reuse).
+        /// <para/>
         /// <b>NOTE:</b>
         /// The default implementation chains the call to the input TokenStream, so
-        /// be sure to call <code>super.Dispose()</code> when overriding this method.
+        /// be sure to call <c>base.Dispose()</c> when overriding this method.
         /// </summary>
         public override void Dispose()
         {
@@ -62,7 +80,7 @@ namespace Lucene.Net.Analysis
 
         /// <summary>
         /// This method is called by a consumer before it begins consumption using
-        /// <see cref="IncrementToken()"/>.
+        /// <see cref="TokenStream.IncrementToken()"/>.
         /// <para/>
         /// Resets this stream to a clean state. Stateful implementations must implement
         /// this method so that they can be reused, just as if they had been created fresh.

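A sketch of a filter honoring the End(), Dispose() and Reset() contracts documented above (not part of the commit): it drops zero-length tokens, folds their increments into the next emitted token, and applies any increments left over at end-of-stream after chaining base.End() first.

    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.TokenAttributes;

    public sealed class DropEmptyTokensFilter : TokenFilter
    {
        private readonly ICharTermAttribute termAtt;
        private readonly IPositionIncrementAttribute posIncAtt;
        private int skippedAtEnd;

        public DropEmptyTokensFilter(TokenStream input)
            : base(input)
        {
            termAtt = AddAttribute<ICharTermAttribute>();
            posIncAtt = AddAttribute<IPositionIncrementAttribute>();
        }

        public override bool IncrementToken()
        {
            int skipped = 0;
            while (m_input.IncrementToken())
            {
                if (termAtt.Length > 0)
                {
                    // Preserve the positions of dropped tokens.
                    if (skipped > 0)
                        posIncAtt.PositionIncrement += skipped;
                    return true;
                }
                skipped += posIncAtt.PositionIncrement;
            }
            skippedAtEnd = skipped;
            return false;
        }

        public override void End()
        {
            base.End(); // chain to the input stream first, per the note above
            // Account for positions of tokens dropped at the very end.
            posIncAtt.PositionIncrement += skippedAtEnd;
        }

        public override void Reset()
        {
            base.Reset(); // chains to the input stream
            skippedAtEnd = 0;
        }
    }
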
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/TokenStream.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/TokenStream.cs b/src/Lucene.Net.Core/Analysis/TokenStream.cs
index d2c34c9..75a4bef 100644
--- a/src/Lucene.Net.Core/Analysis/TokenStream.cs
+++ b/src/Lucene.Net.Core/Analysis/TokenStream.cs
@@ -26,63 +26,62 @@ namespace Lucene.Net.Analysis
     using AttributeSource = Lucene.Net.Util.AttributeSource;
 
     /// <summary>
-    /// A <code>TokenStream</code> enumerates the sequence of tokens, either from
-    /// <seealso cref="Field"/>s of a <seealso cref="Document"/> or from query text.
-    /// <p>
+    /// A <see cref="TokenStream"/> enumerates the sequence of tokens, either from
+    /// <see cref="Documents.Field"/>s of a <see cref="Documents.Document"/> or from query text.
+    /// <para/>
     /// this is an abstract class; concrete subclasses are:
-    /// <ul>
-    /// <li><seealso cref="Tokenizer"/>, a <code>TokenStream</code> whose input is a TextReader; and
-    /// <li><seealso cref="TokenFilter"/>, a <code>TokenStream</code> whose input is another
-    /// <code>TokenStream</code>.
-    /// </ul>
-    /// A new <code>TokenStream</code> API has been introduced with Lucene 2.9. this API
-    /// has moved from being <seealso cref="Token"/>-based to <seealso cref="Attribute"/>-based. While
-    /// <seealso cref="Token"/> still exists in 2.9 as a convenience class, the preferred way
-    /// to store the information of a <seealso cref="Token"/> is to use <seealso cref="AttributeImpl"/>s.
-    /// <p>
-    /// <code>TokenStream</code> now extends <seealso cref="AttributeSource"/>, which provides
-    /// access to all of the token <seealso cref="Attribute"/>s for the <code>TokenStream</code>.
-    /// Note that only one instance per <seealso cref="AttributeImpl"/> is created and reused
-    /// for every token. this approach reduces object creation and allows local
-    /// caching of references to the <seealso cref="AttributeImpl"/>s. See
-    /// <seealso cref="#IncrementToken()"/> for further details.
-    /// <p>
-    /// <b>The workflow of the new <code>TokenStream</code> API is as follows:</b>
-    /// <ol>
-    /// <li>Instantiation of <code>TokenStream</code>/<seealso cref="TokenFilter"/>s which add/get
-    /// attributes to/from the <seealso cref="AttributeSource"/>.
-    /// <li>The consumer calls <seealso cref="TokenStream#reset()"/>.
-    /// <li>The consumer retrieves attributes from the stream and stores local
-    /// references to all attributes it wants to access.
-    /// <li>The consumer calls <seealso cref="#IncrementToken()"/> until it returns false
-    /// consuming the attributes after each call.
-    /// <li>The consumer calls <seealso cref="#end()"/> so that any end-of-stream operations
-    /// can be performed.
-    /// <li>The consumer calls <seealso cref="#close()"/> to release any resource when finished
-    /// using the <code>TokenStream</code>.
-    /// </ol>
+    /// <list type="bullet">
+    ///     <item><see cref="Tokenizer"/>, a <see cref="TokenStream"/> whose input is a <see cref="System.IO.TextReader"/>; and</item>
+    ///     <item><see cref="TokenFilter"/>, a <see cref="TokenStream"/> whose input is another
+    ///         <see cref="TokenStream"/>.</item>
+    /// </list>
+    /// A new <see cref="TokenStream"/> API has been introduced with Lucene 2.9. this API
+    /// has moved from being <see cref="Token"/>-based to <see cref="Util.IAttribute"/>-based. While
+    /// <see cref="Token"/> still exists in 2.9 as a convenience class, the preferred way
+    /// to store the information of a <see cref="Token"/> is to use <see cref="Attribute"/>s.
+    /// <para/>
+    /// <see cref="TokenStream"/> now extends <see cref="AttributeSource"/>, which provides
+    /// access to all of the token <see cref="Util.IAttribute"/>s for the <see cref="TokenStream"/>.
+    /// Note that only one instance per <see cref="Attribute"/> is created and reused
+    /// for every token. This approach reduces object creation and allows local
+    /// caching of references to the <see cref="Attribute"/>s. See
+    /// <see cref="IncrementToken()"/> for further details.
+    /// <para/>
+    /// <b>The workflow of the new <see cref="TokenStream"/> API is as follows:</b>
+    /// <list type="number">
+    ///     <item>Instantiation of <see cref="TokenStream"/>/<see cref="TokenFilter"/>s which add/get
+    ///         attributes to/from the <see cref="AttributeSource"/>.</item>
+    ///     <item>The consumer calls <see cref="TokenStream.Reset()"/>.</item>
+    ///     <item>The consumer retrieves attributes from the stream and stores local
+    ///         references to all attributes it wants to access.</item>
+    ///     <item>The consumer calls <see cref="IncrementToken()"/> until it returns false
+    ///         consuming the attributes after each call.</item>
+    ///     <item>The consumer calls <see cref="End()"/> so that any end-of-stream operations
+    ///         can be performed.</item>
+    ///     <item>The consumer calls <see cref="Dispose()"/> to release any resource when finished
+    ///         using the <see cref="TokenStream"/>.</item>
+    /// </list>
     /// To make sure that filters and consumers know which attributes are available,
     /// the attributes must be added during instantiation. Filters and consumers are
     /// not required to check for availability of attributes in
-    /// <seealso cref="#IncrementToken()"/>.
-    /// <p>
-    /// You can find some example code for the new API in the analysis package level
-    /// Javadoc.
-    /// <p>
-    /// Sometimes it is desirable to capture a current state of a <code>TokenStream</code>,
-    /// e.g., for buffering purposes (see <seealso cref="CachingTokenFilter"/>,
+    /// <see cref="IncrementToken()"/>.
+    /// <para/>
+    /// You can find some example code for the new API in the analysis 
+    /// documentation.
+    /// <para/>
+    /// Sometimes it is desirable to capture a current state of a <see cref="TokenStream"/>,
+    /// e.g., for buffering purposes (see <see cref="CachingTokenFilter"/>,
     /// TeeSinkTokenFilter). For this usecase
-    /// <seealso cref="AttributeSource#captureState"/> and <seealso cref="AttributeSource#restoreState"/>
+    /// <see cref="AttributeSource.CaptureState"/> and <see cref="AttributeSource.RestoreState"/>
     /// can be used.
-    /// <p>The {@code TokenStream}-API in Lucene is based on the decorator pattern.
-    /// Therefore all non-abstract subclasses must be final or have at least a final
-    /// implementation of <seealso cref="#incrementToken"/>! this is checked when Java
-    /// assertions are enabled.
+    /// <para/>The <see cref="TokenStream"/>-API in Lucene is based on the decorator pattern.
+    /// Therefore all non-abstract subclasses must be sealed or have at least a sealed
+    /// implementation of <see cref="IncrementToken()"/>! This is checked when assertions are enabled.
     /// </summary>
     public abstract class TokenStream : AttributeSource, IDisposable
     {
         /// <summary>
-        /// A TokenStream using the default attribute factory.
+        /// A <see cref="TokenStream"/> using the default attribute factory.
         /// </summary>
         protected TokenStream()
         {
@@ -90,7 +89,7 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// A TokenStream that uses the same attributes as the supplied one.
+        /// A <see cref="TokenStream"/> that uses the same attributes as the supplied one.
         /// </summary>
         protected TokenStream(AttributeSource input)
             : base(input)
@@ -99,7 +98,8 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// A TokenStream using the supplied AttributeFactory for creating new <seealso cref="Attribute"/> instances.
+        /// A <see cref="TokenStream"/> using the supplied <see cref="AttributeSource.AttributeFactory"/> 
+        /// for creating new <see cref="Util.IAttribute"/> instances.
         /// </summary>
         protected TokenStream(AttributeFactory factory)
             : base(factory)
@@ -154,23 +154,23 @@ namespace Lucene.Net.Analysis
         public abstract bool IncrementToken();
 
         /// <summary>
-        /// this method is called by the consumer after the last token has been
-        /// consumed, after <seealso cref="#IncrementToken()"/> returned <code>false</code>
-        /// (using the new <code>TokenStream</code> API). Streams implementing the old API
+        /// This method is called by the consumer after the last token has been
+        /// consumed, after <see cref="IncrementToken()"/> returned <c>false</c>
+        /// (using the new <see cref="TokenStream"/> API). Streams implementing the old API
         /// should upgrade to use this feature.
-        /// <p/>
-        /// this method can be used to perform any end-of-stream operations, such as
+        /// <para/>
+        /// This method can be used to perform any end-of-stream operations, such as
         /// setting the final offset of a stream. The final offset of a stream might
         /// differ from the offset of the last token eg in case one or more whitespaces
         /// followed after the last token, but a WhitespaceTokenizer was used.
-        /// <p>
+        /// <para/>
         /// Additionally any skipped positions (such as those removed by a stopfilter)
         /// can be applied to the position increment, or any adjustment of other
         /// attributes where the end-of-stream value may be important.
-        /// <p>
-        /// If you override this method, always call {@code super.end()}.
+        /// <para/>
+        /// If you override this method, always call <c>base.End()</c>.
         /// </summary>
-        /// <exception cref="IOException"> If an I/O error occurs </exception>
+        /// <exception cref="System.IO.IOException"> If an I/O error occurs </exception>
         public virtual void End()
         {
             ClearAttributes(); // LUCENE-3849: don't consume dirty atts
@@ -199,10 +199,10 @@ namespace Lucene.Net.Analysis
 
         /// <summary>
         /// Releases resources associated with this stream.
-        /// <p>
-        /// If you override this method, always call {@code super.Dispose()}, otherwise
-        /// some internal state will not be correctly reset (e.g., <seealso cref="Tokenizer"/> will
-        /// throw <seealso cref="IllegalStateException"/> on reuse).
+        /// <para/>
+        /// If you override this method, always call <c>base.Dispose()</c>, otherwise
+        /// some internal state will not be correctly reset (e.g., <see cref="Tokenizer"/> will
+        /// throw <see cref="InvalidOperationException"/> on reuse).
         /// </summary>
         public virtual void Dispose()
         {

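The numbered workflow above, written out as a runnable consumer (not part of the commit; WhitespaceAnalyzer from Lucene.Net.Analysis.Common and the field name "f" are illustrative choices):

    using System;
    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.TokenAttributes;
    using Lucene.Net.Util;

    public static class WorkflowDemo
    {
        public static void Main()
        {
            Analyzer analyzer = new WhitespaceAnalyzer(LuceneVersion.LUCENE_48);
            TokenStream ts = analyzer.GetTokenStream("f", new StringReader("hello token stream"));
            try
            {
                // Step 3: store local references to the attributes up-front.
                ICharTermAttribute termAtt = ts.AddAttribute<ICharTermAttribute>();
                IOffsetAttribute offsetAtt = ts.AddAttribute<IOffsetAttribute>();

                ts.Reset();                  // step 2: reset before consuming
                while (ts.IncrementToken())  // step 4: consume token by token
                {
                    Console.WriteLine("{0} [{1}-{2}]", termAtt,
                        offsetAtt.StartOffset, offsetAtt.EndOffset);
                }
                ts.End();                    // step 5: end-of-stream operations
            }
            finally
            {
                ts.Dispose();                // step 6: release resources
            }
        }
    }
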
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/TokenStreamToAutomaton.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/TokenStreamToAutomaton.cs b/src/Lucene.Net.Core/Analysis/TokenStreamToAutomaton.cs
index 8d80198..f2fb739 100644
--- a/src/Lucene.Net.Core/Analysis/TokenStreamToAutomaton.cs
+++ b/src/Lucene.Net.Core/Analysis/TokenStreamToAutomaton.cs
@@ -30,11 +30,11 @@ namespace Lucene.Net.Analysis
     // TODO: maybe also toFST?  then we can translate atts into FST outputs/weights
 
     /// <summary>
-    /// Consumes a TokenStream and creates an <seealso cref="Automaton"/>
-    ///  where the transition labels are UTF8 bytes (or Unicode
-    ///  code points if unicodeArcs is true) from the {@link
-    ///  TermToBytesRefAttribute}.  Between tokens we insert
-    ///  POS_SEP and for holes we insert HOLE.
+    /// Consumes a <see cref="TokenStream"/> and creates an <see cref="Automaton"/>
+    /// where the transition labels are UTF-8 bytes (or Unicode code points
+    /// if <see cref="UnicodeArcs"/> is <c>true</c>) from the <see cref="ITermToBytesRefAttribute"/>.
+    /// Between tokens we insert <see cref="POS_SEP"/> and for holes we insert <see cref="HOLE"/>.
     ///
     /// @lucene.experimental
     /// </summary>
@@ -53,7 +53,7 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Whether to generate holes in the automaton for missing positions, <code>true</code> by default. </summary>
+        /// Whether to generate holes in the automaton for missing positions, <c>true</c> by default. </summary>
         public virtual bool PreservePositionIncrements
         {
             get
@@ -68,7 +68,7 @@ namespace Lucene.Net.Analysis
 
         /// <summary>
         /// Whether to make transition labels Unicode code points instead of UTF8 bytes,
-        ///  <code>false</code> by default
+        /// <c>false</c> by default
         /// </summary>
         public virtual bool UnicodeArcs
         {
@@ -114,9 +114,9 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Subclass & implement this if you need to change the
-        ///  token (such as escaping certain bytes) before it's
-        ///  turned into a graph.
+        /// Subclass &amp; implement this if you need to change the
+        /// token (such as escaping certain bytes) before it's
+        /// turned into a graph.
         /// </summary>
         protected internal virtual BytesRef ChangeToken(BytesRef @in)
         {
@@ -132,11 +132,10 @@ namespace Lucene.Net.Analysis
         public const int HOLE = 0x001e;
 
         /// <summary>
-        /// Pulls the graph (including {@link
-        ///  PositionLengthAttribute}) from the provided {@link
-        ///  TokenStream}, and creates the corresponding
-        ///  automaton where arcs are bytes (or Unicode code points
-        ///  if unicodeArcs = true) from each term.
+        /// Pulls the graph (including <see cref="IPositionLengthAttribute"/>)
+        /// from the provided <see cref="TokenStream"/>, and creates the corresponding
+        /// automaton where arcs are bytes (or Unicode code points
+        /// if <see cref="UnicodeArcs"/> is <c>true</c>) from each term.
         /// </summary>
         public virtual Automaton ToAutomaton(TokenStream @in)
         {

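A short sketch of this API (not from the commit): the two properties are set up-front and ToAutomaton consumes the provided stream; the analyzer setup follows the earlier examples, and disposal stays with the caller.

    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Util.Automaton;

    public static class AutomatonDemo
    {
        public static Automaton Build(Analyzer analyzer, string text)
        {
            var tsta = new TokenStreamToAutomaton
            {
                PreservePositionIncrements = true, // keep holes for removed positions
                UnicodeArcs = false                // transition labels as UTF-8 bytes
            };
            using (TokenStream ts = analyzer.GetTokenStream("f", new StringReader(text)))
            {
                // ToAutomaton consumes the stream and returns the term graph
                // as an automaton.
                return tsta.ToAutomaton(ts);
            }
        }
    }
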
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37c78c2e/src/Lucene.Net.Core/Analysis/Tokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/Tokenizer.cs b/src/Lucene.Net.Core/Analysis/Tokenizer.cs
index b5e178d..3176f01 100644
--- a/src/Lucene.Net.Core/Analysis/Tokenizer.cs
+++ b/src/Lucene.Net.Core/Analysis/Tokenizer.cs
@@ -22,22 +22,22 @@ namespace Lucene.Net.Analysis
      */
 
     /// <summary>
-    /// A Tokenizer is a TokenStream whose input is a TextReader.
-    ///  <p>
-    ///  this is an abstract class; subclasses must override <seealso cref="#IncrementToken()"/>
-    ///  <p>
-    ///  NOTE: Subclasses overriding <seealso cref="#IncrementToken()"/> must
-    ///  call <seealso cref="AttributeSource#ClearAttributes()"/> before
-    ///  setting attributes.
+    /// A <see cref="Tokenizer"/> is a <see cref="TokenStream"/> whose input is a <see cref="TextReader"/>.
+    /// <para/>
+    /// This is an abstract class; subclasses must override <see cref="TokenStream.IncrementToken()"/>.
+    /// <para/>
+    /// NOTE: Subclasses overriding <see cref="TokenStream.IncrementToken()"/> must
+    /// call <see cref="Util.AttributeSource.ClearAttributes()"/> before
+    /// setting attributes.
     /// </summary>
     public abstract class Tokenizer : TokenStream
     {
         /// <summary>
-        /// The text source for this Tokenizer. </summary>
+        /// The text source for this <see cref="Tokenizer"/>. </summary>
         protected TextReader m_input = ILLEGAL_STATE_READER;
 
         /// <summary>
-        /// Pending reader: not actually assigned to input until reset() </summary>
+        /// Pending reader: not actually assigned to input until <see cref="Reset()"/> </summary>
         private TextReader inputPending = ILLEGAL_STATE_READER;
 
         /// <summary>
@@ -52,7 +52,8 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Construct a token stream processing the given input using the given AttributeFactory. </summary>
+        /// Construct a token stream processing the given input using the given <see cref="Util.AttributeSource.AttributeFactory"/>.
+        /// </summary>
         protected internal Tokenizer(AttributeFactory factory, TextReader input)
             : base(factory)
         {
@@ -64,12 +65,17 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// {@inheritDoc}
-        /// <p>
-        /// <b>NOTE:</b>
-        /// The default implementation closes the input TextReader, so
-        /// be sure to call <code>super.Dispose()</code> when overriding this method.
+        /// Releases resources associated with this stream.
+        /// <para/>
+        /// If you override this method, always call <c>base.Dispose()</c>, otherwise
+        /// some internal state will not be correctly reset (e.g., <see cref="Tokenizer"/> will
+        /// throw <see cref="InvalidOperationException"/> on reuse).
         /// </summary>
+        /// <remarks>
+        /// <b>NOTE:</b>
+        /// The default implementation closes the input <see cref="TextReader"/>, so
+        /// be sure to call <c>base.Dispose()</c> when overriding this method.
+        /// </remarks>
         public override void Dispose()
         {
             m_input.Dispose();
@@ -80,20 +86,20 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>
-        /// Return the corrected offset. If <seealso cref="#input"/> is a <seealso cref="CharFilter"/> subclass
-        /// this method calls <seealso cref="CharFilter#correctOffset"/>, else returns <code>currentOff</code>. </summary>
+        /// Return the corrected offset. If <see cref="m_input"/> is a <see cref="CharFilter"/> subclass
+        /// this method calls <see cref="CharFilter.CorrectOffset"/>, else returns <paramref name="currentOff"/>. </summary>
         /// <param name="currentOff"> offset as seen in the output </param>
         /// <returns> corrected offset based on the input </returns>
-        /// <seealso> cref= CharFilter#correctOffset </seealso>
+        /// <seealso cref="CharFilter.CorrectOffset(int)"/>
         protected internal int CorrectOffset(int currentOff)
         {
             return (m_input is CharFilter) ? ((CharFilter)m_input).CorrectOffset(currentOff) : currentOff;
         }
 
         /// <summary>
-        /// Expert: Set a new reader on the Tokenizer.  Typically, an
-        ///  analyzer (in its tokenStream method) will use
-        ///  this to re-use a previously created tokenizer.
+        /// Expert: Set a new reader on the <see cref="Tokenizer"/>. Typically, an
+        /// analyzer (in its GetTokenStream method) will use
+        /// this to re-use a previously created tokenizer.
         /// </summary>
         public void SetReader(TextReader input)
         {


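A sketch of the reuse pattern SetReader enables (not part of the commit; WhitespaceTokenizer comes from Lucene.Net.Analysis.Common). Note that the pending reader only becomes the input on the next Reset(), per the field comment above, and that Dispose() must release the previous reader before SetReader is called again.

    using System;
    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.TokenAttributes;
    using Lucene.Net.Util;

    public static class TokenizerReuseDemo
    {
        public static void Main()
        {
            var tokenizer = new WhitespaceTokenizer(LuceneVersion.LUCENE_48,
                new StringReader("first run"));
            Consume(tokenizer);
            tokenizer.Dispose(); // release the first reader before SetReader

            // Re-use the same instance on new input instead of allocating
            // another tokenizer; the reader is installed on the next Reset().
            tokenizer.SetReader(new StringReader("second run"));
            Consume(tokenizer);
            tokenizer.Dispose();
        }

        private static void Consume(TokenStream ts)
        {
            ICharTermAttribute termAtt = ts.AddAttribute<ICharTermAttribute>();
            ts.Reset();
            while (ts.IncrementToken())
                Console.WriteLine(termAtt);
            ts.End();
        }
    }
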