lucenenet-commits mailing list archives

From ccurr...@apache.org
Subject [Lucene.Net] svn commit: r1198132 [4/17] - in /incubator/lucene.net/trunk/src: contrib/Analyzers/AR/ contrib/Analyzers/BR/ contrib/Analyzers/CJK/ contrib/Analyzers/Cz/ contrib/Analyzers/De/ contrib/Analyzers/Fr/ contrib/Analyzers/Miscellaneous/ contrib/Analyzers/NG...
Date Sun, 06 Nov 2011 05:24:44 GMT
Modified: incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizer.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizer.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizer.cs Sun Nov  6 05:24:26 2011
@@ -34,13 +34,13 @@ namespace Lucene.Net.Analysis.Standard
 	/// 
 	/// <p/> This should be a good tokenizer for most European-language documents:
 	/// 
-	/// <ul>
-	/// <li>Splits words at punctuation characters, removing punctuation. However, a 
-	/// dot that's not followed by whitespace is considered part of a token.</li>
-	/// <li>Splits words at hyphens, unless there's a number in the token, in which case
-	/// the whole token is interpreted as a product number and is not split.</li>
-	/// <li>Recognizes email addresses and internet hostnames as one token.</li>
-	/// </ul>
+	/// <list type="bullet">
+	/// <item>Splits words at punctuation characters, removing punctuation. However, a 
+	/// dot that's not followed by whitespace is considered part of a token.</item>
+	/// <item>Splits words at hyphens, unless there's a number in the token, in which case
+	/// the whole token is interpreted as a product number and is not split.</item>
+	/// <item>Recognizes email addresses and internet hostnames as one token.</item>
+	/// </list>
 	/// 
 	/// <p/>Many applications have specific tokenizer needs.  If this tokenizer does
 	/// not suit your application, please consider copying this source code
@@ -48,12 +48,12 @@ namespace Lucene.Net.Analysis.Standard
 	/// 
 	/// <a name="version"/>
 	/// <p/>
-	/// You must specify the required {@link Version} compatibility when creating
+	/// You must specify the required <see cref="Version" /> compatibility when creating
 	/// StandardAnalyzer:
-	/// <ul>
-	/// <li>As of 2.4, Tokens incorrectly identified as acronyms are corrected (see
-	/// <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1608</a></li>
-	/// </ul>
+	/// <list type="bullet">
+	/// <item>As of 2.4, Tokens incorrectly identified as acronyms are corrected (see
+	/// <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1608</a></item>
+	/// </list>
 	/// </summary>
 	
 	public class StandardTokenizer:Tokenizer
@@ -84,7 +84,7 @@ namespace Lucene.Net.Analysis.Standard
 		/// <summary>String token types that correspond to token type int constants </summary>
 		public static readonly System.String[] TOKEN_TYPES = new System.String[]{"<ALPHANUM>", "<APOSTROPHE>", "<ACRONYM>", "<COMPANY>", "<EMAIL>", "<HOST>", "<NUM>", "<CJ>", "<ACRONYM_DEP>"};
 		
-		/// <deprecated> Please use {@link #TOKEN_TYPES} instead 
+		/// <deprecated> Please use <see cref="TOKEN_TYPES" /> instead 
 		/// </deprecated>
         [Obsolete("Please use TOKEN_TYPES instead")]
 		public static readonly System.String[] tokenImage = TOKEN_TYPES;
@@ -110,25 +110,25 @@ namespace Lucene.Net.Analysis.Standard
 			this.maxTokenLength = length;
 		}
 		
-		/// <seealso cref="setMaxTokenLength">
+		/// <seealso cref="SetMaxTokenLength">
 		/// </seealso>
 		public virtual int GetMaxTokenLength()
 		{
 			return maxTokenLength;
 		}
 		
-		/// <summary> Creates a new instance of the {@link StandardTokenizer}. Attaches the
-		/// <code>input</code> to a newly created JFlex scanner.
+		/// <summary> Creates a new instance of the <see cref="StandardTokenizer" />. Attaches the
+		/// <c>input</c> to a newly created JFlex scanner.
 		/// </summary>
-		/// <deprecated> Use {@link #StandardTokenizer(Version, Reader)} instead
+        /// <deprecated> Use <see cref="StandardTokenizer(Version, System.IO.TextReader)" /> instead
 		/// </deprecated>
         [Obsolete("Use StandardTokenizer(Version, Reader) instead")]
 		public StandardTokenizer(System.IO.TextReader input):this(Version.LUCENE_24, input)
 		{
 		}
 		
-		/// <summary> Creates a new instance of the {@link Lucene.Net.Analysis.Standard.StandardTokenizer}.  Attaches
-		/// the <code>input</code> to the newly created JFlex scanner.
+		/// <summary> Creates a new instance of the <see cref="Lucene.Net.Analysis.Standard.StandardTokenizer" />.  Attaches
+		/// the <c>input</c> to the newly created JFlex scanner.
 		/// 
 		/// </summary>
 		/// <param name="input">The input reader
@@ -137,7 +137,7 @@ namespace Lucene.Net.Analysis.Standard
 		/// 
 		/// See http://issues.apache.org/jira/browse/LUCENE-1068
 		/// </param>
-		/// <deprecated> Use {@link #StandardTokenizer(Version, Reader)} instead
+        /// <deprecated> Use <see cref="StandardTokenizer(Version, System.IO.TextReader)" /> instead
 		/// </deprecated>
         [Obsolete("Use StandardTokenizer(Version, Reader) instead")]
 		public StandardTokenizer(System.IO.TextReader input, bool replaceInvalidAcronym):base()
@@ -146,26 +146,27 @@ namespace Lucene.Net.Analysis.Standard
 			this.scanner = new StandardTokenizerImpl(input);
 			Init(input, replaceInvalidAcronym);
 		}
-		
-		/// <summary> Creates a new instance of the
-		/// {@link org.apache.lucene.analysis.standard.StandardTokenizer}. Attaches
-		/// the <code>input</code> to the newly created JFlex scanner.
-		/// 
-		/// </summary>
-		/// <param name="input">The input reader
-		/// 
-		/// See http://issues.apache.org/jira/browse/LUCENE-1068
-		/// </param>
-		public StandardTokenizer(Version matchVersion, System.IO.TextReader input):base()
+
+	    /// <summary> Creates a new instance of the
+	    /// <see cref="Lucene.Net.Analysis.Standard.StandardTokenizer" />. Attaches
+	    /// the <c>input</c> to the newly created JFlex scanner.
+	    /// 
+	    /// </summary>
+	    /// <param name="matchVersion"></param>
+	    /// <param name="input">The input reader
+	    /// 
+	    /// See http://issues.apache.org/jira/browse/LUCENE-1068
+	    /// </param>
+	    public StandardTokenizer(Version matchVersion, System.IO.TextReader input):base()
 		{
 			InitBlock();
 			this.scanner = new StandardTokenizerImpl(input);
 			Init(input, matchVersion);
 		}
 		
-		/// <summary> Creates a new StandardTokenizer with a given {@link AttributeSource}. </summary>
+		/// <summary> Creates a new StandardTokenizer with a given <see cref="AttributeSource" />. </summary>
 		/// <deprecated> Use
-		/// {@link #StandardTokenizer(Version, AttributeSource, Reader)}
+        /// <see cref="StandardTokenizer(Version, AttributeSource, System.IO.TextReader)" />
 		/// instead
 		/// </deprecated>
         [Obsolete("Use StandardTokenizer(Version, AttributeSource, Reader) instead")]
@@ -176,7 +177,7 @@ namespace Lucene.Net.Analysis.Standard
 			Init(input, replaceInvalidAcronym);
 		}
 		
-		/// <summary> Creates a new StandardTokenizer with a given {@link AttributeSource}.</summary>
+		/// <summary> Creates a new StandardTokenizer with a given <see cref="AttributeSource" />.</summary>
 		public StandardTokenizer(Version matchVersion, AttributeSource source, System.IO.TextReader input):base(source)
 		{
 			InitBlock();
@@ -184,9 +185,9 @@ namespace Lucene.Net.Analysis.Standard
 			Init(input, matchVersion);
 		}
 		
-		/// <summary> Creates a new StandardTokenizer with a given {@link Lucene.Net.Util.AttributeSource.AttributeFactory} </summary>
+		/// <summary> Creates a new StandardTokenizer with a given <see cref="Lucene.Net.Util.AttributeSource.AttributeFactory" /> </summary>
 		/// <deprecated> Use
-		/// {@link #StandardTokenizer(Version, org.apache.lucene.util.AttributeSource.AttributeFactory, Reader)}
+        /// <see cref="StandardTokenizer(Version, AttributeSource.AttributeFactory, System.IO.TextReader)" />
 		/// instead
 		/// </deprecated>
         [Obsolete("Use StandardTokenizer(Version, Lucene.Net.Util.AttributeSource.AttributeFactory, Reader) instead")]
@@ -198,7 +199,7 @@ namespace Lucene.Net.Analysis.Standard
 		}
 		
 		/// <summary> Creates a new StandardTokenizer with a given
-		/// {@link org.apache.lucene.util.AttributeSource.AttributeFactory}
+		/// <see cref="Lucene.Net.Util.AttributeSource.AttributeFactory" />
 		/// </summary>
 		public StandardTokenizer(Version matchVersion, AttributeFactory factory, System.IO.TextReader input):base(factory)
 		{
@@ -236,11 +237,11 @@ namespace Lucene.Net.Analysis.Standard
 		private PositionIncrementAttribute posIncrAtt;
 		private TypeAttribute typeAtt;
 		
-		/*
-		* (non-Javadoc)
-		*
-		* @see Lucene.Net.Analysis.TokenStream#next()
-		*/
+		///<summary>
+		/// (non-Javadoc)
+		///
+		/// <see cref="Lucene.Net.Analysis.TokenStream.Next()" />
+        ///</summary>
 		public override bool IncrementToken()
 		{
 			ClearAttributes();
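
For reference, a minimal sketch of the Version-aware constructor documented above (the attribute handling and sample input are assumptions beyond what this diff shows):

    using System.IO;
    using Lucene.Net.Analysis.Standard;
    using Version = Lucene.Net.Util.Version;

    // Sketch: tokenize a string with the non-deprecated constructor.
    // Version.LUCENE_24 appears elsewhere in this diff; as of 2.4 the
    // acronym correction tracked in LUCENE-1068 is applied.
    TextReader reader = new StringReader("visit lucene.apache.org today");
    StandardTokenizer tokenizer = new StandardTokenizer(Version.LUCENE_24, reader);
    while (tokenizer.IncrementToken())
    {
        // Term text and token type are read through the attribute API
        // (TermAttribute / TypeAttribute in this code base).
    }
    tokenizer.Close();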

Modified: incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizerImpl.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizerImpl.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizerImpl.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizerImpl.cs Sun Nov  6 05:24:26 2011
@@ -158,7 +158,7 @@ namespace Lucene.Net.Analysis.Standard
 		/* error messages for the codes above */
 		private static readonly System.String[] ZZ_ERROR_MSG = new System.String[]{"Unknown internal scanner error", "Error: could not match input", "Error: pushback value was too large"};
 		
-		/// <summary> ZZ_ATTRIBUTE[aState] contains the attributes of state <code>aState</code></summary>
+		/// <summary> ZZ_ATTRIBUTE[aState] contains the attributes of state <c>aState</c></summary>
 		private static readonly int[] ZZ_ATTRIBUTE = ZzUnpackAttribute();
 		
 		private const System.String ZZ_ATTRIBUTE_PACKED_0 = "\x0001\x0000\x0001\x0009\x0003\x0001\x0001\x0009\x0001\x0001\x000B\x0000\x0004\x0001\x0002\x0000" + "\x0001\x0001\x0001\x0000\x000F\x0001\x0001\x0000\x0001\x0001\x0003\x0000\x0005\x0001";
@@ -289,7 +289,7 @@ namespace Lucene.Net.Analysis.Standard
 		/// There is also a java.io.InputStream version of this constructor.
 		/// 
 		/// </summary>
-		/// <param name="in"> the java.io.Reader to read input from.
+        /// <param name="in_Renamed"> the java.io.Reader to read input from.
 		/// </param>
 		internal StandardTokenizerImpl(System.IO.TextReader in_Renamed)
 		{
@@ -300,7 +300,7 @@ namespace Lucene.Net.Analysis.Standard
 		/// There is also java.io.Reader version of this constructor.
 		/// 
 		/// </summary>
-		/// <param name="in"> the java.io.Inputstream to read input from.
+        /// <param name="in_Renamed"> the java.io.Inputstream to read input from.
 		/// </param>
 		internal StandardTokenizerImpl(System.IO.Stream in_Renamed):this(new System.IO.StreamReader(in_Renamed, System.Text.Encoding.Default))
 		{
@@ -331,12 +331,11 @@ namespace Lucene.Net.Analysis.Standard
 		
 		
 		/// <summary> Refills the input buffer.
-		/// 
 		/// </summary>
-		/// <returns>      <code>false</code>, iff there was new input.
+		/// <returns><c>false</c>, iff there was new input.
 		/// 
 		/// </returns>
-		/// <exception cref="java.io.IOException"> if any I/O-Error occurs
+		/// <exception cref="System.IO.IOException"> if any I/O error occurs
 		/// </exception>
 		private bool ZzRefill()
 		{
@@ -514,7 +513,7 @@ namespace Lucene.Net.Analysis.Standard
 		/// </summary>
 		/// <returns>      the next token
 		/// </returns>
-		/// <exception cref="java.io.IOException"> if any I/O-Error occurs
+		/// <exception cref="System.IO.IOException"> if any I/O error occurs
 		/// </exception>
 		public virtual int GetNextToken()
 		{
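
Since GetNextToken() is documented only through its return value and exceptions, here is a hedged sketch of its calling convention. StandardTokenizerImpl is internal, so this mirrors what StandardTokenizer does with its scanner field; YYEOF is the usual JFlex end-of-input sentinel and is an assumption here, as is the reader variable:

    // Drive the JFlex scanner until end of input. GetNextToken() returns
    // a token-type code that indexes StandardTokenizer.TOKEN_TYPES.
    StandardTokenizerImpl scanner = new StandardTokenizerImpl(reader);
    int tokenType;
    while ((tokenType = scanner.GetNextToken()) != StandardTokenizerImpl.YYEOF)
    {
        System.String typeName = StandardTokenizer.TOKEN_TYPES[tokenType];
        // The matched text is copied out of the scanner's buffer here;
        // accessor names are omitted since they vary with the JFlex port.
    }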

Modified: incubator/lucene.net/trunk/src/core/Analysis/StopAnalyzer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/StopAnalyzer.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/StopAnalyzer.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/StopAnalyzer.cs Sun Nov  6 05:24:26 2011
@@ -22,16 +22,16 @@ using Version = Lucene.Net.Util.Version;
 namespace Lucene.Net.Analysis
 {
 	
-	/// <summary> Filters {@link LetterTokenizer} with {@link LowerCaseFilter} and
-	/// {@link StopFilter}.
+	/// <summary> Filters <see cref="LetterTokenizer" /> with <see cref="LowerCaseFilter" /> and
+	/// <see cref="StopFilter" />.
 	/// 
 	/// <a name="version"/>
 	/// <p/>
-	/// You must specify the required {@link Version} compatibility when creating
+	/// You must specify the required <see cref="Version" /> compatibility when creating
 	/// StopAnalyzer:
-	/// <ul>
-	/// <li>As of 2.9, position increments are preserved</li>
-	/// </ul>
+	/// <list type="bullet">
+	/// <item>As of 2.9, position increments are preserved</item>
+	/// </list>
 	/// </summary>
 	
 	public sealed class StopAnalyzer:Analyzer
@@ -45,7 +45,7 @@ namespace Lucene.Net.Analysis
 		/// <summary>An array containing some common English words that are not usually useful
 		/// for searching. 
 		/// </summary>
-		/// <deprecated> Use {@link #ENGLISH_STOP_WORDS_SET} instead 
+		/// <deprecated> Use <see cref="ENGLISH_STOP_WORDS_SET" /> instead 
 		/// </deprecated>
         [Obsolete("Use ENGLISH_STOP_WORDS_SET instead ")]
 		public static readonly System.String[] ENGLISH_STOP_WORDS = new System.String[]{"a", "an", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"};
@@ -58,7 +58,7 @@ namespace Lucene.Net.Analysis
 		/// <summary>Builds an analyzer which removes words in
 		/// ENGLISH_STOP_WORDS.
 		/// </summary>
-		/// <deprecated> Use {@link #StopAnalyzer(Version)} instead
+		/// <deprecated> Use <see cref="StopAnalyzer(Version)" /> instead
 		/// </deprecated>
         [Obsolete("Use StopAnalyzer(Version) instead")]
 		public StopAnalyzer()
@@ -80,9 +80,9 @@ namespace Lucene.Net.Analysis
 		/// ENGLISH_STOP_WORDS.
 		/// </summary>
 		/// <param name="enablePositionIncrements">
-		/// See {@link StopFilter#SetEnablePositionIncrements}
+		/// See <see cref="StopFilter.SetEnablePositionIncrements" />
 		/// </param>
-		/// <deprecated> Use {@link #StopAnalyzer(Version)} instead
+		/// <deprecated> Use <see cref="StopAnalyzer(Version)" /> instead
 		/// </deprecated>
         [Obsolete("Use StopAnalyzer(Version) instead")]
 		public StopAnalyzer(bool enablePositionIncrements)
@@ -93,9 +93,9 @@ namespace Lucene.Net.Analysis
 		}
 		
 		/// <summary>Builds an analyzer with the stop words from the given set.</summary>
-		/// <deprecated> Use {@link #StopAnalyzer(Version, Set)} instead
+		/// <deprecated> Use <see cref="StopAnalyzer(Version, System.Collections.Hashtable)" /> instead
 		/// </deprecated>
-        [Obsolete("Use StopAnalyzer(Version, Set) instead")]
+        [Obsolete("Use StopAnalyzer(Version, System.Collections.Hashtable) instead")]
 		public StopAnalyzer(System.Collections.Hashtable stopWords)
 		{
 			this.stopWords = stopWords;
@@ -115,11 +115,11 @@ namespace Lucene.Net.Analysis
 		/// <param name="stopWords">Set of stop words
 		/// </param>
 		/// <param name="enablePositionIncrements">
-		/// See {@link StopFilter#SetEnablePositionIncrements}
+		/// See <see cref="StopFilter.SetEnablePositionIncrements" />
 		/// </param>
-		/// <deprecated> Use {@link #StopAnalyzer(Version, Set)} instead
+		/// <deprecated> Use <see cref="StopAnalyzer(Version, System.Collections.Hashtable)" /> instead
 		/// </deprecated>
-        [Obsolete("Use StopAnalyzer(Version, Set) instead")]
+        [Obsolete("Use StopAnalyzer(Version, System.Collections.Hashtable) instead")]
 		public StopAnalyzer(System.Collections.Hashtable stopWords, bool enablePositionIncrements)
 		{
 			this.stopWords = stopWords;
@@ -128,11 +128,11 @@ namespace Lucene.Net.Analysis
 		}
 		
 		/// <summary>Builds an analyzer which removes words in the provided array.</summary>
-		/// <deprecated> Use {@link #StopAnalyzer(Set, boolean)} instead 
+        /// <deprecated> Use <see cref="StopAnalyzer(System.Collections.Hashtable, bool)" /> instead 
 		/// </deprecated>
-		/// <deprecated> Use {@link #StopAnalyzer(Version, Set)} instead
+        /// <deprecated> Use <see cref="StopAnalyzer(Version, System.Collections.Hashtable)" /> instead
 		/// </deprecated>
-        [Obsolete("Use StopAnalyzer(Set, boolean) or StopAnalyzer(Version, Set) instead ")]
+        [Obsolete("Use StopAnalyzer(System.Collections.Hashtable, boolean) or StopAnalyzer(Version, System.Collections.Hashtable) instead ")]
 		public StopAnalyzer(System.String[] stopWords)
 		{
 			this.stopWords = StopFilter.MakeStopSet(stopWords);
@@ -144,9 +144,9 @@ namespace Lucene.Net.Analysis
 		/// <param name="stopWords">Array of stop words
 		/// </param>
 		/// <param name="enablePositionIncrements">
-		/// See {@link StopFilter#SetEnablePositionIncrements}
+		/// See <see cref="StopFilter.SetEnablePositionIncrements" />
 		/// </param>
-		/// <deprecated> Use {@link #StopAnalyzer(Version, Set)} instead
+        /// <deprecated> Use <see cref="StopAnalyzer(Version, System.Collections.Hashtable)" /> instead
 		/// </deprecated>
         [Obsolete("Use StopAnalyzer(Version, Set) instead")]
 		public StopAnalyzer(System.String[] stopWords, bool enablePositionIncrements)
@@ -157,9 +157,9 @@ namespace Lucene.Net.Analysis
 		}
 		
 		/// <summary>Builds an analyzer with the stop words from the given file.</summary>
-		/// <seealso cref="WordlistLoader.GetWordSet(File)">
+		/// <seealso cref="WordlistLoader.GetWordSet(System.IO.FileInfo)">
 		/// </seealso>
-		/// <deprecated> Use {@link #StopAnalyzer(Version, File)} instead
+		/// <deprecated> Use <see cref="StopAnalyzer(Version, System.IO.FileInfo)" /> instead
 		/// </deprecated>
         [Obsolete("Use StopAnalyzer(Version, File) instead")]
 		public StopAnalyzer(System.IO.FileInfo stopwordsFile)
@@ -170,14 +170,14 @@ namespace Lucene.Net.Analysis
 		}
 		
 		/// <summary>Builds an analyzer with the stop words from the given file.</summary>
-		/// <seealso cref="WordlistLoader.GetWordSet(File)">
+		/// <seealso cref="WordlistLoader.GetWordSet(System.IO.FileInfo)">
 		/// </seealso>
 		/// <param name="stopwordsFile">File to load stop words from
 		/// </param>
 		/// <param name="enablePositionIncrements">
-		/// See {@link StopFilter#SetEnablePositionIncrements}
+		/// See <see cref="StopFilter.SetEnablePositionIncrements" />
 		/// </param>
-		/// <deprecated> Use {@link #StopAnalyzer(Version, File)} instead
+		/// <deprecated> Use <see cref="StopAnalyzer(Version, System.IO.FileInfo)" /> instead
 		/// </deprecated>
         [Obsolete("Use StopAnalyzer(Version, File) instead")]
 		public StopAnalyzer(System.IO.FileInfo stopwordsFile, bool enablePositionIncrements)
@@ -190,7 +190,7 @@ namespace Lucene.Net.Analysis
 		/// <summary> Builds an analyzer with the stop words from the given file.
 		/// 
 		/// </summary>
-		/// <seealso cref="WordlistLoader.getWordSet(File)">
+		/// <seealso cref="WordlistLoader.GetWordSet(System.IO.FileInfo)">
 		/// </seealso>
 		/// <param name="matchVersion">See <a href="#version">above</a>
 		/// </param>
@@ -204,9 +204,9 @@ namespace Lucene.Net.Analysis
 		}
 		
 		/// <summary>Builds an analyzer with the stop words from the given reader.</summary>
-		/// <seealso cref="WordlistLoader.GetWordSet(Reader)">
+		/// <seealso cref="WordlistLoader.GetWordSet(System.IO.TextReader)">
 		/// </seealso>
-		/// <deprecated> Use {@link #StopAnalyzer(Version, Reader)} instead
+		/// <deprecated> Use <see cref="StopAnalyzer(Version, System.IO.TextReader)" /> instead
 		/// </deprecated>
         [Obsolete("Use StopAnalyzer(Version, Reader) instead")]
 		public StopAnalyzer(System.IO.TextReader stopwords)
@@ -217,14 +217,14 @@ namespace Lucene.Net.Analysis
 		}
 		
 		/// <summary>Builds an analyzer with the stop words from the given reader.</summary>
-		/// <seealso cref="WordlistLoader.GetWordSet(Reader)">
+		/// <seealso cref="WordlistLoader.GetWordSet(System.IO.TextReader)">
 		/// </seealso>
 		/// <param name="stopwords">Reader to load stop words from
 		/// </param>
 		/// <param name="enablePositionIncrements">
-		/// See {@link StopFilter#SetEnablePositionIncrements}
+		/// See <see cref="StopFilter.SetEnablePositionIncrements" />
 		/// </param>
-		/// <deprecated> Use {@link #StopAnalyzer(Version, Reader)} instead
+		/// <deprecated> Use <see cref="StopAnalyzer(Version, System.IO.TextReader)" /> instead
 		/// </deprecated>
         [Obsolete("Use StopAnalyzer(Version, Reader) instead")]
 		public StopAnalyzer(System.IO.TextReader stopwords, bool enablePositionIncrements)
@@ -235,7 +235,7 @@ namespace Lucene.Net.Analysis
 		}
 
         /// <summary>Builds an analyzer with the stop words from the given reader. </summary>
-        /// <seealso cref="WordlistLoader.GetWordSet(Reader)">
+        /// <seealso cref="WordlistLoader.GetWordSet(System.IO.TextReader)">
         /// </seealso>
         /// <param name="matchVersion">See <a href="#Version">above</a>
         /// </param>
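
For reference, the non-deprecated constructors that the deprecation notes above point to, in one sketch (the Version constant and sample words are placeholders):

    using Lucene.Net.Analysis;
    using Version = Lucene.Net.Util.Version;

    // StopAnalyzer(Version) uses ENGLISH_STOP_WORDS_SET; the Hashtable
    // overload takes a stop set built by StopFilter.MakeStopSet.
    StopAnalyzer defaultStops = new StopAnalyzer(Version.LUCENE_24);
    System.Collections.Hashtable stopSet =
        StopFilter.MakeStopSet(new System.String[] { "foo", "bar" });
    StopAnalyzer customStops = new StopAnalyzer(Version.LUCENE_24, stopSet);
    TokenStream ts = customStops.TokenStream(
        "body", new System.IO.StringReader("the foo and the bar"));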

Modified: incubator/lucene.net/trunk/src/core/Analysis/StopFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/StopFilter.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/StopFilter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/StopFilter.cs Sun Nov  6 05:24:26 2011
@@ -41,7 +41,7 @@ namespace Lucene.Net.Analysis
 		private PositionIncrementAttribute posIncrAtt;
 		
 		/// <summary> Construct a token stream filtering the given input.</summary>
-		/// <deprecated> Use {@link #StopFilter(boolean, TokenStream, String[])} instead
+		/// <deprecated> Use <see cref="StopFilter(bool, TokenStream, String[])" /> instead
 		/// </deprecated>
         [Obsolete("Use StopFilter(bool, TokenStream, String[]) instead")]
 		public StopFilter(TokenStream input, System.String[] stopWords):this(ENABLE_POSITION_INCREMENTS_DEFAULT, input, stopWords, false)
@@ -55,7 +55,7 @@ namespace Lucene.Net.Analysis
 		/// </param>
 		/// <param name="stopWords">array of stop words
 		/// </param>
-		/// <deprecated> Use {@link #StopFilter(boolean, TokenStream, Set)} instead.
+		/// <deprecated> Use <see cref="StopFilter(bool, TokenStream, System.Collections.Hashtable)" /> instead.
 		/// </deprecated>
         [Obsolete("Use StopFilter(bool, TokenStream, Hashtable) instead.")]
 		public StopFilter(bool enablePositionIncrements, TokenStream input, System.String[] stopWords):this(enablePositionIncrements, input, stopWords, false)
@@ -65,9 +65,9 @@ namespace Lucene.Net.Analysis
 		/// <summary> Constructs a filter which removes words from the input
 		/// TokenStream that are named in the array of words.
 		/// </summary>
-		/// <deprecated> Use {@link #StopFilter(boolean, TokenStream, String[], boolean)} instead
+		/// <deprecated> Use <see cref="StopFilter(bool, TokenStream, String[], bool)" /> instead
 		/// </deprecated>
-        [Obsolete("Use {@link #StopFilter(bool, TokenStream, String[], bool)} instead")]
+        [Obsolete("Use StopFilter(bool, TokenStream, String[], bool) instead")]
 		public StopFilter(TokenStream in_Renamed, System.String[] stopWords, bool ignoreCase):this(ENABLE_POSITION_INCREMENTS_DEFAULT, in_Renamed, stopWords, ignoreCase)
 		{
 		}
@@ -77,13 +77,13 @@ namespace Lucene.Net.Analysis
 		/// </summary>
 		/// <param name="enablePositionIncrements">true if token positions should record the removed stop words
 		/// </param>
-		/// <param name="in">input TokenStream
+		/// <param name="in_Renamed">input TokenStream
 		/// </param>
 		/// <param name="stopWords">array of stop words
 		/// </param>
 		/// <param name="ignoreCase">true if case is ignored
 		/// </param>
-		/// <deprecated> Use {@link #StopFilter(boolean, TokenStream, Set, boolean)} instead.
+		/// <deprecated> Use <see cref="StopFilter(bool, TokenStream, System.Collections.Hashtable, bool)" /> instead.
 		/// </deprecated>
         [Obsolete("Use StopFilter(bool, TokenStream, Hashtable, bool) instead.")]
 		public StopFilter(bool enablePositionIncrements, TokenStream in_Renamed, System.String[] stopWords, bool ignoreCase):base(in_Renamed)
@@ -95,13 +95,13 @@ namespace Lucene.Net.Analysis
 		
 		
 		/// <summary> Construct a token stream filtering the given input.
-		/// If <code>stopWords</code> is an instance of {@link CharArraySet} (true if
-		/// <code>makeStopSet()</code> was used to construct the set) it will be directly used
-		/// and <code>ignoreCase</code> will be ignored since <code>CharArraySet</code>
+		/// If <c>stopWords</c> is an instance of <see cref="CharArraySet" /> (true if
+		/// <c>makeStopSet()</c> was used to construct the set) it will be directly used
+		/// and <c>ignoreCase</c> will be ignored since <c>CharArraySet</c>
 		/// directly controls case sensitivity.
 		/// <p/>
-		/// If <code>stopWords</code> is not an instance of {@link CharArraySet},
-		/// a new CharArraySet will be constructed and <code>ignoreCase</code> will be
+		/// If <c>stopWords</c> is not an instance of <see cref="CharArraySet" />,
+		/// a new CharArraySet will be constructed and <c>ignoreCase</c> will be
 		/// used to specify the case sensitivity of that set.
 		/// 
 		/// </summary>
@@ -111,7 +111,7 @@ namespace Lucene.Net.Analysis
 		/// </param>
 		/// <param name="ignoreCase">-Ignore case when stopping.
 		/// </param>
-		/// <deprecated> Use {@link #StopFilter(boolean, TokenStream, Set, boolean)} instead
+		/// <deprecated> Use <see cref="StopFilter(bool, TokenStream, System.Collections.Hashtable, bool)" /> instead
 		/// </deprecated>
         [Obsolete("Use StopFilter(bool, TokenStream, Set, bool) instead")]
 		public StopFilter(TokenStream input, System.Collections.Hashtable stopWords, bool ignoreCase):this(ENABLE_POSITION_INCREMENTS_DEFAULT, input, stopWords, ignoreCase)
@@ -119,13 +119,13 @@ namespace Lucene.Net.Analysis
 		}
 		
 		/// <summary> Construct a token stream filtering the given input.
-		/// If <code>stopWords</code> is an instance of {@link CharArraySet} (true if
-		/// <code>makeStopSet()</code> was used to construct the set) it will be directly used
-		/// and <code>ignoreCase</code> will be ignored since <code>CharArraySet</code>
+		/// If <c>stopWords</c> is an instance of <see cref="CharArraySet" /> (true if
+		/// <c>makeStopSet()</c> was used to construct the set) it will be directly used
+		/// and <c>ignoreCase</c> will be ignored since <c>CharArraySet</c>
 		/// directly controls case sensitivity.
 		/// <p/>
-		/// If <code>stopWords</code> is not an instance of {@link CharArraySet},
-		/// a new CharArraySet will be constructed and <code>ignoreCase</code> will be
+		/// If <c>stopWords</c> is not an instance of <see cref="CharArraySet" />,
+		/// a new CharArraySet will be constructed and <c>ignoreCase</c> will be
 		/// used to specify the case sensitivity of that set.
 		/// 
 		/// </summary>
@@ -156,9 +156,9 @@ namespace Lucene.Net.Analysis
 		/// TokenStream that are named in the Set.
 		/// 
 		/// </summary>
-		/// <seealso cref="MakeStopSet(java.lang.String[])">
+		/// <seealso cref="MakeStopSet(String[])">
 		/// </seealso>
-		/// <deprecated> Use {@link #StopFilter(boolean, TokenStream, Set)} instead
+		/// <deprecated> Use <see cref="StopFilter(bool, TokenStream, System.Collections.Hashtable)" /> instead
 		/// </deprecated>
         [Obsolete("Use StopFilter(bool, TokenStream, Hashtable) instead")]
 		public StopFilter(TokenStream in_Renamed, System.Collections.Hashtable stopWords):this(ENABLE_POSITION_INCREMENTS_DEFAULT, in_Renamed, stopWords, false)
@@ -171,11 +171,11 @@ namespace Lucene.Net.Analysis
 		/// </summary>
 		/// <param name="enablePositionIncrements">true if token positions should record the removed stop words
 		/// </param>
-		/// <param name="in">Input stream
+		/// <param name="in_Renamed">Input stream
 		/// </param>
 		/// <param name="stopWords">The set of Stop Words.
 		/// </param>
-		/// <seealso cref="MakeStopSet(java.lang.String[])">
+		/// <seealso cref="MakeStopSet(String[])">
 		/// </seealso>
 		public StopFilter(bool enablePositionIncrements, TokenStream in_Renamed, System.Collections.Hashtable stopWords):this(enablePositionIncrements, in_Renamed, stopWords, false)
 		{
@@ -193,7 +193,7 @@ namespace Lucene.Net.Analysis
 		/// an Analyzer is constructed.
 		/// 
 		/// </summary>
-		/// <seealso cref="MakeStopSet(java.lang.String[], boolean)"> passing false to ignoreCase
+		/// <seealso cref="MakeStopSet(String[], bool)"> passing false to ignoreCase
 		/// </seealso>
 		public static System.Collections.Hashtable MakeStopSet(System.String[] stopWords)
 		{
@@ -206,7 +206,7 @@ namespace Lucene.Net.Analysis
 		/// an Analyzer is constructed.
 		/// 
 		/// </summary>
-		/// <seealso cref="MakeStopSet(java.lang.String[], boolean)"> passing false to ignoreCase
+		/// <seealso cref="MakeStopSet(String[], bool)"> passing false to ignoreCase
 		/// </seealso>
 		public static System.Collections.Hashtable MakeStopSet(System.Collections.IList stopWords)
 		{
@@ -274,7 +274,7 @@ namespace Lucene.Net.Analysis
 		
 		/// <summary> Returns version-dependent default for enablePositionIncrements. Analyzers
 		/// that embed StopFilter use this method when creating the StopFilter. Prior
-		/// to 2.9, this returns {@link #getEnablePositionIncrementsDefault}. On 2.9
+		/// to 2.9, this returns <see cref="GetEnablePositionIncrementsDefault" />. On 2.9
 		/// or later, it returns true.
 		/// </summary>
 		public static bool GetEnablePositionIncrementsVersionDefault(Version matchVersion)
@@ -293,15 +293,15 @@ namespace Lucene.Net.Analysis
 		/// from now on.
 		/// <p/>
 		/// Note: behavior of a single StopFilter instance can be modified with
-		/// {@link #SetEnablePositionIncrements(boolean)}. This static method allows
+		/// <see cref="SetEnablePositionIncrements(bool)" />. This static method allows
 		/// control over behavior of classes using StopFilters internally, for
-		/// example {@link Lucene.Net.Analysis.Standard.StandardAnalyzer
-		/// StandardAnalyzer} if used with the no-arg ctor.
+        /// example <see cref="Lucene.Net.Analysis.Standard.StandardAnalyzer"/>
+		/// if used with the no-arg ctor.
 		/// <p/>
 		/// Default : false.
 		/// 
 		/// </summary>
-		/// <seealso cref="setEnablePositionIncrements(bool)">
+		/// <seealso cref="SetEnablePositionIncrements(bool)">
 		/// </seealso>
 		/// <deprecated> Please specify this when you create the StopFilter
 		/// </deprecated>
@@ -318,10 +318,10 @@ namespace Lucene.Net.Analysis
 			return enablePositionIncrements;
 		}
 		
-		/// <summary> If <code>true</code>, this StopFilter will preserve
+		/// <summary> If <c>true</c>, this StopFilter will preserve
 		/// positions of the incoming tokens (ie, accumulate and
 		/// set position increments of the removed stop tokens).
-		/// Generally, <code>true</code> is best as it does not
+		/// Generally, <c>true</c> is best as it does not
 		/// lose information (positions of the original tokens)
 		/// during indexing.
 		/// 
@@ -330,7 +330,7 @@ namespace Lucene.Net.Analysis
 		/// token is incremented.
 		/// 
 		/// <p/> <b>NOTE</b>: be sure to also
-		/// set {@link QueryParser#setEnablePositionIncrements} if
+		/// set <see cref="QueryParser.SetEnablePositionIncrements" /> if
 		/// you use QueryParser to create queries.
 		/// </summary>
 		public void  SetEnablePositionIncrements(bool enable)
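
A short sketch tying the pieces above together: build the stop set once with MakeStopSet, then wrap a stream while preserving the positions of removed stop words (the sample words and input are placeholders):

    using System.IO;
    using Lucene.Net.Analysis;

    // enablePositionIncrements = true keeps phrase positions intact,
    // which the summary above recommends for indexing.
    System.Collections.Hashtable stopSet =
        StopFilter.MakeStopSet(new System.String[] { "the", "a", "an" });
    TokenStream source = new WhitespaceTokenizer(new StringReader("a quick fox"));
    TokenStream filtered = new StopFilter(true, source, stopSet);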

Modified: incubator/lucene.net/trunk/src/core/Analysis/TeeSinkTokenFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/TeeSinkTokenFilter.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/TeeSinkTokenFilter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/TeeSinkTokenFilter.cs Sun Nov  6 05:24:26 2011
@@ -30,7 +30,7 @@ namespace Lucene.Net.Analysis
 	/// It is also useful for doing things like entity extraction or proper noun analysis as
 	/// part of the analysis workflow and saving off those tokens for use in another field.
 	/// 
-	/// <pre>
+	/// <code>
 	/// TeeSinkTokenFilter source1 = new TeeSinkTokenFilter(new WhitespaceTokenizer(reader1));
 	/// TeeSinkTokenFilter.SinkTokenStream sink1 = source1.newSinkTokenStream();
 	/// TeeSinkTokenFilter.SinkTokenStream sink2 = source1.newSinkTokenStream();
@@ -45,23 +45,23 @@ namespace Lucene.Net.Analysis
 	/// d.add(new Field("f2", final2));
 	/// d.add(new Field("f3", final3));
 	/// d.add(new Field("f4", final4));
-	/// </pre>
-	/// In this example, <code>sink1</code> and <code>sink2</code> will both get tokens from both
-	/// <code>reader1</code> and <code>reader2</code> after whitespace tokenizer
+	/// </code>
+	/// In this example, <c>sink1</c> and <c>sink2</c> will both get tokens from both
+	/// <c>reader1</c> and <c>reader2</c> after whitespace tokenizer
 	/// and now we can further wrap any of these in extra analysis, and more "sources" can be inserted if desired.
 	/// It is important, that tees are consumed before sinks (in the above example, the field names must be
 	/// less the sink's field names). If you are not sure, which stream is consumed first, you can simply
-	/// add another sink and then pass all tokens to the sinks at once using {@link #consumeAllTokens}.
+	/// add another sink and then pass all tokens to the sinks at once using <see cref="ConsumeAllTokens" />.
 	/// This TokenFilter is exhausted after this. In the above example, change
 	/// the example above to:
-	/// <pre>
+	/// <code>
 	/// ...
 	/// TokenStream final1 = new LowerCaseFilter(source1.newSinkTokenStream());
 	/// TokenStream final2 = source2.newSinkTokenStream();
 	/// sink1.consumeAllTokens();
 	/// sink2.consumeAllTokens();
 	/// ...
-	/// </pre>
+	/// </code>
 	/// In this case, the fields can be added in any order, because the sources are not used anymore and all sinks are ready.
 	/// <p/>Note, the EntityDetect and URLDetect TokenStreams are for the example and do not currently exist in Lucene.
 	/// </summary>
@@ -81,13 +81,13 @@ namespace Lucene.Net.Analysis
 		{
 		}
 		
-		/// <summary> Returns a new {@link SinkTokenStream} that receives all tokens consumed by this stream.</summary>
+		/// <summary> Returns a new <see cref="SinkTokenStream" /> that receives all tokens consumed by this stream.</summary>
 		public SinkTokenStream NewSinkTokenStream()
 		{
 			return NewSinkTokenStream(ACCEPT_ALL_FILTER);
 		}
 		
-		/// <summary> Returns a new {@link SinkTokenStream} that receives all tokens consumed by this stream
+		/// <summary> Returns a new <see cref="SinkTokenStream" /> that receives all tokens consumed by this stream
 		/// that pass the supplied filter.
 		/// </summary>
 		/// <seealso cref="SinkFilter">
@@ -99,7 +99,7 @@ namespace Lucene.Net.Analysis
 			return sink;
 		}
 		
-		/// <summary> Adds a {@link SinkTokenStream} created by another <code>TeeSinkTokenFilter</code>
+		/// <summary> Adds a <see cref="SinkTokenStream" /> created by another <c>TeeSinkTokenFilter</c>
 		/// to this one. The supplied stream will also receive all consumed tokens.
 		/// This method can be used to pass tokens from two different tees to one sink.
 		/// </summary>
@@ -118,7 +118,7 @@ namespace Lucene.Net.Analysis
 			this.sinks.Add(new System.WeakReference(sink));
 		}
 		
-		/// <summary> <code>TeeSinkTokenFilter</code> passes all tokens to the added sinks
+		/// <summary> <c>TeeSinkTokenFilter</c> passes all tokens to the added sinks
 		/// when itself is consumed. To be sure, that all tokens from the input
 		/// stream are passed to the sinks, you can call this methods.
 		/// This instance is exhausted after this, but all sinks are instant available.
@@ -170,15 +170,15 @@ namespace Lucene.Net.Analysis
 			}
 		}
 		
-		/// <summary> A filter that decides which {@link AttributeSource} states to store in the sink.</summary>
+		/// <summary> A filter that decides which <see cref="AttributeSource" /> states to store in the sink.</summary>
 		public abstract class SinkFilter
 		{
-			/// <summary> Returns true, iff the current state of the passed-in {@link AttributeSource} shall be stored
+			/// <summary> Returns true, iff the current state of the passed-in <see cref="AttributeSource" /> shall be stored
 			/// in the sink. 
 			/// </summary>
 			public abstract bool Accept(AttributeSource source);
 			
-			/// <summary> Called by {@link SinkTokenStream#Reset()}. This method does nothing by default
+			/// <summary> Called by <see cref="SinkTokenStream.Reset()" />. This method does nothing by default
 			/// and can optionally be overridden.
 			/// </summary>
 			public void  Reset()
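
The examples in the summary above keep their Java method casing; in this port the same flow reads roughly as follows (reader1 is the TextReader from the summary example, and the Field/Document wiring is elided):

    // One source feeding two sinks; ConsumeAllTokens() exhausts the tee
    // so the sinks can then be consumed in any order.
    TeeSinkTokenFilter source1 =
        new TeeSinkTokenFilter(new WhitespaceTokenizer(reader1));
    TeeSinkTokenFilter.SinkTokenStream sink1 = source1.NewSinkTokenStream();
    TeeSinkTokenFilter.SinkTokenStream sink2 = source1.NewSinkTokenStream();
    source1.ConsumeAllTokens();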

Modified: incubator/lucene.net/trunk/src/core/Analysis/TeeTokenFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/TeeTokenFilter.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/TeeTokenFilter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/TeeTokenFilter.cs Sun Nov  6 05:24:26 2011
@@ -28,7 +28,7 @@ namespace Lucene.Net.Analysis
 	/// It is also useful for doing things like entity extraction or proper noun analysis as
 	/// part of the analysis workflow and saving off those tokens for use in another field.
 	/// 
-	/// <pre>
+	/// <code>
 	/// SinkTokenizer sink1 = new SinkTokenizer();
 	/// SinkTokenizer sink2 = new SinkTokenizer();
 	/// TokenStream source1 = new TeeTokenFilter(new TeeTokenFilter(new WhitespaceTokenizer(reader1), sink1), sink2);
@@ -41,9 +41,9 @@ namespace Lucene.Net.Analysis
 	/// d.add(new Field("f2", final2));
 	/// d.add(new Field("f3", final3));
 	/// d.add(new Field("f4", final4));
-	/// </pre>
-	/// In this example, <code>sink1</code> and <code>sink2</code> will both get tokens from both
-	/// <code>reader1</code> and <code>reader2</code> after whitespace tokenizer
+	/// </code>
+	/// In this example, <c>sink1</c> and <c>sink2</c> will both get tokens from both
+	/// <c>reader1</c> and <c>reader2</c> after whitespace tokenizer
 	/// and now we can further wrap any of these in extra analysis, and more "sources" can be inserted if desired.
 	/// It is important, that tees are consumed before sinks (in the above example, the field names must be
 	/// less the sink's field names).
@@ -52,13 +52,13 @@ namespace Lucene.Net.Analysis
 	/// 
 	/// See <a href="http://issues.apache.org/jira/browse/LUCENE-1058">LUCENE-1058</a>.
 	/// <p/>
-	/// WARNING: {@link TeeTokenFilter} and {@link SinkTokenizer} only work with the old TokenStream API.
-	/// If you switch to the new API, you need to use {@link TeeSinkTokenFilter} instead, which offers 
+	/// WARNING: <see cref="TeeTokenFilter" /> and <see cref="SinkTokenizer" /> only work with the old TokenStream API.
+	/// If you switch to the new API, you need to use <see cref="TeeSinkTokenFilter" /> instead, which offers 
 	/// the same functionality.
 	/// </summary>
 	/// <seealso cref="SinkTokenizer">
 	/// </seealso>
-	/// <deprecated> Use {@link TeeSinkTokenFilter} instead
+	/// <deprecated> Use <see cref="TeeSinkTokenFilter" /> instead
 	/// 
 	/// </deprecated>
     [Obsolete("Use TeeSinkTokenFilter instead")]

Modified: incubator/lucene.net/trunk/src/core/Analysis/Token.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/Token.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/Token.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/Token.cs Sun Nov  6 05:24:26 2011
@@ -47,15 +47,15 @@ namespace Lucene.Net.Analysis
 	/// with type "eos".  The default token type is "word".  
 	/// <p/>
 	/// A Token can optionally have metadata (a.k.a. Payload) in the form of a variable
-	/// length byte array. Use {@link TermPositions#GetPayloadLength()} and 
-	/// {@link TermPositions#GetPayload(byte[], int)} to retrieve the payloads from the index.
+	/// length byte array. Use <see cref="TermPositions.GetPayloadLength()" /> and 
+	/// <see cref="TermPositions.GetPayload(byte[], int)" /> to retrieve the payloads from the index.
 	/// </summary>
 	/// <summary><br/><br/>
 	/// </summary>
-	/// <summary><p/><b>NOTE:</b> As of 2.9, Token implements all {@link Attribute} interfaces
-	/// that are part of core Lucene and can be found in the {@code tokenattributes} subpackage.
+	/// <summary><p/><b>NOTE:</b> As of 2.9, Token implements all <see cref="Attribute" /> interfaces
+	/// that are part of core Lucene and can be found in the <see cref="Lucene.Net.Analysis.Tokenattributes"/> namespace.
 	/// Even though it is not necessary to use Token anymore, with the new TokenStream API it can
-	/// be used as convenience class that implements all {@link Attribute}s, which is especially useful
+	/// be used as convenience class that implements all <see cref="Attribute" />s, which is especially useful
 	/// to easily switch from the old to the new TokenStream API.
 	/// </summary>
 	/// <summary><br/><br/>
@@ -69,65 +69,65 @@ namespace Lucene.Net.Analysis
 	/// String for every term.  The APIs that accept String
 	/// termText are still available but a warning about the
 	/// associated performance cost has been added (below).  The
-	/// {@link #TermText()} method has been deprecated.<p/>
+	/// <see cref="TermText()" /> method has been deprecated.<p/>
 	/// </summary>
 	/// <summary><p/>Tokenizers and TokenFilters should try to re-use a Token instance when
 	/// possible for best performance, by implementing the
-	/// {@link TokenStream#IncrementToken()} API.
+	/// <see cref="TokenStream.IncrementToken()" /> API.
 	/// Failing that, to create a new Token you should first use
 	/// one of the constructors that starts with null text.  To load
-	/// the token from a char[] use {@link #SetTermBuffer(char[], int, int)}.
-	/// To load from a String use {@link #SetTermBuffer(String)} or {@link #SetTermBuffer(String, int, int)}.
-	/// Alternatively you can get the Token's termBuffer by calling either {@link #TermBuffer()},
+	/// the token from a char[] use <see cref="SetTermBuffer(char[], int, int)" />.
+	/// To load from a String use <see cref="SetTermBuffer(String)" /> or <see cref="SetTermBuffer(String, int, int)" />.
+	/// Alternatively you can get the Token's termBuffer by calling either <see cref="TermBuffer()" />,
 	/// if you know that your text is shorter than the capacity of the termBuffer
-	/// or {@link #ResizeTermBuffer(int)}, if there is any possibility
+	/// or <see cref="ResizeTermBuffer(int)" />, if there is any possibility
 	/// that you may need to grow the buffer. Fill in the characters of your term into this
-	/// buffer, with {@link String#getChars(int, int, char[], int)} if loading from a string,
-	/// or with {@link System#arraycopy(Object, int, Object, int, int)}, and finally call {@link #SetTermLength(int)} to
+    /// buffer, with <see cref="string.ToCharArray(int, int)" /> if loading from a string,
+	/// or with <see cref="Array.Copy(Array, long, Array, long, long)" />, and finally call <see cref="SetTermLength(int)" /> to
 	/// set the length of the term text.  See <a target="_top"
 	/// href="https://issues.apache.org/jira/browse/LUCENE-969">LUCENE-969</a>
 	/// for details.<p/>
 	/// <p/>Typical Token reuse patterns:
-	/// <ul>
-	/// <li> Copying text from a string (type is reset to {@link #DEFAULT_TYPE} if not
+	/// <list type="bullet">
+	/// <item> Copying text from a string (type is reset to <see cref="DEFAULT_TYPE" /> if not
 	/// specified):<br/>
-	/// <pre>
+	/// <code>
 	/// return reusableToken.reinit(string, startOffset, endOffset[, type]);
-	/// </pre>
-	/// </li>
-	/// <li> Copying some text from a string (type is reset to {@link #DEFAULT_TYPE}
+	/// </code>
+	/// </item>
+	/// <item> Copying some text from a string (type is reset to <see cref="DEFAULT_TYPE" />
 	/// if not specified):<br/>
-	/// <pre>
+    /// <code>
 	/// return reusableToken.reinit(string, 0, string.length(), startOffset, endOffset[, type]);
-	/// </pre>
-	/// </li>
-	/// <li> Copying text from char[] buffer (type is reset to {@link #DEFAULT_TYPE}
+    /// </code>
+	/// </item>
+	/// <item> Copying text from char[] buffer (type is reset to <see cref="DEFAULT_TYPE" />
 	/// if not specified):<br/>
-	/// <pre>
+    /// <code>
 	/// return reusableToken.reinit(buffer, 0, buffer.length, startOffset, endOffset[, type]);
-	/// </pre>
-	/// </li>
-	/// <li> Copying some text from a char[] buffer (type is reset to
-	/// {@link #DEFAULT_TYPE} if not specified):<br/>
-	/// <pre>
+    /// </code>
+	/// </item>
+	/// <item> Copying some text from a char[] buffer (type is reset to
+	/// <see cref="DEFAULT_TYPE" /> if not specified):<br/>
+    /// <code>
 	/// return reusableToken.reinit(buffer, start, end - start, startOffset, endOffset[, type]);
-	/// </pre>
-	/// </li>
-	/// <li> Copying from one one Token to another (type is reset to
-	/// {@link #DEFAULT_TYPE} if not specified):<br/>
-	/// <pre>
+    /// </code>
+	/// </item>
+	/// <item> Copying from one Token to another (type is reset to
+	/// <see cref="DEFAULT_TYPE" /> if not specified):<br/>
+    /// <code>
 	/// return reusableToken.reinit(source.termBuffer(), 0, source.termLength(), source.startOffset(), source.endOffset()[, source.type()]);
-	/// </pre>
-	/// </li>
-	/// </ul>
+    /// </code>
+	/// </item>
+	/// </list>
 	/// A few things to note:
-	/// <ul>
-	/// <li>clear() initializes all of the fields to default values. This was changed in contrast to Lucene 2.4, but should affect no one.</li>
-	/// <li>Because <code>TokenStreams</code> can be chained, one cannot assume that the <code>Token's</code> current type is correct.</li>
-	/// <li>The startOffset and endOffset represent the start and offset in the
-	/// source text, so be careful in adjusting them.</li>
-	/// <li>When caching a reusable token, clone it. When injecting a cached token into a stream that can be reset, clone it again.</li>
-	/// </ul>
+	/// <list type="bullet">
+	/// <item>clear() initializes all of the fields to default values. This was changed in contrast to Lucene 2.4, but should affect no one.</item>
+	/// <item>Because <c>TokenStreams</c> can be chained, one cannot assume that the <c>Token's</c> current type is correct.</item>
+	/// <item>The startOffset and endOffset represent the start and offset in the
+	/// source text, so be careful in adjusting them.</item>
+	/// <item>When caching a reusable token, clone it. When injecting a cached token into a stream that can be reset, clone it again.</item>
+	/// </list>
 	/// <p/>
 	/// </summary>
 	/// <seealso cref="Lucene.Net.Index.Payload">
@@ -148,38 +148,38 @@ namespace Lucene.Net.Analysis
 		
 		/// <summary> Characters for the term text.</summary>
 		/// <deprecated> This will be made private. Instead, use:
-		/// {@link #TermBuffer()}, 
-		/// {@link #SetTermBuffer(char[], int, int)},
-		/// {@link #SetTermBuffer(String)}, or
-		/// {@link #SetTermBuffer(String, int, int)}
+		/// <see cref="TermBuffer()" />, 
+		/// <see cref="SetTermBuffer(char[], int, int)" />,
+		/// <see cref="SetTermBuffer(String)" />, or
+		/// <see cref="SetTermBuffer(String, int, int)" />
 		/// </deprecated>
         [Obsolete("This will be made private. Instead, use: TermBuffer(), SetTermBuffer(char[], int, int), SetTermBuffer(String) or SetTermBuffer(String, int, int)")]
 		internal char[] termBuffer;
 		
 		/// <summary> Length of term text in the buffer.</summary>
 		/// <deprecated> This will be made private. Instead, use:
-		/// {@link #TermLength()}, or @{link setTermLength(int)}.
+        /// <see cref="TermLength()" />, or <see cref="SetTermLength(int)"/>.
 		/// </deprecated>
         [Obsolete("This will be made private. Instead, use: TermLength(), or setTermLength(int)")]
 		internal int termLength;
 		
 		/// <summary> Start in source text.</summary>
 		/// <deprecated> This will be made private. Instead, use:
-		/// {@link #StartOffset()}, or @{link setStartOffset(int)}.
+        /// <see cref="StartOffset()" />, or <see cref="SetStartOffset(int)"/>.
 		/// </deprecated>
         [Obsolete("This will be made private. Instead, use: StartOffset(), or SetStartOffset(int).")]
 		internal int startOffset;
 		
 		/// <summary> End in source text.</summary>
 		/// <deprecated> This will be made private. Instead, use:
-		/// {@link #EndOffset()}, or @{link setEndOffset(int)}.
+        /// <see cref="EndOffset()" />, or <see cref="SetEndOffset(int)"/>.
 		/// </deprecated>
         [Obsolete("This will be made private. Instead, use: EndOffset(), or SetEndOffset(int).")]
 		internal int endOffset;
 		
 		/// <summary> The lexical type of the token.</summary>
 		/// <deprecated> This will be made private. Instead, use:
-		/// {@link #Type()}, or @{link setType(String)}.
+        /// <see cref="Type()" />, or <see cref="SetType(String)"/>.
 		/// </deprecated>
         [Obsolete("This will be made private. Instead, use: Type(), or SetType(String).")]
 		internal System.String type = DEFAULT_TYPE;
@@ -187,15 +187,15 @@ namespace Lucene.Net.Analysis
 		private int flags;
 		
 		/// <deprecated> This will be made private. Instead, use:
-		/// {@link #GetPayload()}, or @{link setPayload(Payload)}.
+        /// <see cref="GetPayload()" />, or <see cref="SetPayload(Payload)"/>.
 		/// </deprecated>
         [Obsolete("This will be made private. Instead, use: GetPayload(), or SetPayload(Payload).")]
 		internal Payload payload;
 		
 		/// <deprecated> This will be made private. Instead, use:
-		/// {@link #GetPositionIncrement()}, or @{link setPositionIncrement(String)}.
+        /// <see cref="GetPositionIncrement()" />, or <see cref="SetPositionIncrement(int)"/>.
 		/// </deprecated>
-        [Obsolete("This will be made private. Instead, use: GetPositionIncrement(), or SetPositionIncrement(String).")]
+        [Obsolete("This will be made private. Instead, use: GetPositionIncrement(), or SetPositionIncrement(int).")]
 		internal int positionIncrement = 1;
 		
 		/// <summary>Constructs a Token will null text. </summary>
@@ -331,28 +331,28 @@ namespace Lucene.Net.Analysis
 		}
 		
 		/// <summary>Set the position increment.  This determines the position of this token
-		/// relative to the previous Token in a {@link TokenStream}, used in phrase
+		/// relative to the previous Token in a <see cref="TokenStream" />, used in phrase
 		/// searching.
 		/// 
 		/// <p/>The default value is one.
 		/// 
-		/// <p/>Some common uses for this are:<ul>
+		/// <p/>Some common uses for this are:<list type="bullet">
 		/// 
-		/// <li>Set it to zero to put multiple terms in the same position.  This is
+		/// <item>Set it to zero to put multiple terms in the same position.  This is
 		/// useful if, e.g., a word has multiple stems.  Searches for phrases
 		/// including either stem will match.  In this case, all but the first stem's
 		/// increment should be set to zero: the increment of the first instance
 		/// should be one.  Repeating a token with an increment of zero can also be
-		/// used to boost the scores of matches on that token.</li>
+		/// used to boost the scores of matches on that token.</item>
 		/// 
-		/// <li>Set it to values greater than one to inhibit exact phrase matches.
+		/// <item>Set it to values greater than one to inhibit exact phrase matches.
 		/// If, for example, one does not want phrases to match across removed stop
 		/// words, then one could build a stop word filter that removes stop words and
 		/// also sets the increment to the number of stop words removed before each
 		/// non-stop word.  Then exact phrase queries will only match when the terms
-		/// occur with no intervening stop words.</li>
+		/// occur with no intervening stop words.</item>
 		/// 
-		/// </ul>
+		/// </list>
 		/// </summary>
 		/// <param name="positionIncrement">the distance from the prior term
 		/// </param>
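
As a concrete illustration of the zero-increment case described above (the Token constructor form is assumed):

    // Put a synonym at the same position as the original token by zeroing
    // its position increment; offsets mirror the original term's span.
    Token original = new Token("fast", 0, 4);
    Token synonym = new Token("quick", 0, 4);
    synonym.SetPositionIncrement(0);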
@@ -366,7 +366,7 @@ namespace Lucene.Net.Analysis
 		}
 		
 		/// <summary>Returns the position increment of this Token.</summary>
-		/// <seealso cref="setPositionIncrement">
+		/// <seealso cref="SetPositionIncrement">
 		/// </seealso>
 		public virtual int GetPositionIncrement()
 		{
@@ -377,9 +377,9 @@ namespace Lucene.Net.Analysis
 		/// indexing speed you should instead use the char[]
 		/// termBuffer methods to set the term text.
 		/// </summary>
-		/// <deprecated> use {@link #SetTermBuffer(char[], int, int)} or
-		/// {@link #SetTermBuffer(String)} or
-		/// {@link #SetTermBuffer(String, int, int)}.
+		/// <deprecated> use <see cref="SetTermBuffer(char[], int, int)" /> or
+		/// <see cref="SetTermBuffer(String)" /> or
+		/// <see cref="SetTermBuffer(String, int, int)" />.
 		/// </deprecated>
         [Obsolete("Use SetTermBuffer(char[], int, int) or SetTermBuffer(String) or SetTermBuffer(String, int, int)")]
 		public virtual void  SetTermText(System.String text)
@@ -393,9 +393,9 @@ namespace Lucene.Net.Analysis
 		/// </summary>
 		/// <deprecated> This method now has a performance penalty
 		/// because the text is stored internally in a char[].  If
-		/// possible, use {@link #TermBuffer()} and {@link
-		/// #TermLength()} directly instead.  If you really need a
-		/// String, use {@link #Term()}
+		/// possible, use <see cref="TermBuffer()" /> and <see cref="TermLength()"/>
+		/// directly instead.  If you really need a
+		/// String, use <see cref="Term()" />
 		/// </deprecated>
 		public System.String TermText()
 		{
@@ -408,8 +408,8 @@ namespace Lucene.Net.Analysis
 		/// 
 		/// This method has a performance penalty
 		/// because the text is stored internally in a char[].  If
-		/// possible, use {@link #TermBuffer()} and {@link
-		/// #TermLength()} directly instead.  If you really need a
+        /// possible, use <see cref="TermBuffer()" /> and <see cref="TermLength()"/>
+        /// directly instead.  If you really need a
 		/// String, use this method, which is nothing more than
 		/// a convenience call to <b>new String(token.termBuffer(), 0, token.termLength())</b>
 		/// </summary>
@@ -471,10 +471,10 @@ namespace Lucene.Net.Analysis
 		
 		/// <summary>Returns the internal termBuffer character array which
 		/// you can then directly alter.  If the array is too
-		/// small for your token, use {@link
-		/// #ResizeTermBuffer(int)} to increase it.  After
-		/// altering the buffer be sure to call {@link
-		/// #setTermLength} to record the number of valid
+		/// small for your token, use <see cref="ResizeTermBuffer(int)" />
+		/// to increase it.  After
+		/// altering the buffer be sure to call <see cref="SetTermLength" />
+		/// to record the number of valid
 		/// characters that were placed into the termBuffer. 
 		/// </summary>
 		public char[] TermBuffer()
@@ -486,9 +486,9 @@ namespace Lucene.Net.Analysis
 		/// <summary>Grows the termBuffer to at least size newSize, preserving the
 		/// existing content. Note: If the next operation is to change
 		/// the contents of the term buffer use
-		/// {@link #SetTermBuffer(char[], int, int)},
-		/// {@link #SetTermBuffer(String)}, or
-		/// {@link #SetTermBuffer(String, int, int)}
+		/// <see cref="SetTermBuffer(char[], int, int)" />,
+		/// <see cref="SetTermBuffer(String)" />, or
+		/// <see cref="SetTermBuffer(String, int, int)" />
 		/// to optimally combine the resize with the setting of the termBuffer.
 		/// </summary>
 		/// <param name="newSize">minimum size of the new termBuffer
@@ -596,7 +596,7 @@ namespace Lucene.Net.Analysis
 		/// the termBuffer array. Use this to truncate the termBuffer
 		/// or to synchronize with external manipulation of the termBuffer.
 		/// Note: to grow the size of the array,
-		/// use {@link #ResizeTermBuffer(int)} first.
+		/// use <see cref="ResizeTermBuffer(int)" /> first.
 		/// </summary>
 		/// <param name="length">the truncated length
 		/// </param>
@@ -670,8 +670,8 @@ namespace Lucene.Net.Analysis
 		/// <summary> EXPERIMENTAL:  While we think this is here to stay, we may want to change it to be a long.
 		/// <p/>
 		/// 
-		/// Get the bitset for any bits that have been set.  This is completely distinct from {@link #Type()}, although they do share similar purposes.
-		/// The flags can be used to encode information about the token for use by other {@link Lucene.Net.Analysis.TokenFilter}s.
+		/// Get the bitset for any bits that have been set.  This is completely distinct from <see cref="Type()" />, although they do share similar purposes.
+		/// The flags can be used to encode information about the token for use by other <see cref="TokenFilter"/>s.
 		/// 
 		/// 
 		/// </summary>
@@ -825,11 +825,11 @@ namespace Lucene.Net.Analysis
 			type = DEFAULT_TYPE;
 		}
 		
-		/// <summary>Shorthand for calling {@link #clear},
-		/// {@link #SetTermBuffer(char[], int, int)},
-		/// {@link #setStartOffset},
-		/// {@link #setEndOffset},
-		/// {@link #setType}
+		/// <summary>Shorthand for calling <see cref="Clear" />,
+		/// <see cref="SetTermBuffer(char[], int, int)" />,
+		/// <see cref="SetStartOffset" />,
+		/// <see cref="SetEndOffset" />,
+		/// <see cref="SetType" />
 		/// </summary>
 		/// <returns> this Token instance 
 		/// </returns>
@@ -845,11 +845,11 @@ namespace Lucene.Net.Analysis
 			return this;
 		}
 		
-		/// <summary>Shorthand for calling {@link #clear},
-		/// {@link #SetTermBuffer(char[], int, int)},
-		/// {@link #setStartOffset},
-		/// {@link #setEndOffset}
-		/// {@link #setType} on Token.DEFAULT_TYPE
+		/// <summary>Shorthand for calling <see cref="Clear" />,
+		/// <see cref="SetTermBuffer(char[], int, int)" />,
+		/// <see cref="SetStartOffset" />,
+		/// <see cref="SetEndOffset" />,
+		/// <see cref="SetType" /> on Token.DEFAULT_TYPE
 		/// </summary>
 		/// <returns> this Token instance 
 		/// </returns>
@@ -863,11 +863,11 @@ namespace Lucene.Net.Analysis
 			return this;
 		}
 		
-		/// <summary>Shorthand for calling {@link #clear},
-		/// {@link #SetTermBuffer(String)},
-		/// {@link #setStartOffset},
-		/// {@link #setEndOffset}
-		/// {@link #setType}
+		/// <summary>Shorthand for calling <see cref="Clear" />,
+		/// <see cref="SetTermBuffer(String)" />,
+		/// <see cref="SetStartOffset" />,
+		/// <see cref="SetEndOffset" />,
+		/// <see cref="SetType" />
 		/// </summary>
 		/// <returns> this Token instance 
 		/// </returns>
@@ -881,11 +881,11 @@ namespace Lucene.Net.Analysis
 			return this;
 		}
 		
-		/// <summary>Shorthand for calling {@link #clear},
-		/// {@link #SetTermBuffer(String, int, int)},
-		/// {@link #setStartOffset},
-		/// {@link #setEndOffset}
-		/// {@link #setType}
+		/// <summary>Shorthand for calling <see cref="Clear" />,
+		/// <see cref="SetTermBuffer(String, int, int)" />,
+		/// <see cref="SetStartOffset" />,
+		/// <see cref="SetEndOffset" />,
+		/// <see cref="SetType" />
 		/// </summary>
 		/// <returns> this Token instance 
 		/// </returns>
@@ -899,11 +899,11 @@ namespace Lucene.Net.Analysis
 			return this;
 		}
 		
-		/// <summary>Shorthand for calling {@link #clear},
-		/// {@link #SetTermBuffer(String)},
-		/// {@link #setStartOffset},
-		/// {@link #setEndOffset}
-		/// {@link #setType} on Token.DEFAULT_TYPE
+		/// <summary>Shorthand for calling <see cref="Clear" />,
+		/// <see cref="SetTermBuffer(String)" />,
+		/// <see cref="SetStartOffset" />,
+		/// <see cref="SetEndOffset" />,
+		/// <see cref="SetType" /> on Token.DEFAULT_TYPE
 		/// </summary>
 		/// <returns> this Token instance 
 		/// </returns>
@@ -917,11 +917,11 @@ namespace Lucene.Net.Analysis
 			return this;
 		}
 		
-		/// <summary>Shorthand for calling {@link #clear},
-		/// {@link #SetTermBuffer(String, int, int)},
-		/// {@link #setStartOffset},
-		/// {@link #setEndOffset}
-		/// {@link #setType} on Token.DEFAULT_TYPE
+		/// <summary>Shorthand for calling <see cref="Clear" />,
+		/// <see cref="SetTermBuffer(String, int, int)" />,
+		/// <see cref="SetStartOffset" />,
+		/// <see cref="SetEndOffset" />,
+		/// <see cref="SetType" /> on Token.DEFAULT_TYPE
 		/// </summary>
 		/// <returns> this Token instance 
 		/// </returns>
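
For reference, a minimal sketch of the Reinit shorthands the hunks above document: one call performing Clear plus the SetTermBuffer/SetStartOffset/SetEndOffset/SetType sequence, so a single Token instance can be reused across tokens. The argument values are hypothetical, and the overload shapes are assumed from the summaries above.

    Token reusable = new Token();
    reusable.Reinit("example", 10, 17);                  // String overload, Token.DEFAULT_TYPE
    reusable.Reinit("example", 10, 17, "<ALPHANUM>");    // String overload with explicit type
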

Modified: incubator/lucene.net/trunk/src/core/Analysis/TokenFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/TokenFilter.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/TokenFilter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/TokenFilter.cs Sun Nov  6 05:24:26 2011
@@ -22,7 +22,7 @@ namespace Lucene.Net.Analysis
 	
 	/// <summary> A TokenFilter is a TokenStream whose input is another TokenStream.
 	/// <p/>
-	/// This is an abstract class; subclasses must override {@link #IncrementToken()}.
+    /// This is an abstract class; subclasses must override <see cref="TokenStream.IncrementToken()" />.
 	/// 
 	/// </summary>
 	/// <seealso cref="TokenStream">
@@ -38,9 +38,9 @@ namespace Lucene.Net.Analysis
 			this.input = input;
 		}
 		
-		/// <summary>Performs end-of-stream operations, if any, and calls then <code>end()</code> on the
+		/// <summary>Performs end-of-stream operations, if any, and then calls <c>end()</c> on the
 		/// input TokenStream.<p/> 
-		/// <b>NOTE:</b> Be sure to call <code>super.end()</code> first when overriding this method.
+		/// <b>NOTE:</b> Be sure to call <c>super.end()</c> first when overriding this method.
 		/// </summary>
 		public override void  End()
 		{
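
For reference, a minimal sketch of a filter honoring the End() contract documented above: call base.End() first (the .NET counterpart of super.end()), then do any end-of-stream work. The filter name and its final-offset bookkeeping are hypothetical.

    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Tokenattributes;

    public class MarkerSketchFilter : TokenFilter
    {
        private readonly OffsetAttribute offsetAtt;

        public MarkerSketchFilter(TokenStream input) : base(input)
        {
            offsetAtt = (OffsetAttribute) AddAttribute(typeof(OffsetAttribute));
        }

        public override bool IncrementToken()
        {
            return input.IncrementToken();        // pass tokens through unchanged
        }

        public override void End()
        {
            base.End();                           // per the note above: call base.End() first
            // hypothetical end-of-stream work, e.g. pinning the final offset
            offsetAtt.SetOffset(offsetAtt.EndOffset(), offsetAtt.EndOffset());
        }
    }
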

Modified: incubator/lucene.net/trunk/src/core/Analysis/TokenStream.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/TokenStream.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/TokenStream.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/TokenStream.cs Sun Nov  6 05:24:26 2011
@@ -33,53 +33,53 @@ using AttributeSource = Lucene.Net.Util.
 namespace Lucene.Net.Analysis
 {
 	
-	/// <summary> A <code>TokenStream</code> enumerates the sequence of tokens, either from
-	/// {@link Field}s of a {@link Document} or from query text.
+	/// <summary> A <c>TokenStream</c> enumerates the sequence of tokens, either from
+	/// <see cref="Field" />s of a <see cref="Document" /> or from query text.
 	/// <p/>
 	/// This is an abstract class. Concrete subclasses are:
-	/// <ul>
-	/// <li>{@link Tokenizer}, a <code>TokenStream</code> whose input is a Reader; and</li>
-	/// <li>{@link TokenFilter}, a <code>TokenStream</code> whose input is another
-	/// <code>TokenStream</code>.</li>
-	/// </ul>
-	/// A new <code>TokenStream</code> API has been introduced with Lucene 2.9. This API
-	/// has moved from being {@link Token} based to {@link Attribute} based. While
-	/// {@link Token} still exists in 2.9 as a convenience class, the preferred way
-	/// to store the information of a {@link Token} is to use {@link AttributeImpl}s.
+	/// <list type="bullet">
+	/// <item><see cref="Tokenizer" />, a <c>TokenStream</c> whose input is a Reader; and</item>
+	/// <item><see cref="TokenFilter" />, a <c>TokenStream</c> whose input is another
+	/// <c>TokenStream</c>.</item>
+	/// </list>
+	/// A new <c>TokenStream</c> API has been introduced with Lucene 2.9. This API
+	/// has moved from being <see cref="Token" /> based to <see cref="Attribute" /> based. While
+	/// <see cref="Token" /> still exists in 2.9 as a convenience class, the preferred way
+	/// to store the information of a <see cref="Token" /> is to use <see cref="AttributeImpl" />s.
 	/// <p/>
-	/// <code>TokenStream</code> now extends {@link AttributeSource}, which provides
-	/// access to all of the token {@link Attribute}s for the <code>TokenStream</code>.
-	/// Note that only one instance per {@link AttributeImpl} is created and reused
+	/// <c>TokenStream</c> now extends <see cref="AttributeSource" />, which provides
+	/// access to all of the token <see cref="Attribute" />s for the <c>TokenStream</c>.
+	/// Note that only one instance per <see cref="AttributeImpl" /> is created and reused
 	/// for every token. This approach reduces object creation and allows local
-	/// caching of references to the {@link AttributeImpl}s. See
-	/// {@link #IncrementToken()} for further details.
+	/// caching of references to the <see cref="AttributeImpl" />s. See
+	/// <see cref="IncrementToken()" /> for further details.
 	/// <p/>
-	/// <b>The workflow of the new <code>TokenStream</code> API is as follows:</b>
-	/// <ol>
-	/// <li>Instantiation of <code>TokenStream</code>/{@link TokenFilter}s which add/get
-	/// attributes to/from the {@link AttributeSource}.</li>
-	/// <li>The consumer calls {@link TokenStream#Reset()}.</li>
-	/// <li>The consumer retrieves attributes from the stream and stores local
-	/// references to all attributes it wants to access</li>
-	/// <li>The consumer calls {@link #IncrementToken()} until it returns false and
-	/// consumes the attributes after each call.</li>
-	/// <li>The consumer calls {@link #End()} so that any end-of-stream operations
-	/// can be performed.</li>
-	/// <li>The consumer calls {@link #Close()} to release any resource when finished
-	/// using the <code>TokenStream</code></li>
-	/// </ol>
+	/// <b>The workflow of the new <c>TokenStream</c> API is as follows:</b>
+	/// <list type="number">
+	/// <item>Instantiation of <c>TokenStream</c>/<see cref="TokenFilter" />s which add/get
+	/// attributes to/from the <see cref="AttributeSource" />.</item>
+	/// <item>The consumer calls <see cref="TokenStream.Reset()" />.</item>
+	/// <item>The consumer retrieves attributes from the stream and stores local
+	/// references to all attributes it wants to access</item>
+	/// <item>The consumer calls <see cref="IncrementToken()" /> until it returns false and
+	/// consumes the attributes after each call.</item>
+	/// <item>The consumer calls <see cref="End()" /> so that any end-of-stream operations
+	/// can be performed.</item>
+	/// <item>The consumer calls <see cref="Close()" /> to release any resource when finished
+	/// using the <c>TokenStream</c></item>
+	/// </list>
 	/// To make sure that filters and consumers know which attributes are available,
 	/// the attributes must be added during instantiation. Filters and consumers are
 	/// not required to check for availability of attributes in
-	/// {@link #IncrementToken()}.
+	/// <see cref="IncrementToken()" />.
 	/// <p/>
 	/// You can find some example code for the new API in the analysis package level
 	/// Javadoc.
 	/// <p/>
-	/// Sometimes it is desirable to capture a current state of a <code>TokenStream</code>
-	/// , e. g. for buffering purposes (see {@link CachingTokenFilter},
-	/// {@link TeeSinkTokenFilter}). For this usecase
-	/// {@link AttributeSource#CaptureState} and {@link AttributeSource#RestoreState}
+	/// Sometimes it is desirable to capture the current state of a <c>TokenStream</c>,
+	/// e.g. for buffering purposes (see <see cref="CachingTokenFilter" />,
+	/// <see cref="TeeSinkTokenFilter" />). For this use case
+	/// <see cref="AttributeSource.CaptureState" /> and <see cref="AttributeSource.RestoreState" />
 	/// can be used.
 	/// </summary>
 	public abstract class TokenStream:AttributeSource
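
For reference, a minimal consumer sketch following the six-step workflow listed above: instantiate, retrieve attribute references, Reset, loop on IncrementToken, then End and Close. The analyzer, field name, and input text are hypothetical.

    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Tokenattributes;

    TokenStream stream = new WhitespaceAnalyzer()
        .TokenStream("body", new StringReader("hello token stream"));   // step 1
    TermAttribute termAtt =
        (TermAttribute) stream.AddAttribute(typeof(TermAttribute));     // steps 1 and 3
    stream.Reset();                                                     // step 2
    while (stream.IncrementToken())                                     // step 4
    {
        System.Console.WriteLine(termAtt.Term());
    }
    stream.End();                                                       // step 5
    stream.Close();                                                     // step 6
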
@@ -229,7 +229,7 @@ namespace Lucene.Net.Analysis
 			Check();
 		}
 		
-		/// <summary> A TokenStream using the supplied AttributeFactory for creating new {@link Attribute} instances.</summary>
+		/// <summary> A TokenStream using the supplied AttributeFactory for creating new <see cref="Attribute" /> instances.</summary>
 		protected internal TokenStream(AttributeFactory factory):base(onlyUseNewAPI?factory:new TokenWrapperAttributeFactory(factory))
 		{
 			InitBlock();
@@ -285,24 +285,24 @@ namespace Lucene.Net.Analysis
 		}
 		
 		/// <summary> For extra performance you can globally enable the new
-		/// {@link #IncrementToken} API using {@link Attribute}s. There will be a
+		/// <see cref="IncrementToken" /> API using <see cref="Attribute" />s. There will be a
 		/// small, but in most cases negligible performance increase by enabling this,
-		/// but it only works if <b>all</b> <code>TokenStream</code>s use the new API and
-		/// implement {@link #IncrementToken}. This setting can only be enabled
+		/// but it only works if <b>all</b> <c>TokenStream</c>s use the new API and
+		/// implement <see cref="IncrementToken" />. This setting can only be enabled
 		/// globally.
 		/// <p/>
-		/// This setting only affects <code>TokenStream</code>s instantiated after this
-		/// call. All <code>TokenStream</code>s already created use the other setting.
+		/// This setting only affects <c>TokenStream</c>s instantiated after this
+		/// call. All <c>TokenStream</c>s already created use the other setting.
 		/// <p/>
-		/// All core {@link Analyzer}s are compatible with this setting, if you have
-		/// your own <code>TokenStream</code>s that are also compatible, you should enable
+		/// All core <see cref="Analyzer" />s are compatible with this setting; if you have
+		/// your own <c>TokenStream</c>s that are also compatible, you should enable
 		/// this.
 		/// <p/>
-		/// When enabled, tokenization may throw {@link UnsupportedOperationException}
+		/// When enabled, tokenization may throw an <see cref="InvalidOperationException" />
 		/// if the whole tokenizer chain is not compatible, e.g. one of the
-		/// <code>TokenStream</code>s does not implement the new <code>TokenStream</code> API.
+		/// <c>TokenStream</c>s does not implement the new <c>TokenStream</c> API.
 		/// <p/>
-		/// The default is <code>false</code>, so there is the fallback to the old API
+		/// The default is <c>false</c>, so there is the fallback to the old API
 		/// available.
 		/// 
 		/// </summary>
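
For reference, the global switch this hunk documents is a single static call; a minimal sketch, assuming the static SetOnlyUseNewAPI method named in the seealso of the next hunk (the setting is itself deprecated and only affects streams created afterwards).

    // Opt the whole process into the attribute-based API; streams created
    // before this call keep the previous setting, per the doc comment above.
    TokenStream.SetOnlyUseNewAPI(true);
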
@@ -318,7 +318,7 @@ namespace Lucene.Net.Analysis
 		/// <summary> Returns if only the new API is used.
 		/// 
 		/// </summary>
-		/// <seealso cref="setOnlyUseNewAPI">
+		/// <seealso cref="SetOnlyUseNewAPI">
 		/// </seealso>
 		/// <deprecated> This setting will no longer be needed in Lucene 3.0 as
 		/// the old API will be removed.
@@ -329,26 +329,26 @@ namespace Lucene.Net.Analysis
 			return onlyUseNewAPI;
 		}
 		
-		/// <summary> Consumers (i.e., {@link IndexWriter}) use this method to advance the stream to
+		/// <summary> Consumers (i.e., <see cref="IndexWriter" />) use this method to advance the stream to
 		/// the next token. Implementing classes must implement this method and update
-		/// the appropriate {@link AttributeImpl}s with the attributes of the next
+		/// the appropriate <see cref="AttributeImpl" />s with the attributes of the next
 		/// token.
 		/// 
 		/// The producer must make no assumptions about the attributes after the
 		/// method has returned: the caller may arbitrarily change them. If the
 		/// producer needs to preserve the state for subsequent calls, it can use
-		/// {@link #captureState} to create a copy of the current attribute state.
+		/// <see cref="AttributeSource.CaptureState" /> to create a copy of the current attribute state.
 		/// 
 		/// This method is called for every token of a document, so an efficient
 		/// implementation is crucial for good performance. To avoid calls to
-		/// {@link #AddAttribute(Class)} and {@link #GetAttribute(Class)} or downcasts,
-		/// references to all {@link AttributeImpl}s that this stream uses should be
+		/// <see cref="AttributeSource.AddAttribute(Type)" /> and <see cref="AttributeSource.GetAttribute(Type)" /> or downcasts,
+		/// references to all <see cref="AttributeImpl" />s that this stream uses should be
 		/// retrieved during instantiation.
 		/// 
 		/// To ensure that filters and consumers know which attributes are available,
 		/// the attributes must be added during instantiation. Filters and consumers
 		/// are not required to check for availability of attributes in
-		/// {@link #IncrementToken()}.
+		/// <see cref="IncrementToken()" />.
 		/// 
 		/// </summary>
 		/// <returns> false for end of stream; true otherwise
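
For reference, a minimal producer-side sketch of the contract above: attribute references are retrieved once at instantiation, so IncrementToken makes no AddAttribute/GetAttribute calls per token. The lower-casing filter is hypothetical.

    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Tokenattributes;

    public class LowerCaseSketchFilter : TokenFilter
    {
        private readonly TermAttribute termAtt;   // cached at instantiation, per the note above

        public LowerCaseSketchFilter(TokenStream input) : base(input)
        {
            termAtt = (TermAttribute) AddAttribute(typeof(TermAttribute));
        }

        public override bool IncrementToken()
        {
            if (!input.IncrementToken())
                return false;                     // end of stream
            char[] buffer = termAtt.TermBuffer();
            for (int i = 0; i < termAtt.TermLength(); i++)
                buffer[i] = System.Char.ToLower(buffer[i]);
            return true;
        }
    }
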
@@ -377,14 +377,14 @@ namespace Lucene.Net.Analysis
 		}
 		
 		/// <summary> This method is called by the consumer after the last token has been
-		/// consumed, after {@link #IncrementToken()} returned <code>false</code>
-		/// (using the new <code>TokenStream</code> API). Streams implementing the old API
+		/// consumed, after <see cref="IncrementToken" /> returned <c>false</c>
+		/// (using the new <c>TokenStream</c> API). Streams implementing the old API
 		/// should upgrade to use this feature.
 		/// <p/>
 		/// This method can be used to perform any end-of-stream operations, such as
 		/// setting the final offset of a stream. The final offset of a stream might
 		/// differ from the offset of the last token, e.g. in case one or more whitespaces
-		/// followed after the last token, but a {@link WhitespaceTokenizer} was used.
+		/// followed after the last token, but a <see cref="WhitespaceTokenizer" /> was used.
 		/// 
 		/// </summary>
 		/// <throws>  IOException </throws>
@@ -402,27 +402,27 @@ namespace Lucene.Net.Analysis
 		/// This implicitly defines a "contract" between consumers (callers of this
 		/// method) and producers (implementations of this method that are the source
 		/// for tokens):
-		/// <ul>
-		/// <li>A consumer must fully consume the previously returned {@link Token}
-		/// before calling this method again.</li>
-		/// <li>A producer must call {@link Token#Clear()} before setting the fields in
-		/// it and returning it</li>
-		/// </ul>
-		/// Also, the producer must make no assumptions about a {@link Token} after it
+		/// <list type="bullet">
+		/// <item>A consumer must fully consume the previously returned <see cref="Token" />
+		/// before calling this method again.</item>
+		/// <item>A producer must call <see cref="Token.Clear()" /> before setting the fields in
+		/// it and returning it</item>
+		/// </list>
+		/// Also, the producer must make no assumptions about a <see cref="Token" /> after it
 		/// has been returned: the caller may arbitrarily change it. If the producer
-		/// needs to hold onto the {@link Token} for subsequent calls, it must clone()
-		/// it before storing it. Note that a {@link TokenFilter} is considered a
+		/// needs to hold onto the <see cref="Token" /> for subsequent calls, it must clone()
+		/// it before storing it. Note that a <see cref="TokenFilter" /> is considered a
 		/// consumer.
 		/// 
 		/// </summary>
-		/// <param name="reusableToken">a {@link Token} that may or may not be used to return;
+		/// <param name="reusableToken">a <see cref="Token" /> that may or may not be used to return;
 		/// this parameter should never be null (the callee is not required to
 		/// check for null before using it, but it is a good idea to assert that
 		/// it is not null.)
 		/// </param>
-		/// <returns> next {@link Token} in the stream or null if end-of-stream was hit
+		/// <returns> next <see cref="Token" /> in the stream or null if end-of-stream was hit
 		/// </returns>
-		/// <deprecated> The new {@link #IncrementToken()} and {@link AttributeSource}
+		/// <deprecated> The new <see cref="IncrementToken" /> and <see cref="AttributeSource" />
 		/// APIs should be used instead.
 		/// </deprecated>
         [Obsolete("The new IncrementToken() and AttributeSource APIs should be used instead.")]
@@ -445,15 +445,15 @@ namespace Lucene.Net.Analysis
 			}
 		}
 		
-		/// <summary> Returns the next {@link Token} in the stream, or null at EOS.
+		/// <summary> Returns the next <see cref="Token" /> in the stream, or null at EOS.
 		/// 
 		/// </summary>
 		/// <deprecated> The returned Token is a "full private copy" (not re-used across
-		/// calls to {@link #Next()}) but will be slower than calling
-		/// {@link #Next(Token)} or using the new {@link #IncrementToken()}
-		/// method with the new {@link AttributeSource} API.
+		/// calls to <see cref="Next()" />) but will be slower than calling
+		/// <see cref="Next(Token)" /> or using the new <see cref="IncrementToken()" />
+		/// method with the new <see cref="AttributeSource" /> API.
 		/// </deprecated>
-        [Obsolete("The returned Token is a \"full private copy\" (not re-used across calls to Next()) but will be slower than calling {@link #Next(Token)} or using the new IncrementToken() method with the new AttributeSource API.")]
+        [Obsolete("The returned Token is a \"full private copy\" (not re-used across calls to Next()) but will be slower than calling Next(Token) or using the new IncrementToken() method with the new AttributeSource API.")]
 		public virtual Token Next()
 		{
 			if (tokenWrapper == null)
@@ -485,13 +485,13 @@ namespace Lucene.Net.Analysis
 		}
 		
 		/// <summary> Resets this stream to the beginning. This is an optional operation, so
-		/// subclasses may or may not implement this method. {@link #Reset()} is not needed for
+		/// subclasses may or may not implement this method. <see cref="Reset()" /> is not needed for
 		/// the standard indexing process. However, if the tokens of a
-		/// <code>TokenStream</code> are intended to be consumed more than once, it is
-		/// necessary to implement {@link #Reset()}. Note that if your TokenStream
+		/// <c>TokenStream</c> are intended to be consumed more than once, it is
+		/// necessary to implement <see cref="Reset()" />. Note that if your TokenStream
 		/// caches tokens and feeds them back again after a reset, it is imperative
 		/// that you clone the tokens when you store them away (on the first pass) as
-		/// well as when you return them (on future passes after {@link #Reset()}).
+		/// well as when you return them (on future passes after <see cref="Reset()" />).
 		/// </summary>
 		public virtual void  Reset()
 		{
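
For reference, a minimal old-API sketch of the clone-on-store / clone-on-return rule the Reset() doc comment above imposes on token-caching streams; the real CachingTokenFilter mentioned earlier is the implementation to prefer, and this hypothetical filter only illustrates the cloning discipline.

    using Lucene.Net.Analysis;

    public class CachingSketchFilter : TokenFilter
    {
        private readonly System.Collections.ArrayList cache = new System.Collections.ArrayList();
        private System.Collections.IEnumerator replay;    // non-null after Reset()

        public CachingSketchFilter(TokenStream input) : base(input)
        {
        }

        public override Token Next(Token reusableToken)
        {
            if (replay == null)
            {
                Token next = input.Next(reusableToken);
                if (next == null)
                    return null;
                cache.Add(next.Clone());                  // clone when storing away
                return next;
            }
            if (!replay.MoveNext())
                return null;
            return (Token) ((Token) replay.Current).Clone();  // clone when returning
        }

        public override void Reset()
        {
            replay = cache.GetEnumerator();               // later passes replay the cache
        }
    }
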

Modified: incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/FlagsAttribute.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/FlagsAttribute.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/FlagsAttribute.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/FlagsAttribute.cs Sun Nov  6 05:24:26 2011
@@ -23,7 +23,7 @@ using Attribute = Lucene.Net.Util.Attrib
 namespace Lucene.Net.Analysis.Tokenattributes
 {
 	
-	/// <summary> This attribute can be used to pass different flags down the {@link Tokenizer} chain,
+	/// <summary> This attribute can be used to pass different flags down the <see cref="Tokenizer" /> chain,
 	/// e.g. from one TokenFilter to another.
 	/// </summary>
 	public interface FlagsAttribute:Attribute
@@ -31,8 +31,8 @@ namespace Lucene.Net.Analysis.Tokenattri
 		/// <summary> EXPERIMENTAL:  While we think this is here to stay, we may want to change it to be a long.
 		/// <p/>
 		/// 
-		/// Get the bitset for any bits that have been set.  This is completely distinct from {@link TypeAttribute#Type()}, although they do share similar purposes.
-		/// The flags can be used to encode information about the token for use by other {@link Lucene.Net.Analysis.TokenFilter}s.
+		/// Get the bitset for any bits that have been set.  This is completely distinct from <see cref="TypeAttribute.Type()" />, although they do share similar purposes.
+		/// The flags can be used to encode information about the token for use by other <see cref="Lucene.Net.Analysis.TokenFilter" />s.
 		/// 
 		/// 
 		/// </summary>
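
For reference, a minimal sketch of passing a flag down the chain as described above; the flag constant, the tagging rule, and the filter name are hypothetical.

    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Tokenattributes;

    public class FlagTaggingSketchFilter : TokenFilter
    {
        public const int KeywordFlag = 1;         // hypothetical bit read by a later filter
        private readonly FlagsAttribute flagsAtt;
        private readonly TermAttribute termAtt;

        public FlagTaggingSketchFilter(TokenStream input) : base(input)
        {
            flagsAtt = (FlagsAttribute) AddAttribute(typeof(FlagsAttribute));
            termAtt = (TermAttribute) AddAttribute(typeof(TermAttribute));
        }

        public override bool IncrementToken()
        {
            if (!input.IncrementToken())
                return false;
            if (termAtt.TermLength() > 8)         // hypothetical rule: long terms are keywords
                flagsAtt.SetFlags(flagsAtt.GetFlags() | KeywordFlag);
            return true;
        }
    }
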

Modified: incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/FlagsAttributeImpl.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/FlagsAttributeImpl.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/FlagsAttributeImpl.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/FlagsAttributeImpl.cs Sun Nov  6 05:24:26 2011
@@ -33,8 +33,8 @@ namespace Lucene.Net.Analysis.Tokenattri
 		/// <summary> EXPERIMENTAL:  While we think this is here to stay, we may want to change it to be a long.
 		/// <p/>
 		/// 
-		/// Get the bitset for any bits that have been set.  This is completely distinct from {@link TypeAttribute#Type()}, although they do share similar purposes.
-		/// The flags can be used to encode information about the token for use by other {@link Lucene.Net.Analysis.TokenFilter}s.
+		/// Get the bitset for any bits that have been set.  This is completely distinct from <see cref="TypeAttribute.Type()" />, although they do share similar purposes.
+		/// The flags can be used to encode information about the token for use by other <see cref="Lucene.Net.Analysis.TokenFilter" />s.
 		/// 
 		/// 
 		/// </summary>

Modified: incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/PayloadAttribute.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/PayloadAttribute.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/PayloadAttribute.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/PayloadAttribute.cs Sun Nov  6 05:24:26 2011
@@ -23,7 +23,7 @@ using Attribute = Lucene.Net.Util.Attrib
 namespace Lucene.Net.Analysis.Tokenattributes
 {
 	
-	/// <summary> The payload of a Token. See also {@link Payload}.</summary>
+	/// <summary> The payload of a Token. See also <see cref="Payload" />.</summary>
 	public interface PayloadAttribute:Attribute
 	{
 		/// <summary> Returns this Token's payload.</summary>
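
For reference, a minimal sketch of this attribute's API shape, assuming the Payload class from Lucene.Net.Index and its byte[] constructor; the analyzer, field name, and payload byte are hypothetical, and a real chain would normally set payloads in a producing filter rather than in the consumer.

    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Tokenattributes;
    using Lucene.Net.Index;

    TokenStream stream = new WhitespaceAnalyzer()
        .TokenStream("body", new StringReader("payload demo"));
    PayloadAttribute payloadAtt =
        (PayloadAttribute) stream.AddAttribute(typeof(PayloadAttribute));
    while (stream.IncrementToken())
    {
        payloadAtt.SetPayload(new Payload(new byte[] { 42 }));   // hypothetical per-token byte
    }
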

Modified: incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/PayloadAttributeImpl.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/PayloadAttributeImpl.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/PayloadAttributeImpl.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/PayloadAttributeImpl.cs Sun Nov  6 05:24:26 2011
@@ -23,7 +23,7 @@ using AttributeImpl = Lucene.Net.Util.At
 namespace Lucene.Net.Analysis.Tokenattributes
 {
 	
-	/// <summary> The payload of a Token. See also {@link Payload}.</summary>
+	/// <summary> The payload of a Token. See also <see cref="Payload" />.</summary>
 	[Serializable]
 	public class PayloadAttributeImpl:AttributeImpl, PayloadAttribute, System.ICloneable
 	{

Modified: incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/PositionIncrementAttribute.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/PositionIncrementAttribute.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/PositionIncrementAttribute.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/PositionIncrementAttribute.cs Sun Nov  6 05:24:26 2011
@@ -28,23 +28,23 @@ namespace Lucene.Net.Analysis.Tokenattri
 	/// 
 	/// <p/>The default value is one.
 	/// 
-	/// <p/>Some common uses for this are:<ul>
+	/// <p/>Some common uses for this are:<list type="bullet">
 	/// 
-	/// <li>Set it to zero to put multiple terms in the same position.  This is
+	/// <item>Set it to zero to put multiple terms in the same position.  This is
 	/// useful if, e.g., a word has multiple stems.  Searches for phrases
 	/// including either stem will match.  In this case, all but the first stem's
 	/// increment should be set to zero: the increment of the first instance
 	/// should be one.  Repeating a token with an increment of zero can also be
-	/// used to boost the scores of matches on that token.</li>
+	/// used to boost the scores of matches on that token.</item>
 	/// 
-	/// <li>Set it to values greater than one to inhibit exact phrase matches.
+	/// <item>Set it to values greater than one to inhibit exact phrase matches.
 	/// If, for example, one does not want phrases to match across removed stop
 	/// words, then one could build a stop word filter that removes stop words and
 	/// also sets the increment to the number of stop words removed before each
 	/// non-stop word.  Then exact phrase queries will only match when the terms
-	/// occur with no intervening stop words.</li>
+	/// occur with no intervening stop words.</item>
 	/// 
-	/// </ul>
+	/// </list>
 	/// 
 	/// </summary>
 	/// <seealso cref="Lucene.Net.Index.TermPositions">
@@ -59,7 +59,7 @@ namespace Lucene.Net.Analysis.Tokenattri
 		void  SetPositionIncrement(int positionIncrement);
 		
 		/// <summary>Returns the position increment of this Token.</summary>
-		/// <seealso cref="setPositionIncrement">
+		/// <seealso cref="SetPositionIncrement">
 		/// </seealso>
 		int GetPositionIncrement();
 	}
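
For reference, a minimal sketch of the second bullet above: a hypothetical stop-word filter that drops a token and adds its increment to the next surviving token, so exact phrase queries will not match across the removed word.

    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Tokenattributes;

    public class StopGapSketchFilter : TokenFilter
    {
        private readonly PositionIncrementAttribute posIncrAtt;
        private readonly TermAttribute termAtt;

        public StopGapSketchFilter(TokenStream input) : base(input)
        {
            posIncrAtt = (PositionIncrementAttribute) AddAttribute(typeof(PositionIncrementAttribute));
            termAtt = (TermAttribute) AddAttribute(typeof(TermAttribute));
        }

        public override bool IncrementToken()
        {
            int skipped = 0;
            while (input.IncrementToken())
            {
                if (termAtt.Term() == "the")      // hypothetical single stop word
                {
                    skipped += posIncrAtt.GetPositionIncrement();
                    continue;                     // drop it, but remember the gap
                }
                posIncrAtt.SetPositionIncrement(posIncrAtt.GetPositionIncrement() + skipped);
                return true;
            }
            return false;
        }
    }
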

Modified: incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/PositionIncrementAttributeImpl.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/PositionIncrementAttributeImpl.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/PositionIncrementAttributeImpl.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/PositionIncrementAttributeImpl.cs Sun Nov  6 05:24:26 2011
@@ -24,28 +24,28 @@ namespace Lucene.Net.Analysis.Tokenattri
 {
 	
 	/// <summary>The positionIncrement determines the position of this token
-	/// relative to the previous Token in a {@link TokenStream}, used in phrase
+	/// relative to the previous Token in a <see cref="TokenStream" />, used in phrase
 	/// searching.
 	/// 
 	/// <p/>The default value is one.
 	/// 
-	/// <p/>Some common uses for this are:<ul>
+	/// <p/>Some common uses for this are:<list type="bullet">
 	/// 
-	/// <li>Set it to zero to put multiple terms in the same position.  This is
+	/// <item>Set it to zero to put multiple terms in the same position.  This is
 	/// useful if, e.g., a word has multiple stems.  Searches for phrases
 	/// including either stem will match.  In this case, all but the first stem's
 	/// increment should be set to zero: the increment of the first instance
 	/// should be one.  Repeating a token with an increment of zero can also be
-	/// used to boost the scores of matches on that token.</li>
+	/// used to boost the scores of matches on that token.</item>
 	/// 
-	/// <li>Set it to values greater than one to inhibit exact phrase matches.
+	/// <item>Set it to values greater than one to inhibit exact phrase matches.
 	/// If, for example, one does not want phrases to match across removed stop
 	/// words, then one could build a stop word filter that removes stop words and
 	/// also sets the increment to the number of stop words removed before each
 	/// non-stop word.  Then exact phrase queries will only match when the terms
-	/// occur with no intervening stop words.</li>
+	/// occur with no intervening stop words.</item>
 	/// 
-	/// </ul>
+	/// </list>
 	/// </summary>
 	[Serializable]
 	public class PositionIncrementAttributeImpl:AttributeImpl, PositionIncrementAttribute, System.ICloneable
@@ -65,7 +65,7 @@ namespace Lucene.Net.Analysis.Tokenattri
 		}
 		
 		/// <summary>Returns the position increment of this Token.</summary>
-		/// <seealso cref="setPositionIncrement">
+		/// <seealso cref="SetPositionIncrement">
 		/// </seealso>
 		public virtual int GetPositionIncrement()
 		{


