lucenenet-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ccurr...@apache.org
Subject [Lucene.Net] svn commit: r1198132 [3/17] - in /incubator/lucene.net/trunk/src: contrib/Analyzers/AR/ contrib/Analyzers/BR/ contrib/Analyzers/CJK/ contrib/Analyzers/Cz/ contrib/Analyzers/De/ contrib/Analyzers/Fr/ contrib/Analyzers/Miscellaneous/ contrib/Analyzers/NG...
Date Sun, 06 Nov 2011 05:24:44 GMT
Modified: incubator/lucene.net/trunk/src/contrib/Similarity/Similar/MoreLikeThis.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Similarity/Similar/MoreLikeThis.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Similarity/Similar/MoreLikeThis.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Similarity/Similar/MoreLikeThis.cs Sun Nov  6 05:24:26 2011
@@ -39,7 +39,7 @@ namespace Similarity.Net
 	
     /// <summary> Generate "more like this" similarity queries. 
     /// Based on this mail:
-    /// <code><pre>
+    /// <pre>
     /// Lucene does let you access the document frequency of terms, with IndexReader.DocFreq().
     /// Term frequencies can be computed by re-tokenizing the text, which, for a single document,
     /// is usually fast enough.  But looking up the DocFreq() of every term in the document is
@@ -66,18 +66,18 @@ namespace Similarity.Net
     /// above.  The frequency and length thresholds could be parameters, etc.
     /// 
     /// Doug
-    /// </pre></code>
+    /// </pre>
     /// 
     /// 
-    /// <p>
+    /// <p/>
     /// <h3>Initial Usage</h3>
     /// 
     /// This class has lots of options to try to make it efficient and flexible.
-    /// See the body of {@link #main Main()} below in the source for real code, or
+    /// See the body of <see cref="Main"/> below in the source for real code, or
     /// if you want pseudo code, the simplest possible usage is as follows. The bold
     /// fragment is specific to this class.
     /// 
-    /// <code><pre>
+    /// <pre>
     /// 
     /// IndexReader ir = ...
     /// IndexSearcher is = ...
@@ -90,37 +90,37 @@ namespace Similarity.Net
     /// <em>// now the usual iteration thru 'hits' - the only thing to watch for is to make sure
     /// you ignore the doc if it matches your 'target' document, as it should be similar to itself </em>
     /// 
-    /// </pre></code>
+    /// </pre>
     /// 
     /// Thus you:
     /// <ol>
-    /// <li> do your normal, Lucene setup for searching,
-    /// <li> create a MoreLikeThis,
-    /// <li> get the text of the doc you want to find similaries to
-    /// <li> then call one of the Like() calls to generate a similarity query
-    /// <li> call the searcher to find the similar docs
+    /// <li> do your normal, Lucene setup for searching,</li>
+    /// <li> create a MoreLikeThis,</li>
+    /// <li> get the text of the doc you want to find similaries to</li>
+    /// <li> then call one of the Like() calls to generate a similarity query</li>
+    /// <li> call the searcher to find the similar docs</li>
     /// </ol>
     /// 
     /// <h3>More Advanced Usage</h3>
     /// 
-    /// You may want to use {@link #SetFieldNames SetFieldNames(...)} so you can examine
+    /// You may want to use <see cref="SetFieldNames"/> so you can examine
     /// multiple fields (e.g. body and title) for similarity.
-    /// <p>
+    /// <p/>
     /// 
     /// Depending on the size of your index and the size and makeup of your documents you
     /// may want to call the other set methods to control how the similarity queries are
     /// generated:
     /// <ul>
-    /// <li> {@link #SetMinTermFreq SetMinTermFreq(...)}
-    /// <li> {@link #SetMinDocFreq SetMinDocFreq(...)}
-    /// <li> {@link #SetMinWordLen SetMinWordLen(...)}
-    /// <li> {@link #SetMaxWordLen SetMaxWordLen(...)}
-    /// <li> {@link #SetMaxQueryTerms SetMaxQueryTerms(...)}
-    /// <li> {@link #SetMaxNumTokensParsed SetMaxNumTokensParsed(...)}
-    /// <li> {@link #SetStopWords SetStopWord(...)} 
+    /// <li> <see cref="SetMinTermFreq"/></li>
+    /// <li> <see cref="SetMinDocFreq"/></li>
+    /// <li> <see cref="SetMinWordLen"/></li>
+    /// <li> <see cref="SetMaxWordLen"/></li>
+    /// <li> <see cref="SetMaxQueryTerms"/></li>
+    /// <li> <see cref="SetMaxNumTokensParsed"/></li>
+    /// <li> <see cref="SetStopWords"/> </li>
     /// </ul> 
     /// 
-    /// <hr>
+    /// <hr/>
     /// <pre>
     /// Changes: Mark Harwood 29/02/04
     /// Some bugfixing, some refactoring, some optimisation.
@@ -142,34 +142,34 @@ namespace Similarity.Net
     {
 		
         /// <summary> Default maximum number of tokens to parse in each example doc field that is not stored with TermVector support.</summary>
-        /// <seealso cref="#getMaxNumTokensParsed">
+        /// <seealso cref="GetMaxNumTokensParsed">
         /// </seealso>
         public const int DEFAULT_MAX_NUM_TOKENS_PARSED = 5000;
 		
 		
         /// <summary> Default analyzer to parse source doc with.</summary>
-        /// <seealso cref="#getAnalyzer">
+        /// <seealso cref="GetAnalyzer">
         /// </seealso>
         public static readonly Analyzer DEFAULT_ANALYZER = new StandardAnalyzer();
 		
         /// <summary> Ignore terms with less than this frequency in the source doc.</summary>
-        /// <seealso cref="#getMinTermFreq">
+        /// <seealso cref="GetMinTermFreq">
         /// </seealso>
-        /// <seealso cref="#setMinTermFreq">
+        /// <seealso cref="SetMinTermFreq">
         /// </seealso>
         public const int DEFAULT_MIN_TERM_FREQ = 2;
 		
         /// <summary> Ignore words which do not occur in at least this many docs.</summary>
-        /// <seealso cref="#getMinDocFreq">
+        /// <seealso cref="GetMinDocFreq">
         /// </seealso>
-        /// <seealso cref="#setMinDocFreq">
+        /// <seealso cref="SetMinDocFreq">
         /// </seealso>
         public const int DEFALT_MIN_DOC_FREQ = 5;
 		
         /// <summary> Boost terms in query based on score.</summary>
-        /// <seealso cref="#isBoost">
+        /// <seealso cref="IsBoost">
         /// </seealso>
-        /// <seealso cref="#SetBoost">
+        /// <seealso cref="SetBoost">
         /// </seealso>
         public const bool DEFAULT_BOOST = false;
 		
@@ -179,16 +179,16 @@ namespace Similarity.Net
         public static readonly System.String[] DEFAULT_FIELD_NAMES = new System.String[]{"contents"};
 		
         /// <summary> Ignore words less than this length or if 0 then this has no effect.</summary>
-        /// <seealso cref="#getMinWordLen">
+        /// <seealso cref="GetMinWordLen">
         /// </seealso>
-        /// <seealso cref="#setMinWordLen">
+        /// <seealso cref="SetMinWordLen">
         /// </seealso>
         public const int DEFAULT_MIN_WORD_LENGTH = 0;
 		
         /// <summary> Ignore words greater than this length or if 0 then this has no effect.</summary>
-        /// <seealso cref="#getMaxWordLen">
+        /// <seealso cref="GetMaxWordLen">
         /// </seealso>
-        /// <seealso cref="#setMaxWordLen">
+        /// <seealso cref="SetMaxWordLen">
         /// </seealso>
         public const int DEFAULT_MAX_WORD_LENGTH = 0;
 		
@@ -196,9 +196,9 @@ namespace Similarity.Net
         /// If null means to allow stop words.
         /// 
         /// </summary>
-        /// <seealso cref="#setStopWords">
+        /// <seealso cref="SetStopWords">
         /// </seealso>
-        /// <seealso cref="#getStopWords">
+        /// <seealso cref="GetStopWords">
         /// </seealso>
         public static readonly System.Collections.Hashtable DEFAULT_STOP_WORDS = null;
 		
@@ -208,11 +208,11 @@ namespace Similarity.Net
         /// <summary> Return a Query with no more than this many terms.
         /// 
         /// </summary>
-        /// <seealso cref="BooleanQuery#getMaxClauseCount">
+        /// <seealso cref="BooleanQuery.GetMaxClauseCount">
         /// </seealso>
-        /// <seealso cref="#getMaxQueryTerms">
+        /// <seealso cref="GetMaxQueryTerms">
         /// </seealso>
-        /// <seealso cref="#setMaxQueryTerms">
+        /// <seealso cref="SetMaxQueryTerms">
         /// </seealso>
         public const int DEFAULT_MAX_QUERY_TERMS = 25;
 		
@@ -258,12 +258,12 @@ namespace Similarity.Net
         }
 		
         /// <summary> Returns an analyzer that will be used to parse source doc with. The default analyzer
-        /// is the {@link #DEFAULT_ANALYZER}.
+        /// is the <see cref="DEFAULT_ANALYZER"/>.
         /// 
         /// </summary>
         /// <returns> the analyzer that will be used to parse source doc with.
         /// </returns>
-        /// <seealso cref="#DEFAULT_ANALYZER">
+        /// <seealso cref="DEFAULT_ANALYZER">
         /// </seealso>
         public Analyzer GetAnalyzer()
         {
@@ -271,7 +271,7 @@ namespace Similarity.Net
         }
 		
         /// <summary> Sets the analyzer to use. An analyzer is not required for generating a query with the
-        /// {@link #Like(int)} method, all other 'like' methods require an analyzer.
+        /// <see cref="Like(int)"/> method, all other 'like' methods require an analyzer.
         /// 
         /// </summary>
         /// <param name="analyzer">the analyzer to use to tokenize text.
@@ -282,7 +282,7 @@ namespace Similarity.Net
         }
 		
         /// <summary> Returns the frequency below which terms will be ignored in the source doc. The default
-        /// frequency is the {@link #DEFAULT_MIN_TERM_FREQ}.
+        /// frequency is the <see cref="DEFAULT_MIN_TERM_FREQ"/>.
         /// 
         /// </summary>
         /// <returns> the frequency below which terms will be ignored in the source doc.
@@ -303,7 +303,7 @@ namespace Similarity.Net
         }
 		
         /// <summary> Returns the frequency at which words will be ignored which do not occur in at least this
-        /// many docs. The default frequency is {@link #DEFALT_MIN_DOC_FREQ}.
+        /// many docs. The default frequency is <see cref="DEFALT_MIN_DOC_FREQ"/>.
         /// 
         /// </summary>
         /// <returns> the frequency at which words will be ignored which do not occur in at least this
@@ -327,12 +327,12 @@ namespace Similarity.Net
         }
 		
         /// <summary> Returns whether to boost terms in query based on "score" or not. The default is
-        /// {@link #DEFAULT_BOOST}.
+        /// <see cref="DEFAULT_BOOST"/>.
         /// 
         /// </summary>
         /// <returns> whether to boost terms in query based on "score" or not.
         /// </returns>
-        /// <seealso cref="#SetBoost">
+        /// <seealso cref="SetBoost">
         /// </seealso>
         public bool IsBoost()
         {
@@ -344,7 +344,7 @@ namespace Similarity.Net
         /// </summary>
         /// <param name="boost">true to boost terms in query based on "score", false otherwise.
         /// </param>
-        /// <seealso cref="#isBoost">
+        /// <seealso cref="IsBoost">
         /// </seealso>
         public void  SetBoost(bool boost)
         {
@@ -352,7 +352,7 @@ namespace Similarity.Net
         }
 		
         /// <summary> Returns the field names that will be used when generating the 'More Like This' query.
-        /// The default field names that will be used is {@link #DEFAULT_FIELD_NAMES}.
+        /// The default field names that will be used is <see cref="DEFAULT_FIELD_NAMES"/>.
         /// 
         /// </summary>
         /// <returns> the field names that will be used when generating the 'More Like This' query.
@@ -376,7 +376,7 @@ namespace Similarity.Net
         }
 		
         /// <summary> Returns the minimum word length below which words will be ignored. Set this to 0 for no
-        /// minimum word length. The default is {@link #DEFAULT_MIN_WORD_LENGTH}.
+        /// minimum word length. The default is <see cref="DEFAULT_MIN_WORD_LENGTH"/>.
         /// 
         /// </summary>
         /// <returns> the minimum word length below which words will be ignored.
@@ -397,7 +397,7 @@ namespace Similarity.Net
         }
 		
         /// <summary> Returns the maximum word length above which words will be ignored. Set this to 0 for no
-        /// maximum word length. The default is {@link #DEFAULT_MAX_WORD_LENGTH}.
+        /// maximum word length. The default is <see cref="DEFAULT_MAX_WORD_LENGTH"/>.
         /// 
         /// </summary>
         /// <returns> the maximum word length above which words will be ignored.
@@ -426,9 +426,7 @@ namespace Similarity.Net
         /// <param name="stopWords">set of stopwords, if null it means to allow stop words
         /// 
         /// </param>
-        /// <seealso cref="StopFilter.makeStopSet()">
-        /// </seealso>
-        /// <seealso cref="#getStopWords">
+        /// <seealso cref="Lucene.Net.Analysis.StopFilter.MakeStopSet(System.Collections.IList)">
         /// </seealso>
         public void  SetStopWords(System.Collections.Hashtable stopWords)
         {
@@ -436,7 +434,7 @@ namespace Similarity.Net
         }
 		
         /// <summary> Get the current stop words being used.</summary>
-        /// <seealso cref="#setStopWords">
+        /// <seealso cref="SetStopWords">
         /// </seealso>
         public System.Collections.Hashtable GetStopWords()
         {
@@ -445,7 +443,7 @@ namespace Similarity.Net
 		
 		
         /// <summary> Returns the maximum number of query terms that will be included in any generated query.
-        /// The default is {@link #DEFAULT_MAX_QUERY_TERMS}.
+        /// The default is <see cref="DEFAULT_MAX_QUERY_TERMS"/>.
         /// 
         /// </summary>
         /// <returns> the maximum number of query terms that will be included in any generated query.
@@ -468,7 +466,7 @@ namespace Similarity.Net
 		
         /// <returns> The maximum number of tokens to parse in each example doc field that is not stored with TermVector support
         /// </returns>
-        /// <seealso cref="#DEFAULT_MAX_NUM_TOKENS_PARSED">
+        /// <seealso cref="DEFAULT_MAX_NUM_TOKENS_PARSED">
         /// </seealso>
         public int GetMaxNumTokensParsed()
         {
@@ -884,16 +882,16 @@ namespace Similarity.Net
         /// Each array has 6 elements.
         /// The elements are:
         /// <ol>
-        /// <li> The word (String)
-        /// <li> The top field that this word comes from (String)
-        /// <li> The score for this word (Float)
-        /// <li> The IDF value (Float)
-        /// <li> The frequency of this word in the index (Integer)
-        /// <li> The frequency of this word in the source document (Integer)	 	 
+        /// <li> The word (String)</li>
+        /// <li> The top field that this word comes from (String)</li>
+        /// <li> The score for this word (Float)</li>
+        /// <li> The IDF value (Float)</li>
+        /// <li> The frequency of this word in the index (Integer)</li>
+        /// <li> The frequency of this word in the source document (Integer)</li>	 	 
         /// </ol>
         /// This is a somewhat "advanced" routine, and in general only the 1st entry in the array is of interest.
         /// This method is exposed so that you can identify the "interesting words" in a document.
-        /// For an easier method to call see {@link #retrieveInterestingTerms retrieveInterestingTerms()}.
+        /// For an easier method to call see <see cref="RetrieveInterestingTerms"/>.
         /// 
         /// </summary>
         /// <param name="r">the reader that has the content of the document
@@ -901,7 +899,7 @@ namespace Similarity.Net
         /// <returns> the most interesting words in the document ordered by score, with the highest scoring, or best entry, first
         /// 
         /// </returns>
-        /// <seealso cref="#retrieveInterestingTerms">
+        /// <seealso cref="RetrieveInterestingTerms">
         /// </seealso>
         public PriorityQueue RetrieveTerms(System.IO.StreamReader r)
         {
@@ -915,16 +913,16 @@ namespace Similarity.Net
         }
 		
         /// <summary> Convenience routine to make it easy to return the most interesting words in a document.
-        /// More advanced users will call {@link #RetrieveTerms(java.io.Reader) retrieveTerms()} directly.
+        /// More advanced users will call <see cref="RetrieveTerms(System.IO.StreamReader)"/> directly.
         /// </summary>
         /// <param name="r">the source document
         /// </param>
         /// <returns> the most interesting words in the document
         /// 
         /// </returns>
-        /// <seealso cref="#RetrieveTerms(java.io.Reader)">
+        /// <seealso cref="RetrieveTerms(System.IO.StreamReader)">
         /// </seealso>
-        /// <seealso cref="#setMaxQueryTerms">
+        /// <seealso cref="SetMaxQueryTerms">
         /// </seealso>
         public System.String[] RetrieveInterestingTerms(System.IO.StreamReader r)
         {

Modified: incubator/lucene.net/trunk/src/contrib/Similarity/Similar/SimilarityQueries.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Similarity/Similar/SimilarityQueries.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Similarity/Similar/SimilarityQueries.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Similarity/Similar/SimilarityQueries.cs Sun Nov  6 05:24:26 2011
@@ -44,32 +44,30 @@ namespace Similarity.Net
 		
         /// <summary> Simple similarity query generators.
         /// Takes every unique word and forms a boolean query where all words are optional.
-        /// After you get this you'll use to query your {@link IndexSearcher} for similar docs.
+        /// After you get this you'll use to query your <see cref="IndexSearcher"/> for similar docs.
         /// The only caveat is the first hit returned <b>should be</b> your source document - you'll
         /// need to then ignore that.
         /// 
-        /// <p>
+        /// <p/>
         /// 
         /// So, if you have a code fragment like this:
-        /// <br>
+        /// <br/>
         /// <code>
         /// Query q = formSimilaryQuery( "I use Lucene to search fast. Fast searchers are good", new StandardAnalyzer(), "contents", null);
         /// </code>
         /// 
-        /// <p>
+        /// <p/>
+        /// The query returned, in string form, will be <c>'(i use lucene to search fast searchers are good')</c>.
         /// 
-        /// </summary>
-        /// <summary> The query returned, in string form, will be <code>'(i use lucene to search fast searchers are good')</code>.
-        /// 
-        /// <p>
+        /// <p/>
         /// The philosophy behind this method is "two documents are similar if they share lots of words".
         /// Note that behind the scenes, Lucene's scoring algorithm will tend to give two documents a higher similarity score if they share more uncommon words.
         /// 
-        /// <P>
+        /// <P/>
         /// This method is fail-safe in that if a long 'body' is passed in and
-        /// {@link BooleanQuery#add BooleanQuery.add()} (used internally)
+        /// <see cref="BooleanQuery.Add(BooleanClause)"/> (used internally)
         /// throws
-        /// {@link org.apache.lucene.search.BooleanQuery.TooManyClauses BooleanQuery.TooManyClauses}, the
+        /// <see cref="BooleanQuery.TooManyClauses"/>, the
         /// query as it is will be returned.
         /// 
         /// 

Modified: incubator/lucene.net/trunk/src/contrib/Snowball/Lucene.Net/Analysis/Snowball/SnowballAnalyzer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Snowball/Lucene.Net/Analysis/Snowball/SnowballAnalyzer.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Snowball/Lucene.Net/Analysis/Snowball/SnowballAnalyzer.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Snowball/Lucene.Net/Analysis/Snowball/SnowballAnalyzer.cs Sun Nov  6 05:24:26 2011
@@ -22,12 +22,12 @@ using SF.Snowball.Ext;
 namespace Lucene.Net.Analysis.Snowball
 {
 	
-	/// <summary>Filters {@link StandardTokenizer} with {@link StandardFilter}, {@link
-	/// LowerCaseFilter}, {@link StopFilter} and {@link SnowballFilter}.
+	/// <summary>Filters <see cref="StandardTokenizer"/> with <see cref="StandardFilter"/>, {@link
+	/// LowerCaseFilter}, <see cref="StopFilter"/> and <see cref="SnowballFilter"/>.
 	/// 
-	/// Available stemmers are listed in {@link SF.Snowball.Ext}.  The name of a
+	/// Available stemmers are listed in <see cref="SF.Snowball.Ext"/>.  The name of a
 	/// stemmer is the part of the class name before "Stemmer", e.g., the stemmer in
-	/// {@link EnglishStemmer} is named "English".
+	/// <see cref="EnglishStemmer"/> is named "English".
 	/// </summary>
 	public class SnowballAnalyzer : Analyzer
 	{
@@ -46,8 +46,8 @@ namespace Lucene.Net.Analysis.Snowball
 			stopSet = StopFilter.MakeStopSet(stopWords);
 		}
 		
-		/// <summary>Constructs a {@link StandardTokenizer} filtered by a {@link
-		/// StandardFilter}, a {@link LowerCaseFilter} and a {@link StopFilter}. 
+		/// <summary>Constructs a <see cref="StandardTokenizer"/> filtered by a {@link
+		/// StandardFilter}, a <see cref="LowerCaseFilter"/> and a <see cref="StopFilter"/>. 
 		/// </summary>
         public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
 		{

Modified: incubator/lucene.net/trunk/src/contrib/Snowball/Lucene.Net/Analysis/Snowball/SnowballFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Snowball/Lucene.Net/Analysis/Snowball/SnowballFilter.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Snowball/Lucene.Net/Analysis/Snowball/SnowballFilter.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Snowball/Lucene.Net/Analysis/Snowball/SnowballFilter.cs Sun Nov  6 05:24:26 2011
@@ -27,9 +27,9 @@ namespace Lucene.Net.Analysis.Snowball
 	
 	/// <summary>A filter that stems words using a Snowball-generated stemmer.
 	/// 
-	/// Available stemmers are listed in {@link SF.Snowball.Ext}.  The name of a
+	/// Available stemmers are listed in <see cref="SF.Snowball.Ext"/>.  The name of a
 	/// stemmer is the part of the class name before "Stemmer", e.g., the stemmer in
-	/// {@link EnglishStemmer} is named "English".
+	/// <see cref="EnglishStemmer"/> is named "English".
 	/// </summary>
 	
 	public class SnowballFilter : TokenFilter
@@ -42,7 +42,7 @@ namespace Lucene.Net.Analysis.Snowball
 		/// <summary>Construct the named stemming filter.
 		/// 
 		/// </summary>
-		/// <param name="in">the input tokens to stem
+        /// <param name="in_Renamed">the input tokens to stem
 		/// </param>
 		/// <param name="name">the name of a stemmer
 		/// </param>

Modified: incubator/lucene.net/trunk/src/contrib/Spatial/Geometry/LatLng.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Spatial/Geometry/LatLng.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Spatial/Geometry/LatLng.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Spatial/Geometry/LatLng.cs Sun Nov  6 05:24:26 2011
@@ -67,7 +67,7 @@ namespace Lucene.Net.Spatial.Geometry
 		/// <summary>
 		/// Calculates the distance between two lat/lng's in miles.
 		/// </summary>
-		/// <param name="latLng">The lat lng.</param>
+        /// <param name="ll2">The lat lng.</param>
 		/// <returns>Returns the distance in miles</returns>
 		public double ArcDistance(LatLng ll2)
 		{

Modified: incubator/lucene.net/trunk/src/contrib/Spatial/Tier/Projectors/CartesianTierPlotter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Spatial/Tier/Projectors/CartesianTierPlotter.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Spatial/Tier/Projectors/CartesianTierPlotter.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Spatial/Tier/Projectors/CartesianTierPlotter.cs Sun Nov  6 05:24:26 2011
@@ -139,7 +139,7 @@ namespace Lucene.Net.Spatial.Tier.Projec
 
 		/// <summary>
 		/// A log to the base 2 formula.
-		/// <code>Math.Log(value) / Math.Log(2)</code>
+		/// <c>Math.Log(value) / Math.Log(2)</c>
 		/// </summary>
 		/// <param name="value">The value.</param>
 		public double Log2(double value)

Modified: incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/JaroWinklerDistance.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/JaroWinklerDistance.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/JaroWinklerDistance.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/JaroWinklerDistance.cs Sun Nov  6 05:24:26 2011
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

Modified: incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/LevenshteinDistance.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/LevenshteinDistance.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/LevenshteinDistance.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/LevenshteinDistance.cs Sun Nov  6 05:24:26 2011
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

Modified: incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/NGramDistance.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/NGramDistance.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/NGramDistance.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/NGramDistance.cs Sun Nov  6 05:24:26 2011
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

Modified: incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/PlainTextDictionary.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/PlainTextDictionary.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/PlainTextDictionary.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/PlainTextDictionary.cs Sun Nov  6 05:24:26 2011
@@ -22,11 +22,10 @@ namespace SpellChecker.Net.Search.Spell
 	
 	
     /// <summary> Dictionary represented by a file text.
-    /// 
-    /// <p>Format allowed: 1 word per line:<br>
-    /// word1<br>
-    /// word2<br>
-    /// word3<br>
+    /// <p/>Format allowed: 1 word per line:<br/>
+    /// word1<br/>
+    /// word2<br/>
+    /// word3<br/>
     /// 
     /// </summary>
     /// <author>  Nicolas Maisonneuve

Modified: incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/SpellChecker.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/SpellChecker.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/SpellChecker.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/SpellChecker.cs Sun Nov  6 05:24:26 2011
@@ -41,7 +41,7 @@ namespace SpellChecker.Net.Search.Spell
     /// (initially inspired by the David Spencer code).
     /// </p>
     /// 
-    /// <p>Example Usage:
+    /// <p>Example Usage:</p>
     /// 
     /// <pre>
     /// SpellChecker spellchecker = new SpellChecker(spellIndexDirectory);
@@ -97,7 +97,7 @@ namespace SpellChecker.Net.Search.Spell
         /// is created if it doesn't exist yet.
         /// </summary>
         /// <param name="gramIndex">the spell index directory</param>
-        /// <param name="sd">the {@link StringDistance} measurement to use </param>
+        /// <param name="sd">the <see cref="StringDistance"/> measurement to use </param>
         public SpellChecker(Directory gramIndex, StringDistance sd)
         {
             this.SetSpellIndex(gramIndex);
@@ -106,7 +106,7 @@ namespace SpellChecker.Net.Search.Spell
 
         /// <summary>
         /// Use the given directory as a spell checker index with a
-        /// {@link LevensteinDistance} as the default {@link StringDistance}. The
+        /// <see cref="LevenshteinDistance"/> as the default <see cref="StringDistance"/>. The
         /// directory is created if it doesn't exist yet.
         /// </summary>
         /// <param name="gramIndex">the spell index directory</param>
@@ -140,23 +140,23 @@ namespace SpellChecker.Net.Search.Spell
         }
 
         /// <summary>
-        /// Sets the {@link StringDistance} implementation for this
-        /// {@link SpellChecker} instance.
+        /// Sets the <see cref="StringDistance"/> implementation for this
+        /// <see cref="SpellChecker"/> instance.
         /// </summary>
-        /// <param name="sd">the {@link StringDistance} implementation for this
-        /// {@link SpellChecker} instance.</param>
+        /// <param name="sd">the <see cref="StringDistance"/> implementation for this
+        /// <see cref="SpellChecker"/> instance.</param>
         public void setStringDistance(StringDistance sd)
         {
             this.sd = sd;
         }
 
         /// <summary>
-        /// Returns the {@link StringDistance} instance used by this
-        /// {@link SpellChecker} instance.
+        /// Returns the <see cref="StringDistance"/> instance used by this
+        /// <see cref="SpellChecker"/> instance.
         /// </summary>
         /// <returns>
-        /// Returns the {@link StringDistance} instance used by this
-        /// {@link SpellChecker} instance.
+        /// Returns the <see cref="StringDistance"/> instance used by this
+        /// <see cref="SpellChecker"/> instance.
         /// </returns>
         public StringDistance GetStringDistance()
         {
@@ -431,7 +431,7 @@ namespace SpellChecker.Net.Search.Spell
         }
 
         /// <summary>
-        /// Indexes the data from the given {@link Dictionary}.
+        /// Indexes the data from the given <see cref="Dictionary"/>.
         /// </summary>
         /// <param name="dict">dict the dictionary to index</param>
         public void IndexDictionary(Dictionary dict)
@@ -576,10 +576,10 @@ namespace SpellChecker.Net.Search.Spell
         }
 
         /// <summary>
-        /// Returns <code>true</code> if and only if the {@link SpellChecker} is
+        /// Returns <code>true</code> if and only if the <see cref="SpellChecker"/> is
         /// closed, otherwise <code>false</code>.
         /// </summary>
-        /// <returns><code>true</code> if and only if the {@link SpellChecker} is
+        /// <returns><code>true</code> if and only if the <see cref="SpellChecker"/> is
         ///         closed, otherwise <code>false</code>.
         ///</returns>
         bool IsClosed()

Modified: incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/StringDistance.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/StringDistance.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/StringDistance.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/StringDistance.cs Sun Nov  6 05:24:26 2011
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

Modified: incubator/lucene.net/trunk/src/contrib/WordNet/SynExpand/SynExpand.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/WordNet/SynExpand/SynExpand.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/WordNet/SynExpand/SynExpand.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/WordNet/SynExpand/SynExpand.cs Sun Nov  6 05:24:26 2011
@@ -29,7 +29,7 @@ namespace WorldNet.Net
 	
 	
 	/// <summary> Expand a query by looking up synonyms for every term.
-	/// You need to invoke {@link Syns2Index} first to build the synonym index.
+	/// You need to invoke <see cref="Syns2Index"/> first to build the synonym index.
 	/// 
 	/// </summary>
 	/// <seealso cref="Syns2Index">
@@ -42,10 +42,10 @@ namespace WorldNet.Net
 		/// 
 		/// If you pass in the query "big dog" then it prints out:
 		/// 
-		/// <code><pre>
+		/// <pre>
 		/// Query: big adult^0.9 bad^0.9 bighearted^0.9 boastful^0.9 boastfully^0.9 bounteous^0.9 bountiful^0.9 braggy^0.9 crowing^0.9 freehanded^0.9 giving^0.9 grown^0.9 grownup^0.9 handsome^0.9 large^0.9 liberal^0.9 magnanimous^0.9 momentous^0.9 openhanded^0.9 prominent^0.9 swelled^0.9 vainglorious^0.9 vauntingly^0.9
 		/// dog andiron^0.9 blackguard^0.9 bounder^0.9 cad^0.9 chase^0.9 click^0.9 detent^0.9 dogtooth^0.9 firedog^0.9 frank^0.9 frankfurter^0.9 frump^0.9 heel^0.9 hotdog^0.9 hound^0.9 pawl^0.9 tag^0.9 tail^0.9 track^0.9 trail^0.9 weenie^0.9 wiener^0.9 wienerwurst^0.9
-		/// </pre></code>
+		/// </pre>
 		/// </summary>
 		[STAThread]
 		public static void  Main(System.String[] args)
@@ -78,10 +78,10 @@ namespace WorldNet.Net
 		/// <param name="query">users query that is assumed to not have any "special" query syntax, thus it should be just normal words, so "big dog" makes sense, but a query like "title:foo^1.2" doesn't as this should presumably be passed directly to the default query parser.
 		/// 
 		/// </param>
-		/// <param name="syns">a opened to the Lucene index you previously created with {@link Syns2Index}. The searcher is not closed or otherwise altered.
+		/// <param name="syns">an index searcher opened to the Lucene index you previously created with <see cref="Syns2Index"/>. The searcher is not closed or otherwise altered.
 		/// 
 		/// </param>
-		/// <param name="a">optional analyzer used to parse the users query else {@link StandardAnalyzer} is used
+		/// <param name="a">optional analyzer used to parse the users query else <see cref="StandardAnalyzer"/> is used
 		/// 
 		/// </param>
 		/// <param name="field">optional field name to search in or null if you want the default of "contents"

Modified: incubator/lucene.net/trunk/src/contrib/WordNet/Syns2Index/Syns2Index.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/WordNet/Syns2Index/Syns2Index.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/WordNet/Syns2Index/Syns2Index.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/WordNet/Syns2Index/Syns2Index.cs Sun Nov  6 05:24:26 2011
@@ -27,18 +27,18 @@ namespace WorldNet.Net
 {
 	
 	/// <summary> Convert the prolog file wn_s.pl from the <a href="http://www.cogsci.princeton.edu/2.0/WNprolog-2.0.tar.gz">WordNet prolog download</a>
-	/// into a Lucene index suitable for looking up synonyms and performing query expansion ({@link SynExpand#expand SynExpand.expand(...)}).
+	/// into a Lucene index suitable for looking up synonyms and performing query expansion (<see cref="SynExpand.Expand"/>).
 	/// 
 	/// This has been tested with WordNet 2.0.
 	/// 
-	/// The index has fields named "word" ({@link #F_WORD})
-	/// and "syn" ({@link #F_SYN}).
+	/// The index has fields named "word" (<see cref="F_WORD"/>)
+	/// and "syn" (<see cref="F_SYN"/>).
 	/// <p>
 	/// The source word (such as 'big') can be looked up in the
 	/// "word" field, and if present there will be fields named "syn"
 	/// for every synonym. What's tricky here is that there could be <b>multiple</b>
 	/// fields with the same name, in the general case for words that have multiple synonyms.
-	/// That's not a problem with Lucene, you just use {@link org.apache.lucene.document.Document#getValues}
+	/// That's not a problem with Lucene, you just use <see cref="Document.GetValues"/>
 	/// </p>
 	/// <p>
 	/// While the WordNet file distinguishes groups of synonyms with
@@ -194,7 +194,7 @@ namespace WorldNet.Net
 		/// </summary>
 		/// <param name="s">string to check
 		/// </param>
-		/// <returns> <code>true</code> if the string is decent
+		/// <returns> <c>true</c> if the string is decent
 		/// </returns>
 		private static bool IsDecent(System.String s)
 		{

Modified: incubator/lucene.net/trunk/src/core/Analysis/ASCIIFoldingFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/ASCIIFoldingFilter.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/ASCIIFoldingFilter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/ASCIIFoldingFilter.cs Sun Nov  6 05:24:26 2011
@@ -30,29 +30,29 @@ namespace Lucene.Net.Analysis
 	/// Characters from the following Unicode blocks are converted; however, only
 	/// those characters with reasonable ASCII alternatives are converted:
 	/// 
-	/// <ul>
-	/// <li>C1 Controls and Latin-1 Supplement: <a href="http://www.unicode.org/charts/PDF/U0080.pdf">http://www.unicode.org/charts/PDF/U0080.pdf</a></li>
-    /// <li>Latin Extended-A: <a href="http://www.unicode.org/charts/PDF/U0100.pdf">http://www.unicode.org/charts/PDF/U0100.pdf</a></li>
-    /// <li>Latin Extended-B: <a href="http://www.unicode.org/charts/PDF/U0180.pdf">http://www.unicode.org/charts/PDF/U0180.pdf</a></li>
-    /// <li>Latin Extended Additional: <a href="http://www.unicode.org/charts/PDF/U1E00.pdf">http://www.unicode.org/charts/PDF/U1E00.pdf</a></li>
-    /// <li>Latin Extended-C: <a href="http://www.unicode.org/charts/PDF/U2C60.pdf">http://www.unicode.org/charts/PDF/U2C60.pdf</a></li>
-    /// <li>Latin Extended-D: <a href="http://www.unicode.org/charts/PDF/UA720.pdf">http://www.unicode.org/charts/PDF/UA720.pdf</a></li>
-    /// <li>IPA Extensions: <a href="http://www.unicode.org/charts/PDF/U0250.pdf">http://www.unicode.org/charts/PDF/U0250.pdf</a></li>
-    /// <li>Phonetic Extensions: <a href="http://www.unicode.org/charts/PDF/U1D00.pdf">http://www.unicode.org/charts/PDF/U1D00.pdf</a></li>
-    /// <li>Phonetic Extensions Supplement: <a href="http://www.unicode.org/charts/PDF/U1D80.pdf">http://www.unicode.org/charts/PDF/U1D80.pdf</a></li>
-    /// <li>General Punctuation: <a href="http://www.unicode.org/charts/PDF/U2000.pdf">http://www.unicode.org/charts/PDF/U2000.pdf</a></li>
-    /// <li>Superscripts and Subscripts: <a href="http://www.unicode.org/charts/PDF/U2070.pdf">http://www.unicode.org/charts/PDF/U2070.pdf</a></li>
-    /// <li>Enclosed Alphanumerics: <a href="http://www.unicode.org/charts/PDF/U2460.pdf">http://www.unicode.org/charts/PDF/U2460.pdf</a></li>
-    /// <li>Dingbats: <a href="http://www.unicode.org/charts/PDF/U2700.pdf">http://www.unicode.org/charts/PDF/U2700.pdf</a></li>
-    /// <li>Supplemental Punctuation: <a href="http://www.unicode.org/charts/PDF/U2E00.pdf">http://www.unicode.org/charts/PDF/U2E00.pdf</a></li>
-    /// <li>Alphabetic Presentation Forms: <a href="http://www.unicode.org/charts/PDF/UFB00.pdf">http://www.unicode.org/charts/PDF/UFB00.pdf</a></li>
-    /// <li>Halfwidth and Fullwidth Forms: <a href="http://www.unicode.org/charts/PDF/UFF00.pdf">http://www.unicode.org/charts/PDF/UFF00.pdf</a></li>
-	/// </ul>
+	/// <list type="bullet">
+	/// <item>C1 Controls and Latin-1 Supplement: <a href="http://www.unicode.org/charts/PDF/U0080.pdf">http://www.unicode.org/charts/PDF/U0080.pdf</a></item>
+    /// <item>Latin Extended-A: <a href="http://www.unicode.org/charts/PDF/U0100.pdf">http://www.unicode.org/charts/PDF/U0100.pdf</a></item>
+    /// <item>Latin Extended-B: <a href="http://www.unicode.org/charts/PDF/U0180.pdf">http://www.unicode.org/charts/PDF/U0180.pdf</a></item>
+    /// <item>Latin Extended Additional: <a href="http://www.unicode.org/charts/PDF/U1E00.pdf">http://www.unicode.org/charts/PDF/U1E00.pdf</a></item>
+    /// <item>Latin Extended-C: <a href="http://www.unicode.org/charts/PDF/U2C60.pdf">http://www.unicode.org/charts/PDF/U2C60.pdf</a></item>
+    /// <item>Latin Extended-D: <a href="http://www.unicode.org/charts/PDF/UA720.pdf">http://www.unicode.org/charts/PDF/UA720.pdf</a></item>
+    /// <item>IPA Extensions: <a href="http://www.unicode.org/charts/PDF/U0250.pdf">http://www.unicode.org/charts/PDF/U0250.pdf</a></item>
+    /// <item>Phonetic Extensions: <a href="http://www.unicode.org/charts/PDF/U1D00.pdf">http://www.unicode.org/charts/PDF/U1D00.pdf</a></item>
+    /// <item>Phonetic Extensions Supplement: <a href="http://www.unicode.org/charts/PDF/U1D80.pdf">http://www.unicode.org/charts/PDF/U1D80.pdf</a></item>
+    /// <item>General Punctuation: <a href="http://www.unicode.org/charts/PDF/U2000.pdf">http://www.unicode.org/charts/PDF/U2000.pdf</a></item>
+    /// <item>Superscripts and Subscripts: <a href="http://www.unicode.org/charts/PDF/U2070.pdf">http://www.unicode.org/charts/PDF/U2070.pdf</a></item>
+    /// <item>Enclosed Alphanumerics: <a href="http://www.unicode.org/charts/PDF/U2460.pdf">http://www.unicode.org/charts/PDF/U2460.pdf</a></item>
+    /// <item>Dingbats: <a href="http://www.unicode.org/charts/PDF/U2700.pdf">http://www.unicode.org/charts/PDF/U2700.pdf</a></item>
+    /// <item>Supplemental Punctuation: <a href="http://www.unicode.org/charts/PDF/U2E00.pdf">http://www.unicode.org/charts/PDF/U2E00.pdf</a></item>
+    /// <item>Alphabetic Presentation Forms: <a href="http://www.unicode.org/charts/PDF/UFB00.pdf">http://www.unicode.org/charts/PDF/UFB00.pdf</a></item>
+    /// <item>Halfwidth and Fullwidth Forms: <a href="http://www.unicode.org/charts/PDF/UFF00.pdf">http://www.unicode.org/charts/PDF/UFF00.pdf</a></item>
+	/// </list>
 	/// 
 	/// See: <a href="http://en.wikipedia.org/wiki/Latin_characters_in_Unicode">http://en.wikipedia.org/wiki/Latin_characters_in_Unicode</a>
 	/// 
 	/// The set of character conversions supported by this class is a superset of
-	/// those supported by Lucene's {@link ISOLatin1AccentFilter} which strips
+	/// those supported by Lucene's <see cref="ISOLatin1AccentFilter" /> which strips
 	/// accents from Latin1 characters.  For example, '&#192;' will be replaced by
 	/// 'a'.
 	/// </summary>

Modified: incubator/lucene.net/trunk/src/core/Analysis/Analyzer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/Analyzer.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/Analyzer.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/Analyzer.cs Sun Nov  6 05:24:26 2011
@@ -23,7 +23,6 @@ using CloseableThreadLocal = Lucene.Net.
 
 namespace Lucene.Net.Analysis
 {
-	
 	/// <summary>An Analyzer builds TokenStreams, which analyze text.  It thus represents a
 	/// policy for extracting index terms from text.
 	/// <p/>
@@ -144,14 +143,14 @@ namespace Lucene.Net.Analysis
 		/// </summary>
 		/// <param name="fieldName">Fieldable name being indexed.
 		/// </param>
-		/// <returns> position increment gap, added to the next token emitted from {@link #TokenStream(String,Reader)}
+		/// <returns> position increment gap, added to the next token emitted from <see cref="TokenStream(String,System.IO.TextReader)" />
 		/// </returns>
 		public virtual int GetPositionIncrementGap(System.String fieldName)
 		{
 			return 0;
 		}
 		
-		/// <summary> Just like {@link #getPositionIncrementGap}, except for
+		/// <summary> Just like <see cref="GetPositionIncrementGap" />, except for
 		/// Token offsets instead.  By default this returns 1 for
 		/// tokenized fields and, as if the fields were joined
 		/// with an extra space character, and 0 for un-tokenized
@@ -161,7 +160,7 @@ namespace Lucene.Net.Analysis
 		/// </summary>
 		/// <param name="field">the field just indexed
 		/// </param>
-		/// <returns> offset gap, added to the next token emitted from {@link #TokenStream(String,Reader)}
+		/// <returns> offset gap, added to the next token emitted from <see cref="TokenStream(String,System.IO.TextReader)" />
 		/// </returns>
 		public virtual int GetOffsetGap(Fieldable field)
 		{

Modified: incubator/lucene.net/trunk/src/core/Analysis/BaseCharFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/BaseCharFilter.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/BaseCharFilter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/BaseCharFilter.cs Sun Nov  6 05:24:26 2011
@@ -22,9 +22,9 @@ namespace Lucene.Net.Analysis
 {
 
     /// <summary>
-    /// * Base utility class for implementing a {@link CharFilter}.
+    /// * Base utility class for implementing a <see cref="CharFilter" />.
     /// * You subclass this, and then record mappings by calling
-    /// * {@link #addOffCorrectMap}, and then invoke the correct
+    /// * <see cref="AddOffCorrectMap" />, and then invoke the correct
     /// * method to correct an offset.
     /// </summary>
     public abstract class BaseCharFilter : CharFilter

Modified: incubator/lucene.net/trunk/src/core/Analysis/CachingTokenFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/CachingTokenFilter.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/CachingTokenFilter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/CachingTokenFilter.cs Sun Nov  6 05:24:26 2011
@@ -27,7 +27,7 @@ namespace Lucene.Net.Analysis
 	/// all token attribute states locally in a List.
 	/// 
 	/// <p/>CachingTokenFilter implements the optional method
-	/// {@link TokenStream#Reset()}, which repositions the
+	/// <see cref="TokenStream.Reset()" />, which repositions the
 	/// stream to the first Token. 
 	/// </summary>
 	public class CachingTokenFilter:TokenFilter

Modified: incubator/lucene.net/trunk/src/core/Analysis/CharArraySet.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/CharArraySet.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/CharArraySet.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/CharArraySet.cs Sun Nov  6 05:24:26 2011
@@ -73,7 +73,7 @@ namespace Lucene.Net.Analysis
 			this.count = count;
 		}
 		
-		/// <summary>true if the <code>len</code> chars of <code>text</code> starting at <code>off</code>
+		/// <summary>true if the <c>len</c> chars of <c>text</c> starting at <c>off</c>
 		/// are in the set 
 		/// </summary>
 		public virtual bool Contains(char[] text, int off, int len)
@@ -81,7 +81,7 @@ namespace Lucene.Net.Analysis
 			return entries[GetSlot(text, off, len)] != null;
 		}
 		
-		/// <summary>true if the <code>System.String</code> is in the set </summary>
+		/// <summary>true if the <c>System.String</c> is in the set </summary>
 		public virtual bool Contains(System.String cs)
 		{
 			return entries[GetSlot(cs)] != null;
@@ -306,17 +306,15 @@ namespace Lucene.Net.Analysis
 			return Add(o.ToString());
 		}
 		
-		/// <summary> Returns an unmodifiable {@link CharArraySet}. This allows to provide
+		/// <summary> Returns an unmodifiable <see cref="CharArraySet" />. This allows providing
 		/// unmodifiable views of internal sets for "read-only" use.
-		/// 
 		/// </summary>
-		/// <param name="set">a set for which the unmodifiable set is returned.
+        /// <param name="set_Renamed">a set for which the unmodifiable set is returned.
 		/// </param>
-		/// <returns> an new unmodifiable {@link CharArraySet}.
+		/// <returns> a new unmodifiable <see cref="CharArraySet" />.
 		/// </returns>
-		/// <throws>  NullPointerException </throws>
-		/// <summary>           if the given set is <code>null</code>.
-		/// </summary>
+        /// <exception cref="NullReferenceException">NullReferenceException thrown 
+        /// if the given set is <c>null</c>.</exception>
 		public static CharArraySet UnmodifiableSet(CharArraySet set_Renamed)
 		{
 			if (set_Renamed == null)
@@ -329,7 +327,7 @@ namespace Lucene.Net.Analysis
 		}
 
         /// <summary>The Iterator&lt;String&gt; for this set.  Strings are constructed on the fly, so
-		/// use <code>nextCharArray</code> for more efficient access. 
+		/// use <c>nextCharArray</c> for more efficient access. 
 		/// </summary>
 		public class CharArraySetIterator : System.Collections.IEnumerator
 		{
@@ -403,10 +401,10 @@ namespace Lucene.Net.Analysis
 			return new CharArraySetIterator(this);
 		}
 		
-		/// <summary> Efficient unmodifiable {@link CharArraySet}. This implementation does not
-		/// delegate calls to a give {@link CharArraySet} like
-		/// {@link Collections#UnmodifiableSet(java.util.Set)} does. Instead is passes
-		/// the internal representation of a {@link CharArraySet} to a super
+		/// <summary> Efficient unmodifiable <see cref="CharArraySet" />. This implementation does not
+		/// delegate calls to a given <see cref="CharArraySet" /> like
+		/// Collections.UnmodifiableSet(java.util.Set) does. Instead it passes
+		/// the internal representation of a <see cref="CharArraySet" /> to a super
 		/// constructor and overrides all mutators. 
 		/// </summary>
 		private sealed class UnmodifiableCharArraySet:CharArraySet

Modified: incubator/lucene.net/trunk/src/core/Analysis/CharFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/CharFilter.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/CharFilter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/CharFilter.cs Sun Nov  6 05:24:26 2011
@@ -21,8 +21,8 @@ namespace Lucene.Net.Analysis
 {
 	
 	/// <summary> Subclasses of CharFilter can be chained to filter CharStream.
-	/// They can be used as {@link java.io.Reader} with additional offset
-	/// correction. {@link Tokenizer}s will automatically use {@link #CorrectOffset}
+	/// They can be used as <see cref="System.IO.TextReader" /> with additional offset
+	/// correction. <see cref="Tokenizer" />s will automatically use <see cref="CorrectOffset" />
 	/// if a CharFilter/CharStream subclass is used.
 	/// 
 	/// </summary>

Modified: incubator/lucene.net/trunk/src/core/Analysis/CharReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/CharReader.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/CharReader.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/CharReader.cs Sun Nov  6 05:24:26 2011
@@ -21,8 +21,8 @@ namespace Lucene.Net.Analysis
 {
 	
 	/// <summary> CharReader is a Reader wrapper. It reads chars from
-	/// Reader and outputs {@link CharStream}, defining an
-	/// identify function {@link #CorrectOffset} method that
+	/// Reader and outputs <see cref="CharStream" />, defining an
+	/// identity function <see cref="CorrectOffset" /> method that
 	/// simply returns the provided offset.
 	/// </summary>
 	public sealed class CharReader:CharStream

Modified: incubator/lucene.net/trunk/src/core/Analysis/CharStream.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/CharStream.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/CharStream.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/CharStream.cs Sun Nov  6 05:24:26 2011
@@ -20,11 +20,11 @@ using System;
 namespace Lucene.Net.Analysis
 {
 	
-	/// <summary> CharStream adds {@link #CorrectOffset}
-	/// functionality over {@link Reader}.  All Tokenizers accept a
-	/// CharStream instead of {@link Reader} as input, which enables
+	/// <summary> CharStream adds <see cref="CorrectOffset" />
+	/// functionality over <see cref="System.IO.TextReader" />.  All Tokenizers accept a
+	/// CharStream instead of <see cref="System.IO.TextReader" /> as input, which enables
 	/// arbitrary character based filtering before tokenization. 
-	/// The {@link #CorrectOffset} method fixed offsets to account for
+	/// The <see cref="CorrectOffset" /> method fixes offsets to account for
 	/// removal or insertion of characters, so that the offsets
 	/// reported in the tokens match the character offsets of the
 	/// original Reader.

Modified: incubator/lucene.net/trunk/src/core/Analysis/ISOLatin1AccentFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/ISOLatin1AccentFilter.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/ISOLatin1AccentFilter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/ISOLatin1AccentFilter.cs Sun Nov  6 05:24:26 2011
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis
 	/// <p/>
 	/// 
 	/// </summary>
-	/// <deprecated> in favor of {@link ASCIIFoldingFilter} which covers a superset 
+	/// <deprecated> in favor of <see cref="ASCIIFoldingFilter" /> which covers a superset 
 	/// of Latin 1. This class will be removed in Lucene 3.0.
 	/// </deprecated>
     [Obsolete("in favor of ASCIIFoldingFilter which covers a superset of Latin 1. This class will be removed in Lucene 3.0.")]

Modified: incubator/lucene.net/trunk/src/core/Analysis/LetterTokenizer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/LetterTokenizer.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/LetterTokenizer.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/LetterTokenizer.cs Sun Nov  6 05:24:26 2011
@@ -36,18 +36,18 @@ namespace Lucene.Net.Analysis
 		{
 		}
 		
-		/// <summary>Construct a new LetterTokenizer using a given {@link AttributeSource}. </summary>
+		/// <summary>Construct a new LetterTokenizer using a given <see cref="AttributeSource" />. </summary>
 		public LetterTokenizer(AttributeSource source, System.IO.TextReader in_Renamed):base(source, in_Renamed)
 		{
 		}
 		
-		/// <summary>Construct a new LetterTokenizer using a given {@link Lucene.Net.Util.AttributeSource.AttributeFactory}. </summary>
+		/// <summary>Construct a new LetterTokenizer using a given <see cref="Lucene.Net.Util.AttributeSource.AttributeFactory" />. </summary>
 		public LetterTokenizer(AttributeFactory factory, System.IO.TextReader in_Renamed):base(factory, in_Renamed)
 		{
 		}
 		
 		/// <summary>Collects only characters which satisfy
-		/// {@link Character#isLetter(char)}.
+		/// <see cref="char.IsLetter(char)" />.
 		/// </summary>
 		protected internal override bool IsTokenChar(char c)
 		{

Modified: incubator/lucene.net/trunk/src/core/Analysis/LowerCaseTokenizer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/LowerCaseTokenizer.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/LowerCaseTokenizer.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/LowerCaseTokenizer.cs Sun Nov  6 05:24:26 2011
@@ -38,18 +38,18 @@ namespace Lucene.Net.Analysis
 		{
 		}
 		
-		/// <summary>Construct a new LowerCaseTokenizer using a given {@link AttributeSource}. </summary>
+		/// <summary>Construct a new LowerCaseTokenizer using a given <see cref="AttributeSource" />. </summary>
 		public LowerCaseTokenizer(AttributeSource source, System.IO.TextReader in_Renamed):base(source, in_Renamed)
 		{
 		}
 		
-		/// <summary>Construct a new LowerCaseTokenizer using a given {@link Lucene.Net.Util.AttributeSource.AttributeFactory}. </summary>
+		/// <summary>Construct a new LowerCaseTokenizer using a given <see cref="Lucene.Net.Util.AttributeSource.AttributeFactory" />. </summary>
 		public LowerCaseTokenizer(AttributeFactory factory, System.IO.TextReader in_Renamed):base(factory, in_Renamed)
 		{
 		}
 		
 		/// <summary>Converts char to lower case
-		/// {@link Character#toLowerCase(char)}.
+		/// <see cref="char.ToLower(char)" />.
 		/// </summary>
 		protected internal override char Normalize(char c)
 		{

Modified: incubator/lucene.net/trunk/src/core/Analysis/MappingCharFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/MappingCharFilter.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/MappingCharFilter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/MappingCharFilter.cs Sun Nov  6 05:24:26 2011
@@ -20,8 +20,8 @@ using System;
 namespace Lucene.Net.Analysis
 {
 	
-	/// <summary> Simplistic {@link CharFilter} that applies the mappings
-	/// contained in a {@link NormalizeCharMap} to the character
+	/// <summary> Simplistic <see cref="CharFilter" /> that applies the mappings
+	/// contained in a <see cref="NormalizeCharMap" /> to the character
 	/// stream, and correcting the resulting changes to the
 	/// offsets.
 	/// </summary>
@@ -35,13 +35,13 @@ namespace Lucene.Net.Analysis
 		private int charPointer;
 		private int nextCharCounter;
 		
-		/// Default constructor that takes a {@link CharStream}.
+		/// Default constructor that takes a <see cref="CharStream" />.
 		public MappingCharFilter(NormalizeCharMap normMap, CharStream in_Renamed):base(in_Renamed)
 		{
 			this.normMap = normMap;
 		}
 		
-		/// Easy-use constructor that takes a {@link Reader}.
+		/// Easy-use constructor that takes a <see cref="System.IO.TextReader" />.
 		public MappingCharFilter(NormalizeCharMap normMap, System.IO.TextReader in_Renamed):base(CharReader.Get(in_Renamed))
 		{
 			this.normMap = normMap;

Modified: incubator/lucene.net/trunk/src/core/Analysis/NormalizeCharMap.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/NormalizeCharMap.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/NormalizeCharMap.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/NormalizeCharMap.cs Sun Nov  6 05:24:26 2011
@@ -21,7 +21,7 @@ namespace Lucene.Net.Analysis
 {
 	
 	/// <summary> Holds a map of String input to String output, to be used
-	/// with {@link MappingCharFilter}.
+	/// with <see cref="MappingCharFilter" />.
 	/// </summary>
 	public class NormalizeCharMap
 	{
@@ -32,9 +32,9 @@ namespace Lucene.Net.Analysis
 		internal int diff;
 		
 		/// <summary>Records a replacement to be applied to the inputs
-		/// stream.  Whenever <code>singleMatch</code> occurs in
+		/// stream.  Whenever <c>singleMatch</c> occurs in
 		/// the input, it will be replaced with
-		/// <code>replacement</code>.
+		/// <c>replacement</c>.
 		/// 
 		/// </summary>
 		/// <param name="singleMatch">input String to be replaced

Modified: incubator/lucene.net/trunk/src/core/Analysis/NumericTokenStream.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/NumericTokenStream.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/NumericTokenStream.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/NumericTokenStream.cs Sun Nov  6 05:24:26 2011
@@ -33,32 +33,32 @@ using PositionIncrementAttribute = Lucen
 namespace Lucene.Net.Analysis
 {
 	
-	/// <summary> <b>Expert:</b> This class provides a {@link TokenStream}
-	/// for indexing numeric values that can be used by {@link
-	/// NumericRangeQuery} or {@link NumericRangeFilter}.
+	/// <summary> <b>Expert:</b> This class provides a <see cref="TokenStream" />
+	/// for indexing numeric values that can be used by <see cref="NumericRangeQuery" />
+	/// or <see cref="NumericRangeFilter" />.
 	/// 
-	/// <p/>Note that for simple usage, {@link NumericField} is
-	/// recommended.  {@link NumericField} disables norms and
+	/// <p/>Note that for simple usage, <see cref="NumericField" /> is
+	/// recommended.  <see cref="NumericField" /> disables norms and
 	/// term freqs, as they are not usually needed during
 	/// searching.  If you need to change these settings, you
 	/// should use this class.
 	/// 
-	/// <p/>See {@link NumericField} for capabilities of fields
+	/// <p/>See <see cref="NumericField" /> for capabilities of fields
 	/// indexed numerically.<p/>
 	/// 
-	/// <p/>Here's an example usage, for an <code>int</code> field:
+	/// <p/>Here's an example usage, for an <c>int</c> field:
 	/// 
-	/// <pre>
+	/// <code>
 	///  Field field = new Field(name, new NumericTokenStream(precisionStep).setIntValue(value));
 	///  field.setOmitNorms(true);
 	///  field.setOmitTermFreqAndPositions(true);
 	///  document.add(field);
-	/// </pre>
+	/// </code>
 	/// 
 	/// <p/>For optimal performance, re-use the TokenStream and Field instance
 	/// for more than one document:
 	/// 
-	/// <pre>
+	/// <code>
 	///  NumericTokenStream stream = new NumericTokenStream(precisionStep);
 	///  Field field = new Field(name, stream);
 	///  field.setOmitNorms(true);
@@ -70,7 +70,7 @@ namespace Lucene.Net.Analysis
 	///    stream.setIntValue(value)
 	///    writer.addDocument(document);
 	///  }
-	/// </pre>
+	/// </code>
 	/// 
 	/// <p/>This stream is not intended to be used in analyzers;
 	/// it's more for iterating the different precisions during
@@ -78,20 +78,17 @@ namespace Lucene.Net.Analysis
 	/// 
 	/// <p/><b>NOTE</b>: as token streams are only consumed once
 	/// the document is added to the index, if you index more
-	/// than one numeric field, use a separate <code>NumericTokenStream</code>
+	/// than one numeric field, use a separate <c>NumericTokenStream</c>
 	/// instance for each.<p/>
 	/// 
-	/// <p/>See {@link NumericRangeQuery} for more details on the
-	/// <a
-	/// href="../search/NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
+	/// <p/>See <see cref="NumericRangeQuery" /> for more details on the
+	/// <a href="../search/NumericRangeQuery.html#precisionStepDesc"><c>precisionStep</c></a>
 	/// parameter as well as how numeric fields work under the hood.<p/>
 	/// 
 	/// <p/><font color="red"><b>NOTE:</b> This API is experimental and
 	/// might change in incompatible ways in the next release.</font>
-	/// 
+	///   Since 2.9
 	/// </summary>
-	/// <since> 2.9
-	/// </since>
 	public sealed class NumericTokenStream:TokenStream
 	{
 		private void  InitBlock()
@@ -107,8 +104,8 @@ namespace Lucene.Net.Analysis
 		/// <summary>The lower precision tokens gets this token type assigned. </summary>
 		public const System.String TOKEN_TYPE_LOWER_PREC = "lowerPrecNumeric";
 		
-		/// <summary> Creates a token stream for numeric values using the default <code>precisionStep</code>
-		/// {@link NumericUtils#PRECISION_STEP_DEFAULT} (4). The stream is not yet initialized,
+		/// <summary> Creates a token stream for numeric values using the default <c>precisionStep</c>
+		/// <see cref="NumericUtils.PRECISION_STEP_DEFAULT" /> (4). The stream is not yet initialized,
 		/// before using set a value using the various set<em>???</em>Value() methods.
 		/// </summary>
 		public NumericTokenStream():this(NumericUtils.PRECISION_STEP_DEFAULT)
@@ -116,7 +113,7 @@ namespace Lucene.Net.Analysis
 		}
 		
 		/// <summary> Creates a token stream for numeric values with the specified
-		/// <code>precisionStep</code>. The stream is not yet initialized,
+		/// <c>precisionStep</c>. The stream is not yet initialized,
 		/// before using set a value using the various set<em>???</em>Value() methods.
 		/// </summary>
 		public NumericTokenStream(int precisionStep):base()
@@ -128,7 +125,7 @@ namespace Lucene.Net.Analysis
 		}
 		
 		/// <summary> Expert: Creates a token stream for numeric values with the specified
-		/// <code>precisionStep</code> using the given {@link AttributeSource}.
+		/// <c>precisionStep</c> using the given <see cref="AttributeSource" />.
 		/// The stream is not yet initialized,
 		/// before using set a value using the various set<em>???</em>Value() methods.
 		/// </summary>
@@ -141,8 +138,8 @@ namespace Lucene.Net.Analysis
 		}
 		
 		/// <summary> Expert: Creates a token stream for numeric values with the specified
-		/// <code>precisionStep</code> using the given
-		/// {@link org.apache.lucene.util.AttributeSource.AttributeFactory}.
+		/// <c>precisionStep</c> using the given
+		/// <see cref="Lucene.Net.Util.AttributeSource.AttributeFactory" />.
 		/// The stream is not yet initialized,
 		/// before using set a value using the various set<em>???</em>Value() methods.
 		/// </summary>
@@ -154,11 +151,11 @@ namespace Lucene.Net.Analysis
 				throw new System.ArgumentException("precisionStep must be >=1");
 		}
 		
-		/// <summary> Initializes the token stream with the supplied <code>long</code> value.</summary>
-		/// <param name="value">the value, for which this TokenStream should enumerate tokens.
+		/// <summary> Initializes the token stream with the supplied <c>long</c> value.</summary>
+		/// <param name="value_Renamed">the value, for which this TokenStream should enumerate tokens.
 		/// </param>
 		/// <returns> this instance, because of this you can use it the following way:
-		/// <code>new Field(name, new NumericTokenStream(precisionStep).SetLongValue(value))</code>
+		/// <c>new Field(name, new NumericTokenStream(precisionStep).SetLongValue(value))</c>
 		/// </returns>
 		public NumericTokenStream SetLongValue(long value_Renamed)
 		{
@@ -168,11 +165,11 @@ namespace Lucene.Net.Analysis
 			return this;
 		}
 		
-		/// <summary> Initializes the token stream with the supplied <code>int</code> value.</summary>
-		/// <param name="value">the value, for which this TokenStream should enumerate tokens.
+		/// <summary> Initializes the token stream with the supplied <c>int</c> value.</summary>
+		/// <param name="value_Renamed">the value, for which this TokenStream should enumerate tokens.
 		/// </param>
 		/// <returns> this instance, because of this you can use it the following way:
-		/// <code>new Field(name, new NumericTokenStream(precisionStep).SetIntValue(value))</code>
+		/// <c>new Field(name, new NumericTokenStream(precisionStep).SetIntValue(value))</c>
 		/// </returns>
 		public NumericTokenStream SetIntValue(int value_Renamed)
 		{
@@ -182,11 +179,11 @@ namespace Lucene.Net.Analysis
 			return this;
 		}
 		
-		/// <summary> Initializes the token stream with the supplied <code>double</code> value.</summary>
-		/// <param name="value">the value, for which this TokenStream should enumerate tokens.
+		/// <summary> Initializes the token stream with the supplied <c>double</c> value.</summary>
+		/// <param name="value_Renamed">the value, for which this TokenStream should enumerate tokens.
 		/// </param>
 		/// <returns> this instance, because of this you can use it the following way:
-		/// <code>new Field(name, new NumericTokenStream(precisionStep).SetDoubleValue(value))</code>
+		/// <c>new Field(name, new NumericTokenStream(precisionStep).SetDoubleValue(value))</c>
 		/// </returns>
 		public NumericTokenStream SetDoubleValue(double value_Renamed)
 		{
@@ -196,11 +193,11 @@ namespace Lucene.Net.Analysis
 			return this;
 		}
 		
-		/// <summary> Initializes the token stream with the supplied <code>float</code> value.</summary>
-		/// <param name="value">the value, for which this TokenStream should enumerate tokens.
+		/// <summary> Initializes the token stream with the supplied <c>float</c> value.</summary>
+		/// <param name="value_Renamed">the value, for which this TokenStream should enumerate tokens.
 		/// </param>
 		/// <returns> this instance, because of this you can use it the following way:
-		/// <code>new Field(name, new NumericTokenStream(precisionStep).SetFloatValue(value))</code>
+		/// <c>new Field(name, new NumericTokenStream(precisionStep).SetFloatValue(value))</c>
 		/// </returns>
 		public NumericTokenStream SetFloatValue(float value_Renamed)
 		{

Modified: incubator/lucene.net/trunk/src/core/Analysis/PerFieldAnalyzerWrapper.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/PerFieldAnalyzerWrapper.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/PerFieldAnalyzerWrapper.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/PerFieldAnalyzerWrapper.cs Sun Nov  6 05:24:26 2011
@@ -21,17 +21,17 @@ namespace Lucene.Net.Analysis
 {
 	
 	/// <summary> This analyzer is used to facilitate scenarios where different
-	/// fields require different analysis techniques.  Use {@link #addAnalyzer}
+	/// fields require different analysis techniques.  Use <see cref="AddAnalyzer" />
 	/// to add a non-default analyzer on a field name basis.
 	/// 
 	/// <p/>Example usage:
 	/// 
-	/// <pre>
+	/// <code>
 	/// PerFieldAnalyzerWrapper aWrapper =
 	/// new PerFieldAnalyzerWrapper(new StandardAnalyzer());
 	/// aWrapper.addAnalyzer("firstname", new KeywordAnalyzer());
 	/// aWrapper.addAnalyzer("lastname", new KeywordAnalyzer());
-	/// </pre>
+	/// </code>
 	/// 
 	/// <p/>In this example, StandardAnalyzer will be used for all fields except "firstname"
 	/// and "lastname", for which KeywordAnalyzer will be used.

Modified: incubator/lucene.net/trunk/src/core/Analysis/PorterStemFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/PorterStemFilter.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/PorterStemFilter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/PorterStemFilter.cs Sun Nov  6 05:24:26 2011
@@ -32,13 +32,13 @@ namespace Lucene.Net.Analysis
 	/// To use this with LowerCaseTokenizer, for example, you'd write an
 	/// analyzer like this:
 	/// <p/>
-	/// <PRE>
+	/// <code>
 	/// class MyAnalyzer extends Analyzer {
-	/// public final TokenStream tokenStream(String fieldName, Reader reader) {
-	/// return new PorterStemFilter(new LowerCaseTokenizer(reader));
+	///     public final TokenStream tokenStream(String fieldName, Reader reader) {
+	///          return new PorterStemFilter(new LowerCaseTokenizer(reader));
+	///     }
 	/// }
-	/// }
-	/// </PRE>
+	/// </code>
 	/// </summary>
 	public sealed class PorterStemFilter:TokenFilter
 	{

Modified: incubator/lucene.net/trunk/src/core/Analysis/SimpleAnalyzer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/SimpleAnalyzer.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/SimpleAnalyzer.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/SimpleAnalyzer.cs Sun Nov  6 05:24:26 2011
@@ -20,8 +20,8 @@ using System;
 namespace Lucene.Net.Analysis
 {
 	
-	/// <summary>An {@link Analyzer} that filters {@link LetterTokenizer} 
-	/// with {@link LowerCaseFilter} 
+	/// <summary>An <see cref="Analyzer" /> that filters <see cref="LetterTokenizer" /> 
+	/// with <see cref="LowerCaseFilter" /> 
 	/// </summary>
 	
 	public sealed class SimpleAnalyzer:Analyzer

Modified: incubator/lucene.net/trunk/src/core/Analysis/SinkTokenizer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/SinkTokenizer.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/SinkTokenizer.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/SinkTokenizer.cs Sun Nov  6 05:24:26 2011
@@ -23,13 +23,13 @@ namespace Lucene.Net.Analysis
 	
 	/// <summary> A SinkTokenizer can be used to cache Tokens for use in an Analyzer
 	/// <p/>
-	/// WARNING: {@link TeeTokenFilter} and {@link SinkTokenizer} only work with the old TokenStream API.
-	/// If you switch to the new API, you need to use {@link TeeSinkTokenFilter} instead, which offers 
+	/// WARNING: <see cref="TeeTokenFilter" /> and <see cref="SinkTokenizer" /> only work with the old TokenStream API.
+	/// If you switch to the new API, you need to use <see cref="TeeSinkTokenFilter" /> instead, which offers 
 	/// the same functionality.
 	/// </summary>
 	/// <seealso cref="TeeTokenFilter">
 	/// </seealso>
-	/// <deprecated> Use {@link TeeSinkTokenFilter} instead
+	/// <deprecated> Use <see cref="TeeSinkTokenFilter" /> instead
 	/// 
 	/// 
 	/// </deprecated>
@@ -58,14 +58,14 @@ namespace Lucene.Net.Analysis
 		
 		/// <summary> Get the tokens in the internal List.
 		/// <p/>
-		/// WARNING: Adding tokens to this list requires the {@link #Reset()} method to be called in order for them
-		/// to be made available.  Also, this Tokenizer does nothing to protect against {@link java.util.ConcurrentModificationException}s
-		/// in the case of adds happening while {@link #Next(Lucene.Net.Analysis.Token)} is being called.
+		/// WARNING: Adding tokens to this list requires the <see cref="Reset()" /> method to be called in order for them
+        /// to be made available.  Also, this Tokenizer does nothing to protect against <see cref="System.InvalidOperationException" />s
+		/// in the case of adds happening while <see cref="Next(Token)" /> is being called.
 		/// <p/>
 		/// WARNING: Since this SinkTokenizer can be reset and the cached tokens made available again, do not modify them. Modify clones instead.
 		/// 
 		/// </summary>
-		/// <returns> A List of {@link Lucene.Net.Analysis.Token}s
+		/// <returns> A List of <see cref="Lucene.Net.Analysis.Token" />s
 		/// </returns>
 		public virtual System.Collections.IList GetTokens()
 		{
@@ -73,7 +73,7 @@ namespace Lucene.Net.Analysis
 		}
 		
 		/// <summary> Returns the next token out of the list of cached tokens</summary>
-		/// <returns> The next {@link Lucene.Net.Analysis.Token} in the Sink.
+		/// <returns> The next <see cref="Lucene.Net.Analysis.Token" /> in the Sink.
 		/// </returns>
 		/// <throws>  IOException </throws>
         [Obsolete("Lucene.Net-2.9.1. This method overrides obsolete member Lucene.Net.Analysis.TokenStream.Next(Lucene.Net.Analysis.Token)")]
@@ -95,7 +95,7 @@ namespace Lucene.Net.Analysis
 		/// on the old tokens.
 		/// 
 		/// </summary>
-		/// <param name="t">The {@link Lucene.Net.Analysis.Token} to add to the sink
+		/// <param name="t">The <see cref="Lucene.Net.Analysis.Token" /> to add to the sink
 		/// </param>
 		public virtual void  Add(Token t)
 		{
@@ -112,7 +112,7 @@ namespace Lucene.Net.Analysis
 		}
 		
 		/// <summary> Reset the internal data structures to the start at the front of the list of tokens.  Should be called
-		/// if tokens were added to the list after an invocation of {@link #Next(Token)}
+		/// if tokens were added to the list after an invocation of <see cref="Next(Token)" />
 		/// </summary>
 		/// <throws>  IOException </throws>
 		public override void  Reset()

Modified: incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardAnalyzer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardAnalyzer.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardAnalyzer.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardAnalyzer.cs Sun Nov  6 05:24:26 2011
@@ -16,26 +16,26 @@
  */
 
 using System;
-
+using System.Collections;
 using Lucene.Net.Analysis;
 using Version = Lucene.Net.Util.Version;
 
 namespace Lucene.Net.Analysis.Standard
 {
 	
-	/// <summary> Filters {@link StandardTokenizer} with {@link StandardFilter},
-	/// {@link LowerCaseFilter} and {@link StopFilter}, using a list of English stop
+	/// <summary> Filters <see cref="StandardTokenizer" /> with <see cref="StandardFilter" />,
+	/// <see cref="LowerCaseFilter" /> and <see cref="StopFilter" />, using a list of English stop
 	/// words.
 	/// 
 	/// <a name="version"/>
 	/// <p/>
-	/// You must specify the required {@link Version} compatibility when creating
+	/// You must specify the required <see cref="Version" /> compatibility when creating
 	/// StandardAnalyzer:
-	/// <ul>
-	/// <li>As of 2.9, StopFilter preserves position increments</li>
-	/// <li>As of 2.4, Tokens incorrectly identified as acronyms are corrected (see
-	/// <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1608</a></li>
-	/// </ul>
+	/// <list type="bullet">
+	/// <item>As of 2.9, StopFilter preserves position increments</item>
+	/// <item>As of 2.4, Tokens incorrectly identified as acronyms are corrected (see
+	/// <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1068</a>)</item>
+	/// </list>
 	/// 
 	/// </summary>
 	/// <version>  $Id: StandardAnalyzer.java 829134 2009-10-23 17:18:53Z mikemccand $
@@ -99,7 +99,7 @@ namespace Lucene.Net.Analysis.Standard
 		/// <summary>An array containing some common English words that are usually not
 		/// useful for searching. 
 		/// </summary>
-		/// <deprecated> Use {@link #STOP_WORDS_SET} instead 
+		/// <deprecated> Use <see cref="STOP_WORDS_SET" /> instead 
 		/// </deprecated>
         [Obsolete("Use STOP_WORDS_SET instead ")]
 		public static readonly System.String[] STOP_WORDS;
@@ -109,28 +109,27 @@ namespace Lucene.Net.Analysis.Standard
 		/// </summary>
 		public static readonly System.Collections.Hashtable STOP_WORDS_SET;
 		
-		/// <summary>Builds an analyzer with the default stop words ({@link
-		/// #STOP_WORDS_SET}).
+		/// <summary>Builds an analyzer with the default stop words 
+		/// (<see cref="STOP_WORDS_SET" />).
 		/// </summary>
-		/// <deprecated> Use {@link #StandardAnalyzer(Version)} instead. 
+		/// <deprecated> Use <see cref="StandardAnalyzer(Version)" /> instead. 
 		/// </deprecated>
         [Obsolete("Use StandardAnalyzer(Version) instead")]
 		public StandardAnalyzer():this(Version.LUCENE_24, STOP_WORDS_SET)
 		{
 		}
 		
-		/// <summary>Builds an analyzer with the default stop words ({@link
-		/// #STOP_WORDS}).
+		/// <summary>Builds an analyzer with the default stop words (<see cref="STOP_WORDS" />).
 		/// </summary>
-		/// <param name="matchVersion">Lucene version to match See {@link
-		/// <a href="#version">above</a>}
+	/// <param name="matchVersion">Lucene version to match See <see cref="Version">above</see>
+		///
 		/// </param>
 		public StandardAnalyzer(Version matchVersion):this(matchVersion, STOP_WORDS_SET)
 		{
 		}
 		
 		/// <summary>Builds an analyzer with the given stop words.</summary>
-		/// <deprecated> Use {@link #StandardAnalyzer(Version, Set)}
+		/// <deprecated> Use <see cref="StandardAnalyzer(Version, Hashtable)" />
 		/// instead 
 		/// </deprecated>
         [Obsolete("Use StandardAnalyzer(Version, Set) instead")]
@@ -139,8 +138,8 @@ namespace Lucene.Net.Analysis.Standard
 		}
 		
 		/// <summary>Builds an analyzer with the given stop words.</summary>
-		/// <param name="matchVersion">Lucene version to match See {@link
-		/// <a href="#version">above</a>}
+        /// <param name="matchVersion">Lucene version to match See <see cref="Version">above</see>
+		///
 		/// </param>
 		/// <param name="stopWords">stop words 
 		/// </param>
@@ -151,7 +150,7 @@ namespace Lucene.Net.Analysis.Standard
 		}
 		
 		/// <summary>Builds an analyzer with the given stop words.</summary>
-		/// <deprecated> Use {@link #StandardAnalyzer(Version, Set)} instead 
+		/// <deprecated> Use <see cref="StandardAnalyzer(Version, Hashtable)" /> instead 
 		/// </deprecated>
         [Obsolete("Use StandardAnalyzer(Version, Set) instead")]
 		public StandardAnalyzer(System.String[] stopWords):this(Version.LUCENE_24, StopFilter.MakeStopSet(stopWords))
@@ -159,9 +158,9 @@ namespace Lucene.Net.Analysis.Standard
 		}
 		
 		/// <summary>Builds an analyzer with the stop words from the given file.</summary>
-		/// <seealso cref="WordlistLoader.GetWordSet(File)">
+		/// <seealso cref="WordlistLoader.GetWordSet(System.IO.FileInfo)">
 		/// </seealso>
-		/// <deprecated> Use {@link #StandardAnalyzer(Version, File)}
+		/// <deprecated> Use <see cref="StandardAnalyzer(Version, System.IO.FileInfo)" />
 		/// instead
 		/// </deprecated>
         [Obsolete("Use StandardAnalyzer(Version, File) instead")]
@@ -170,10 +169,10 @@ namespace Lucene.Net.Analysis.Standard
 		}
 		
 		/// <summary>Builds an analyzer with the stop words from the given file.</summary>
-		/// <seealso cref="WordlistLoader.GetWordSet(File)">
+		/// <seealso cref="WordlistLoader.GetWordSet(System.IO.FileInfo)">
 		/// </seealso>
-		/// <param name="matchVersion">Lucene version to match See {@link
-		/// <a href="#version">above</a>}
+        /// <param name="matchVersion">Lucene version to match See <see cref="Version">above</see>
+		///
 		/// </param>
 		/// <param name="stopwords">File to read stop words from 
 		/// </param>
@@ -184,9 +183,9 @@ namespace Lucene.Net.Analysis.Standard
 		}
 		
 		/// <summary>Builds an analyzer with the stop words from the given reader.</summary>
-		/// <seealso cref="WordlistLoader.GetWordSet(Reader)">
+        /// <seealso cref="WordlistLoader.GetWordSet(System.IO.TextReader)">
 		/// </seealso>
-		/// <deprecated> Use {@link #StandardAnalyzer(Version, Reader)}
+		/// <deprecated> Use <see cref="StandardAnalyzer(Version, System.IO.TextReader)" />
 		/// instead
 		/// </deprecated>
         [Obsolete("Use StandardAnalyzer(Version, Reader) instead")]
@@ -195,10 +194,10 @@ namespace Lucene.Net.Analysis.Standard
 		}
 		
 		/// <summary>Builds an analyzer with the stop words from the given reader.</summary>
-		/// <seealso cref="WordlistLoader.GetWordSet(Reader)">
+        /// <seealso cref="WordlistLoader.GetWordSet(System.IO.TextReader)">
 		/// </seealso>
-		/// <param name="matchVersion">Lucene version to match See {@link
-		/// <a href="#version">above</a>}
+        /// <param name="matchVersion">Lucene version to match See <see cref="Version">above</see>
+		///
 		/// </param>
 		/// <param name="stopwords">Reader to read stop words from 
 		/// </param>
@@ -305,8 +304,8 @@ namespace Lucene.Net.Analysis.Standard
 			}
 		}
 		
-		/// <summary>Constructs a {@link StandardTokenizer} filtered by a {@link
-		/// StandardFilter}, a {@link LowerCaseFilter} and a {@link StopFilter}. 
+		/// <summary>Constructs a <see cref="StandardTokenizer" /> filtered by a <see cref="StandardFilter" />,
+		/// a <see cref="LowerCaseFilter" /> and a <see cref="StopFilter" />. 
 		/// </summary>
 		public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
 		{
@@ -346,14 +345,14 @@ namespace Lucene.Net.Analysis.Standard
 			maxTokenLength = length;
 		}
 		
-		/// <seealso cref="setMaxTokenLength">
+		/// <seealso cref="SetMaxTokenLength">
 		/// </seealso>
 		public virtual int GetMaxTokenLength()
 		{
 			return maxTokenLength;
 		}
 		
-		/// <deprecated> Use {@link #tokenStream} instead 
+		/// <deprecated> Use <see cref="TokenStream" /> instead 
 		/// </deprecated>
         [Obsolete("Use TokenStream instead")]
 		public override TokenStream ReusableTokenStream(System.String fieldName, System.IO.TextReader reader)

Modified: incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardFilter.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardFilter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardFilter.cs Sun Nov  6 05:24:26 2011
@@ -26,7 +26,7 @@ using TypeAttribute = Lucene.Net.Analysi
 namespace Lucene.Net.Analysis.Standard
 {
 	
-	/// <summary>Normalizes tokens extracted with {@link StandardTokenizer}. </summary>
+	/// <summary>Normalizes tokens extracted with <see cref="StandardTokenizer" />. </summary>
 	
 	public sealed class StandardFilter:TokenFilter
 	{



Mime
View raw message