lucenenet-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ccurr...@apache.org
Subject svn commit: r1394820 [2/6] - in /lucene.net/trunk: src/contrib/Analyzers/ src/contrib/Analyzers/AR/ src/contrib/Analyzers/BR/ src/contrib/Analyzers/CJK/ src/contrib/Analyzers/Compound/ src/contrib/Analyzers/Compound/Hyphenation/ src/contrib/Analyzers/C...
Date Fri, 05 Oct 2012 21:22:59 GMT
Modified: lucene.net/trunk/src/contrib/Analyzers/Fr/FrenchAnalyzer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Fr/FrenchAnalyzer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Fr/FrenchAnalyzer.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Fr/FrenchAnalyzer.cs Fri Oct  5 21:22:51 2012
@@ -32,7 +32,7 @@ using Version = Lucene.Net.Util.Version;
 
 namespace Lucene.Net.Analysis.Fr
 {
-    /**
+    /*
  * {@link Analyzer} for French language. 
  * <p>
  * Supports an external list of stopwords (words that
@@ -56,7 +56,7 @@ namespace Lucene.Net.Analysis.Fr
     public sealed class FrenchAnalyzer : Analyzer
     {
 
-        /**
+        /*
          * Extended list of typical French stopwords.
          * @deprecated use {@link #getDefaultStopSet()} instead
          */
@@ -86,11 +86,11 @@ namespace Lucene.Net.Analysis.Fr
     "été", "être", "ô"
   };
 
-        /**
+        /*
          * Contains the stopwords used with the {@link StopFilter}.
          */
         private readonly ISet<string> stoptable;
-        /**
+        /*
          * Contains words that should be indexed but not stemmed.
          */
         //TODO make this final in 3.0
@@ -98,7 +98,7 @@ namespace Lucene.Net.Analysis.Fr
 
         private readonly Version matchVersion;
 
-        /**
+        /*
          * Returns an unmodifiable instance of the default stop-words set.
          * @return an unmodifiable instance of the default stop-words set.
          */
@@ -112,7 +112,7 @@ namespace Lucene.Net.Analysis.Fr
             internal static ISet<string> DEFAULT_STOP_SET = CharArraySet.UnmodifiableSet(new CharArraySet((IEnumerable<string>)FRENCH_STOP_WORDS, false));
         }
 
-        /**
+        /*
          * Builds an analyzer with the default stop words ({@link #FRENCH_STOP_WORDS}).
          */
         public FrenchAnalyzer(Version matchVersion)
@@ -121,7 +121,7 @@ namespace Lucene.Net.Analysis.Fr
 
         }
 
-        /**
+        /*
          * Builds an analyzer with the given stop words
          * 
          * @param matchVersion
@@ -134,7 +134,7 @@ namespace Lucene.Net.Analysis.Fr
         {
         }
 
-        /**
+        /*
          * Builds an analyzer with the given stop words
          * 
          * @param matchVersion
@@ -152,7 +152,7 @@ namespace Lucene.Net.Analysis.Fr
         }
 
 
-        /**
+        /*
          * Builds an analyzer with the given stop words.
          * @deprecated use {@link #FrenchAnalyzer(Version, Set)} instead
          */
@@ -162,7 +162,7 @@ namespace Lucene.Net.Analysis.Fr
 
         }
 
-        /**
+        /*
          * Builds an analyzer with the given stop words.
          * @throws IOException
          * @deprecated use {@link #FrenchAnalyzer(Version, Set)} instead
@@ -172,7 +172,7 @@ namespace Lucene.Net.Analysis.Fr
         {
         }
 
-        /**
+        /*
          * Builds an exclusionlist from an array of Strings.
          * @deprecated use {@link #FrenchAnalyzer(Version, Set, Set)} instead
          */
@@ -182,7 +182,7 @@ namespace Lucene.Net.Analysis.Fr
             PreviousTokenStream = null; // force a new stemmer to be created
         }
 
-        /**
+        /*
          * Builds an exclusionlist from a Map.
          * @deprecated use {@link #FrenchAnalyzer(Version, Set, Set)} instead
          */
@@ -192,7 +192,7 @@ namespace Lucene.Net.Analysis.Fr
             PreviousTokenStream = null; // force a new stemmer to be created
         }
 
-        /**
+        /*
          * Builds an exclusionlist from the words contained in the given file.
          * @throws IOException
          * @deprecated use {@link #FrenchAnalyzer(Version, Set, Set)} instead
@@ -203,7 +203,7 @@ namespace Lucene.Net.Analysis.Fr
             PreviousTokenStream = null; // force a new stemmer to be created
         }
 
-        /**
+        /*
          * Creates a {@link TokenStream} which tokenizes all the text in the provided
          * {@link Reader}.
          *
@@ -229,7 +229,7 @@ namespace Lucene.Net.Analysis.Fr
             protected internal TokenStream result;
         };
 
-        /**
+        /*
          * Returns a (possibly reused) {@link TokenStream} which tokenizes all the 
          * text in the provided {@link Reader}.
          *

Modified: lucene.net/trunk/src/contrib/Analyzers/Fr/FrenchStemFilter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Fr/FrenchStemFilter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Fr/FrenchStemFilter.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Fr/FrenchStemFilter.cs Fri Oct  5 21:22:51 2012
@@ -30,7 +30,7 @@ using Lucene.Net.Analysis.Tokenattribute
 
 namespace Lucene.Net.Analysis.Fr
 {
-    /**
+    /*
  * A {@link TokenFilter} that stems french words. 
  * <p>
  * It supports a table of words that should
@@ -45,7 +45,7 @@ namespace Lucene.Net.Analysis.Fr
     public sealed class FrenchStemFilter : TokenFilter
     {
 
-        /**
+        /*
          * The actual token in the input stream.
          */
         private FrenchStemmer stemmer = null;
@@ -68,7 +68,7 @@ namespace Lucene.Net.Analysis.Fr
             exclusions = exclusiontable;
         }
 
-        /**
+        /*
          * @return  Returns true for the next token in the stream, or false at EOS
          */
         public override bool IncrementToken()
@@ -92,7 +92,7 @@ namespace Lucene.Net.Analysis.Fr
                 return false;
             }
         }
-        /**
+        /*
          * Set an alternative/custom {@link FrenchStemmer} for this filter.
          */
         public void SetStemmer(FrenchStemmer stemmer)
@@ -102,7 +102,7 @@ namespace Lucene.Net.Analysis.Fr
                 this.stemmer = stemmer;
             }
         }
-        /**
+        /*
          * Set an alternative exclusion list for this filter.
          */
         public void SetExclusionTable(IDictionary<string, string> exclusiontable)

Modified: lucene.net/trunk/src/contrib/Analyzers/Fr/FrenchStemmer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Fr/FrenchStemmer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Fr/FrenchStemmer.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Fr/FrenchStemmer.cs Fri Oct  5 21:22:51 2012
@@ -25,7 +25,7 @@ using System.Text;
 namespace Lucene.Net.Analysis.Fr
 {
     
-/**
+/*
  * A stemmer for French words. 
  * <p>
  * The algorithm is based on the work of
@@ -37,22 +37,22 @@ namespace Lucene.Net.Analysis.Fr
 
 public class FrenchStemmer {
 
-    /**
+    /*
      * Buffer for the terms while stemming them.
      */
     private StringBuilder sb = new StringBuilder();
 
-    /**
+    /*
      * A temporary buffer, used to reconstruct R2
      */
      private StringBuilder tb = new StringBuilder();
 
-	/**
+	/*
 	 * Region R0 is equal to the whole buffer
 	 */
 	private String R0;
 
-	/**
+	/*
 	 * Region RV
 	 * "If the word begins with two vowels, RV is the region after the third letter,
 	 * otherwise the region after the first vowel not at the beginning of the word,
@@ -60,14 +60,14 @@ public class FrenchStemmer {
 	 */
     private String RV;
 
-	/**
+	/*
 	 * Region R1
 	 * "R1 is the region after the first non-vowel following a vowel
 	 * or is the null region at the end of the word if there is no such non-vowel"
 	 */
     private String R1;
 
-	/**
+	/*
 	 * Region R2
 	 * "R2 is the region after the first non-vowel in R1 following a vowel
 	 * or is the null region at the end of the word if there is no such non-vowel"
@@ -75,18 +75,18 @@ public class FrenchStemmer {
     private String R2;
 
 
-	/**
+	/*
 	 * Set to true if we need to perform step 2
 	 */
     private bool suite;
 
-	/**
+	/*
 	 * Set to true if the buffer was modified
 	 */
     private bool modified;
 
 
-    /**
+    /*
      * Stems the given term to a unique <tt>discriminator</tt>.
      *
      * @param term  java.lang.String The term that should be stemmed
@@ -136,7 +136,7 @@ public class FrenchStemmer {
 		return sb.ToString();
     }
 
-	/**
+	/*
 	 * Sets the search region Strings<br>
 	 * it needs to be done each time the buffer was modified
 	 */
@@ -155,7 +155,7 @@ public class FrenchStemmer {
 			R2 = null;
 	}
 
-	/**
+	/*
 	 * First step of the Porter Algorithm<br>
 	 * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
 	 */
@@ -209,7 +209,7 @@ public class FrenchStemmer {
 
 	}
 
-	/**
+	/*
 	 * Second step (A) of the Porter Algorithm<br>
 	 * Will be performed if nothing changed from the first step
 	 * or changed were done in the amment, emment, ments or ment suffixes<br>
@@ -226,7 +226,7 @@ public class FrenchStemmer {
 		return DeleteFromIfTestVowelBeforeIn( RV, search, false, RV );
 	}
 
-	/**
+	/*
 	 * Second step (B) of the Porter Algorithm<br>
 	 * Will be performed if step 2 A was performed unsuccessfully<br>
 	 * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
@@ -245,7 +245,7 @@ public class FrenchStemmer {
 		DeleteFrom( R2, new String[] { "ions" } );
 	}
 
-	/**
+	/*
 	 * Third step of the Porter Algorithm<br>
 	 * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
 	 */
@@ -266,7 +266,7 @@ public class FrenchStemmer {
 		}
 	}
 
-	/**
+	/*
 	 * Fourth step of the Porter Algorithm<br>
 	 * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
 	 */
@@ -293,7 +293,7 @@ public class FrenchStemmer {
         DeleteFromIfPrecededIn(RV, new String[] { "\u00eb" }, R0, "gu");
 	}
 
-	/**
+	/*
 	 * Fifth step of the Porter Algorithm<br>
 	 * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
 	 */
@@ -308,7 +308,7 @@ public class FrenchStemmer {
 		}
 	}
 
-	/**
+	/*
 	 * Sixth (and last!) step of the Porter Algorithm<br>
 	 * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
 	 */
@@ -346,7 +346,7 @@ public class FrenchStemmer {
 		}
 	}
 
-	/**
+	/*
 	 * Delete a suffix searched in zone "source" if zone "from" contains prefix + search string
 	 *
 	 * @param source java.lang.String - the primary source zone for search
@@ -375,7 +375,7 @@ public class FrenchStemmer {
 		return found;
 	}
 
-	/**
+	/*
 	 * Delete a suffix searched in zone "source" if the preceding letter is (or isn't) a vowel
 	 *
 	 * @param source java.lang.String - the primary source zone for search
@@ -409,7 +409,7 @@ public class FrenchStemmer {
 		return found;
 	}
 
-	/**
+	/*
 	 * Delete a suffix searched in zone "source" if preceded by the prefix
 	 *
 	 * @param source java.lang.String - the primary source zone for search
@@ -439,7 +439,7 @@ public class FrenchStemmer {
 		}
 	}
 
-	/**
+	/*
 	 * Delete a suffix searched in zone "source" if preceded by prefix<br>
 	 * or replace it with the replace string if preceded by the prefix in the zone "from"<br>
 	 * or delete the suffix if specified
@@ -481,7 +481,7 @@ public class FrenchStemmer {
 		}
 	}
 
-	/**
+	/*
 	 * Replace a search string with another within the source zone
 	 *
 	 * @param source java.lang.String - the source zone for search
@@ -509,7 +509,7 @@ public class FrenchStemmer {
 		return found;
 	}
 
-	/**
+	/*
 	 * Delete a search string within the source zone
 	 *
 	 * @param source the source zone for search
@@ -530,7 +530,7 @@ public class FrenchStemmer {
 		}
 	}
 
-	/**
+	/*
 	 * Test if a char is a french vowel, including accentuated ones
 	 *
 	 * @param ch the char to test
@@ -563,7 +563,7 @@ public class FrenchStemmer {
 		}
 	}
 
-	/**
+	/*
 	 * Retrieve the "R zone" (1 or 2 depending on the buffer) and return the corresponding string<br>
 	 * "R is the region after the first non-vowel following a vowel
 	 * or is the null region at the end of the word if there is no such non-vowel"<br>
@@ -599,7 +599,7 @@ public class FrenchStemmer {
 			return null;
 	}
 
-	/**
+	/*
 	 * Retrieve the "RV zone" from a buffer and return the corresponding string<br>
 	 * "If the word begins with two vowels, RV is the region after the third letter,
 	 * otherwise the region after the first vowel not at the beginning of the word,
@@ -636,7 +636,7 @@ public class FrenchStemmer {
 
 
 
-    /**
+    /*
 	 * Turns u and i preceded AND followed by a vowel to UpperCase<br>
 	 * Turns y preceded OR followed by a vowel to UpperCase<br>
 	 * Turns u preceded by q to UpperCase<br>
@@ -688,7 +688,7 @@ public class FrenchStemmer {
 		return buffer;
     }
 
-    /**
+    /*
      * Checks a term if it can be processed correctly.
      *
      * @return bool - true if, and only if, the given term consists in letters.

Modified: lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/PatternAnalyzer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/PatternAnalyzer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/PatternAnalyzer.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/PatternAnalyzer.cs Fri Oct  5 21:22:51 2012
@@ -28,7 +28,7 @@ using Version = Lucene.Net.Util.Version;
 
 namespace Lucene.Net.Analysis.Miscellaneous
 {
-    /**
+    /*
      * Efficient Lucene analyzer/tokenizer that preferably operates on a String rather than a
      * {@link java.io.Reader}, that can flexibly separate text into terms via a regular expression {@link Regex}
      * (with behaviour identical to {@link String#split(String)}),
@@ -60,10 +60,10 @@ namespace Lucene.Net.Analysis.Miscellane
     public class PatternAnalyzer : Analyzer
     {
 
-        /** <code>"\\W+"</code>; Divides text at non-letters (NOT char.IsLetter(c)) */
+        /* <c>"\\W+"</c>; Divides text at non-letters (NOT char.IsLetter(c)) */
         public static readonly Regex NON_WORD_PATTERN = new Regex("\\W+", RegexOptions.Compiled);
 
-        /** <code>"\\s+"</code>; Divides text at whitespaces (char.IsWhitespace(c)) */
+        /* <c>"\\s+"</c>; Divides text at whitespaces (char.IsWhitespace(c)) */
         public static readonly Regex WHITESPACE_PATTERN = new Regex("\\s+", RegexOptions.Compiled);
 
         private static readonly CharArraySet EXTENDED_ENGLISH_STOP_WORDS =
@@ -111,14 +111,14 @@ namespace Lucene.Net.Analysis.Miscellane
       "yourselves"
     }, true));
 
-        /**
+        /*
          * A lower-casing word analyzer with English stop words (can be shared
          * freely across threads without harm); global per class loader.
          */
         public static readonly PatternAnalyzer DEFAULT_ANALYZER = new PatternAnalyzer(
           Version.LUCENE_CURRENT, NON_WORD_PATTERN, true, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
 
-        /**
+        /*
          * A lower-casing word analyzer with <b>extended </b> English stop words
          * (can be shared freely across threads without harm); global per class
          * loader. The stop words are borrowed from
@@ -134,14 +134,14 @@ namespace Lucene.Net.Analysis.Miscellane
 
         private readonly Version matchVersion;
 
-        /**
+        /*
          * Constructs a new instance with the given parameters.
          * 
          * @param matchVersion If >= {@link Version#LUCENE_29}, StopFilter.enablePositionIncrement is set to true
          * @param Regex
          *            a regular expression delimiting tokens
          * @param toLowerCase
-         *            if <code>true</code> returns tokens after applying
+         *            if <c>true</c> returns tokens after applying
          *            String.toLowerCase()
          * @param stopWords
          *            if non-null, ignores all tokens that are contained in the
@@ -149,7 +149,7 @@ namespace Lucene.Net.Analysis.Miscellane
          *            if applicable). For example, created via
          *            {@link StopFilter#makeStopSet(String[])}and/or
          *            {@link org.apache.lucene.analysis.WordlistLoader}as in
-         *            <code>WordlistLoader.getWordSet(new File("samples/fulltext/stopwords.txt")</code>
+         *            <c>WordlistLoader.getWordSet(new File("samples/fulltext/stopwords.txt")</c>
          *            or <a href="http://www.unine.ch/info/clef/">other stop words
          *            lists </a>.
          */
@@ -169,7 +169,7 @@ namespace Lucene.Net.Analysis.Miscellane
             this.matchVersion = matchVersion;
         }
 
-        /**
+        /*
          * Creates a token stream that tokenizes the given string into token terms
          * (aka words).
          * 
@@ -204,10 +204,10 @@ namespace Lucene.Net.Analysis.Miscellane
             return stream;
         }
 
-        /**
+        /*
          * Creates a token stream that tokenizes all the text in the given Reader;
-         * This implementation forwards to <code>tokenStream(String, String)</code> and is
-         * less efficient than <code>tokenStream(String, String)</code>.
+         * This implementation forwards to <c>tokenStream(String, String)</c> and is
+         * less efficient than <c>tokenStream(String, String)</c>.
          * 
          * @param fieldName
          *            the name of the field to tokenize (currently ignored).
@@ -233,7 +233,7 @@ namespace Lucene.Net.Analysis.Miscellane
             }
         }
 
-        /**
+        /*
          * Indicates whether some other object is "equal to" this one.
          * 
          * @param other
@@ -257,7 +257,7 @@ namespace Lucene.Net.Analysis.Miscellane
             return false;
         }
 
-        /**
+        /*
          * Returns a hash code value for the object.
          * 
          * @return the hash code.
@@ -275,19 +275,19 @@ namespace Lucene.Net.Analysis.Miscellane
             return h;
         }
 
-        /** equality where o1 and/or o2 can be null */
+        /* equality where o1 and/or o2 can be null */
         private static bool Eq(Object o1, Object o2)
         {
             return (o1 == o2) || (o1 != null ? o1.Equals(o2) : false);
         }
 
-        /** assumes p1 and p2 are not null */
+        /* assumes p1 and p2 are not null */
         private static bool EqRegex(Regex p1, Regex p2)
         {
             return p1 == p2 || (p1.Options == p2.Options && p1.ToString() == p2.ToString());
         }
 
-        /**
+        /*
          * Reads until end-of-stream and returns all read chars, finally closes the stream.
          * 
          * @param input the input stream
@@ -332,7 +332,7 @@ namespace Lucene.Net.Analysis.Miscellane
         ///////////////////////////////////////////////////////////////////////////////
         // Nested classes:
         ///////////////////////////////////////////////////////////////////////////////
-        /**
+        /*
          * The work horse; performance isn't fantastic, but it's not nearly as bad
          * as one might think - kudos to the Sun regex developers.
          */
@@ -406,7 +406,7 @@ namespace Lucene.Net.Analysis.Miscellane
         ///////////////////////////////////////////////////////////////////////////////
         // Nested classes:
         ///////////////////////////////////////////////////////////////////////////////
-        /**
+        /*
          * Special-case class for best performance in common cases; this class is
          * otherwise unnecessary.
          */
@@ -513,7 +513,7 @@ namespace Lucene.Net.Analysis.Miscellane
         ///////////////////////////////////////////////////////////////////////////////
         // Nested classes:
         ///////////////////////////////////////////////////////////////////////////////
-        /**
+        /*
          * A StringReader that exposes its contained string for fast direct access.
          * Might make sense to generalize this to CharSequence and make it public?
          */

Modified: lucene.net/trunk/src/contrib/Analyzers/NGram/EdgeNGramTokenFilter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/NGram/EdgeNGramTokenFilter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/NGram/EdgeNGramTokenFilter.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/NGram/EdgeNGramTokenFilter.cs Fri Oct  5 21:22:51 2012
@@ -65,7 +65,7 @@ namespace Lucene.Net.Analysis.NGram
         BACK
     }
 
-    /**
+    /*
      * Tokenizes the given token into n-grams of given size(s).
      * <p>
      * This <see cref="TokenFilter"/> create n-grams from the beginning edge or ending edge of a input token.
@@ -95,7 +95,7 @@ namespace Lucene.Net.Analysis.NGram
             this.offsetAtt = AddAttribute<IOffsetAttribute>();
         }
 
-        /**
+        /*
          * Creates EdgeNGramTokenFilter that can generate n-grams in the sizes of the given range
          *
          * <param name="input"><see cref="TokenStream"/> holding the input to be tokenized</param>
@@ -130,7 +130,7 @@ namespace Lucene.Net.Analysis.NGram
             this.offsetAtt = AddAttribute<IOffsetAttribute>();
         }
 
-        /**
+        /*
          * Creates EdgeNGramTokenFilter that can generate n-grams in the sizes of the given range
          *
          * <param name="input"><see cref="TokenStream"/> holding the input to be tokenized</param>

Modified: lucene.net/trunk/src/contrib/Analyzers/NGram/EdgeNGramTokenizer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/NGram/EdgeNGramTokenizer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/NGram/EdgeNGramTokenizer.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/NGram/EdgeNGramTokenizer.cs Fri Oct  5 21:22:51 2012
@@ -25,7 +25,7 @@ using Lucene.Net.Util;
 namespace Lucene.Net.Analysis.NGram
 {
 
-    /**
+    /*
      * Tokenizes the input from an edge into n-grams of given size(s).
      * <p>
      * This <see cref="Tokenizer"/> create n-grams from the beginning edge or ending edge of a input token.
@@ -41,7 +41,7 @@ namespace Lucene.Net.Analysis.NGram
         private ITermAttribute termAtt;
         private IOffsetAttribute offsetAtt;
 
-        /** Specifies which side of the input the n-gram should be generated from */
+        /* Specifies which side of the input the n-gram should be generated from */
         // Moved Side enum from this class to external definition
 
         private int minGram;
@@ -53,7 +53,7 @@ namespace Lucene.Net.Analysis.NGram
         private string inStr;
 
 
-        /**
+        /*
          * Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
          *
          * <param name="input"><see cref="TextReader"/> holding the input to be tokenized</param>
@@ -67,7 +67,7 @@ namespace Lucene.Net.Analysis.NGram
             init(side, minGram, maxGram);
         }
 
-        /**
+        /*
          * Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
          *
          * <param name="source"><see cref="AttributeSource"/> to use</param>
@@ -83,7 +83,7 @@ namespace Lucene.Net.Analysis.NGram
             init(side, minGram, maxGram);
         }
 
-        /**
+        /*
          * Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
          * 
          * <param name="factory"><see cref="AttributeSource.AttributeFactory"/> to use</param>
@@ -99,7 +99,7 @@ namespace Lucene.Net.Analysis.NGram
             init(side, minGram, maxGram);
         }
 
-        /**
+        /*
          * Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
          *
          * <param name="input"><see cref="TextReader"/> holding the input to be tokenized</param>
@@ -113,7 +113,7 @@ namespace Lucene.Net.Analysis.NGram
 
         }
 
-        /**
+        /*
          * Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
          *
          * <param name="source"><see cref="AttributeSource"/> to use</param>
@@ -128,7 +128,7 @@ namespace Lucene.Net.Analysis.NGram
 
         }
 
-        /**
+        /*
          * Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
          * 
          * <param name="factory"><see cref="AttributeSource.AttributeFactory"/> to use</param>
@@ -168,7 +168,7 @@ namespace Lucene.Net.Analysis.NGram
 
         }
 
-        /** Returns the next token in the stream, or null at EOS. */
+        /* Returns the next token in the stream, or null at EOS. */
         public override bool IncrementToken()
         {
             ClearAttributes();

Modified: lucene.net/trunk/src/contrib/Analyzers/NGram/NGramTokenFilter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/NGram/NGramTokenFilter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/NGram/NGramTokenFilter.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/NGram/NGramTokenFilter.cs Fri Oct  5 21:22:51 2012
@@ -24,7 +24,7 @@ using Lucene.Net.Util;
 
 namespace Lucene.Net.Analysis.NGram
 {
-    /**
+    /*
      * Tokenizes the input into n-grams of the given size(s).
      */
     public sealed class NGramTokenFilter : TokenFilter
@@ -43,7 +43,7 @@ namespace Lucene.Net.Analysis.NGram
         private ITermAttribute termAtt;
         private IOffsetAttribute offsetAtt;
 
-        /**
+        /*
          * Creates NGramTokenFilter with given min and max n-grams.
          * <param name="input"><see cref="TokenStream"/> holding the input to be tokenized</param>
          * <param name="minGram">the smallest n-gram to generate</param>
@@ -68,7 +68,7 @@ namespace Lucene.Net.Analysis.NGram
             this.offsetAtt = AddAttribute<IOffsetAttribute>();
         }
 
-        /**
+        /*
          * Creates NGramTokenFilter with default min and max n-grams.
          * <param name="input"><see cref="TokenStream"/> holding the input to be tokenized</param>
          */
@@ -78,7 +78,7 @@ namespace Lucene.Net.Analysis.NGram
 
         }
 
-        /** Returns the next token in the stream, or null at EOS. */
+        /* Returns the next token in the stream, or null at EOS. */
         public override bool IncrementToken()
         {
             while (true)

Modified: lucene.net/trunk/src/contrib/Analyzers/NGram/NGramTokenizer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/NGram/NGramTokenizer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/NGram/NGramTokenizer.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/NGram/NGramTokenizer.cs Fri Oct  5 21:22:51 2012
@@ -25,7 +25,7 @@ using Lucene.Net.Util;
 namespace Lucene.Net.Analysis.NGram
 {
 
-    /**
+    /*
      * Tokenizes the input into n-grams of the given size(s).
      */
     public sealed class NGramTokenizer : Tokenizer
@@ -43,7 +43,7 @@ namespace Lucene.Net.Analysis.NGram
         private ITermAttribute termAtt;
         private IOffsetAttribute offsetAtt;
 
-        /**
+        /*
          * Creates NGramTokenizer with given min and max n-grams.
          * <param name="input"><see cref="TextReader"/> holding the input to be tokenized</param>
          * <param name="minGram">the smallest n-gram to generate</param>
@@ -55,7 +55,7 @@ namespace Lucene.Net.Analysis.NGram
             init(minGram, maxGram);
         }
 
-        /**
+        /*
          * Creates NGramTokenizer with given min and max n-grams.
          * <param name="source"><see cref="AttributeSource"/> to use</param>
          * <param name="input"><see cref="TextReader"/> holding the input to be tokenized</param>
@@ -68,7 +68,7 @@ namespace Lucene.Net.Analysis.NGram
             init(minGram, maxGram);
         }
 
-        /**
+        /*
          * Creates NGramTokenizer with given min and max n-grams.
          * <param name="factory"><see cref="AttributeSource.AttributeFactory"/> to use</param>
          * <param name="input"><see cref="TextReader"/> holding the input to be tokenized</param>
@@ -81,7 +81,7 @@ namespace Lucene.Net.Analysis.NGram
             init(minGram, maxGram);
         }
 
-        /**
+        /*
          * Creates NGramTokenizer with default min and max n-grams.
          * <param name="input"><see cref="TextReader"/> holding the input to be tokenized</param>
          */
@@ -108,7 +108,7 @@ namespace Lucene.Net.Analysis.NGram
             this.offsetAtt = AddAttribute<IOffsetAttribute>();
         }
 
-        /** Returns the next token in the stream, or null at EOS. */
+        /* Returns the next token in the stream, or null at EOS. */
         public override bool IncrementToken()
         {
             ClearAttributes();

Modified: lucene.net/trunk/src/contrib/Analyzers/Nl/DutchAnalyzer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Nl/DutchAnalyzer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Nl/DutchAnalyzer.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Nl/DutchAnalyzer.cs Fri Oct  5 21:22:51 2012
@@ -29,7 +29,7 @@ using Version = Lucene.Net.Util.Version;
 
 namespace Lucene.Net.Analysis.Nl
 {
-    /**
+    /*
  * {@link Analyzer} for Dutch language. 
  * <p>
  * Supports an external list of stopwords (words that
@@ -45,7 +45,7 @@ namespace Lucene.Net.Analysis.Nl
  */
     public class DutchAnalyzer : Analyzer
     {
-        /**
+        /*
          * List of typical Dutch stopwords.
          * @deprecated use {@link #getDefaultStopSet()} instead
          */
@@ -62,7 +62,7 @@ namespace Lucene.Net.Analysis.Nl
         "wezen", "kunnen", "ons", "zelf", "tegen", "na", "reeds", "wil", "kon", "niets",
         "uw", "iemand", "geweest", "andere"
       };
-        /**
+        /*
          * Returns an unmodifiable instance of the default stop-words set.
          * @return an unmodifiable instance of the default stop-words set.
          */
@@ -78,12 +78,12 @@ namespace Lucene.Net.Analysis.Nl
         }
 
 
-        /**
+        /*
          * Contains the stopwords used with the StopFilter.
          */
         private readonly ISet<string> stoptable;
 
-        /**
+        /*
          * Contains words that should be indexed but not stemmed.
          */
         private ISet<string> excltable = Support.Compatibility.SetFactory.CreateHashSet<string>();
@@ -91,7 +91,7 @@ namespace Lucene.Net.Analysis.Nl
         private IDictionary<String, String> stemdict = new HashMap<String, String>();
         private readonly Version matchVersion;
 
-        /**
+        /*
          * Builds an analyzer with the default stop words ({@link #DUTCH_STOP_WORDS}) 
          * and a few default entries for the stem exclusion table.
          * 
@@ -119,7 +119,7 @@ namespace Lucene.Net.Analysis.Nl
             SetOverridesTokenStreamMethod<DutchAnalyzer>();
         }
 
-        /**
+        /*
          * Builds an analyzer with the given stop words.
          *
          * @param matchVersion
@@ -132,7 +132,7 @@ namespace Lucene.Net.Analysis.Nl
 
         }
 
-        /**
+        /*
          * Builds an analyzer with the given stop words.
          *
          * @param stopwords
@@ -144,7 +144,7 @@ namespace Lucene.Net.Analysis.Nl
 
         }
 
-        /**
+        /*
          * Builds an analyzer with the given stop words.
          *
          * @param stopwords
@@ -166,7 +166,7 @@ namespace Lucene.Net.Analysis.Nl
             this.matchVersion = matchVersion;
         }
 
-        /**
+        /*
          * Builds an exclusionlist from an array of Strings.
          *
          * @param exclusionlist
@@ -178,7 +178,7 @@ namespace Lucene.Net.Analysis.Nl
             PreviousTokenStream = null; // force a new stemmer to be created
         }
 
-        /**
+        /*
          * Builds an exclusionlist from a Hashtable.
          * @deprecated use {@link #DutchAnalyzer(Version, Set, Set)} instead
          */
@@ -188,7 +188,7 @@ namespace Lucene.Net.Analysis.Nl
             PreviousTokenStream = null; // force a new stemmer to be created
         }
 
-        /**
+        /*
          * Builds an exclusionlist from the words contained in the given file.
          * @deprecated use {@link #DutchAnalyzer(Version, Set, Set)} instead
          */
@@ -206,7 +206,7 @@ namespace Lucene.Net.Analysis.Nl
             }
         }
 
-        /**
+        /*
          * Reads a stemdictionary file , that overrules the stemming algorithm
          * This is a textfile that contains per line
          * <tt>word<b>\t</b>stem</tt>, i.e: two tab seperated words
@@ -225,7 +225,7 @@ namespace Lucene.Net.Analysis.Nl
             }
         }
 
-        /**
+        /*
          * Creates a {@link TokenStream} which tokenizes all the text in the 
          * provided {@link Reader}.
          *
@@ -249,7 +249,7 @@ namespace Lucene.Net.Analysis.Nl
             protected internal TokenStream result;
         };
 
-        /**
+        /*
          * Returns a (possibly reused) {@link TokenStream} which tokenizes all the 
          * text in the provided {@link Reader}.
          *

Modified: lucene.net/trunk/src/contrib/Analyzers/Nl/DutchStemFilter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Nl/DutchStemFilter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Nl/DutchStemFilter.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Nl/DutchStemFilter.cs Fri Oct  5 21:22:51 2012
@@ -28,7 +28,7 @@ using Lucene.Net.Support;
 
 namespace Lucene.Net.Analysis.Nl
 {
-    /**
+    /*
  * A {@link TokenFilter} that stems Dutch words. 
  * <p>
  * It supports a table of words that should
@@ -42,7 +42,7 @@ namespace Lucene.Net.Analysis.Nl
  */
     public sealed class DutchStemFilter : TokenFilter
     {
-        /**
+        /*
          * The actual token in the input stream.
          */
         private DutchStemmer stemmer = null;
@@ -57,7 +57,7 @@ namespace Lucene.Net.Analysis.Nl
             termAtt = AddAttribute<ITermAttribute>();
         }
 
-        /**
+        /*
          * Builds a DutchStemFilter that uses an exclusion table.
          */
         public DutchStemFilter(TokenStream _in, ISet<string> exclusiontable)
@@ -66,7 +66,7 @@ namespace Lucene.Net.Analysis.Nl
             exclusions = exclusiontable;
         }
 
-        /**
+        /*
          * @param stemdictionary Dictionary of word stem pairs, that overrule the algorithm
          */
         public DutchStemFilter(TokenStream _in, ISet<string> exclusiontable, IDictionary<string, string> stemdictionary)
@@ -75,7 +75,7 @@ namespace Lucene.Net.Analysis.Nl
             stemmer.SetStemDictionary(stemdictionary);
         }
 
-        /**
+        /*
          * Returns the next token in the stream, or null at EOS
          */
         public override bool IncrementToken()
@@ -100,7 +100,7 @@ namespace Lucene.Net.Analysis.Nl
             }
         }
 
-        /**
+        /*
          * Set a alternative/custom {@link DutchStemmer} for this filter.
          */
         public void SetStemmer(DutchStemmer stemmer)
@@ -111,7 +111,7 @@ namespace Lucene.Net.Analysis.Nl
             }
         }
 
-        /**
+        /*
          * Set an alternative exclusion list for this filter.
          */
         public void SetExclusionTable(ISet<string> exclusiontable)
@@ -119,7 +119,7 @@ namespace Lucene.Net.Analysis.Nl
             exclusions = exclusiontable;
         }
 
-        /**
+        /*
          * Set dictionary for stemming, this dictionary overrules the algorithm,
          * so you can correct for a particular unwanted word-stem pair.
          */

Modified: lucene.net/trunk/src/contrib/Analyzers/Nl/DutchStemmer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Nl/DutchStemmer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Nl/DutchStemmer.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Nl/DutchStemmer.cs Fri Oct  5 21:22:51 2012
@@ -27,7 +27,7 @@ using System.Collections.Generic;
 
 namespace Lucene.Net.Analysis.Nl
 {
-    /**
+    /*
      * A stemmer for Dutch words. 
      * <p>
      * The algorithm is an implementation of
@@ -38,7 +38,7 @@ namespace Lucene.Net.Analysis.Nl
 
     public class DutchStemmer
     {
-        /**
+        /*
          * Buffer for the terms while stemming them.
          */
         private StringBuilder sb = new StringBuilder();
@@ -142,7 +142,7 @@ namespace Lucene.Net.Analysis.Nl
             }
         }
 
-        /**
+        /*
          * Remove suffix e if in R1 and
          * preceded by a non-vowel, and then undouble the ending
          *
@@ -165,7 +165,7 @@ namespace Lucene.Net.Analysis.Nl
             }
         }
 
-        /**
+        /*
          * Remove "heid"
          *
          * @param sb String being stemmed
@@ -183,7 +183,7 @@ namespace Lucene.Net.Analysis.Nl
             }
         }
 
-        /**
+        /*
          * <p>A d-suffix, or derivational suffix, enables a new word,
          * often with a different grammatical category, or with a different
          * sense, to be built from another word. Whether a d-suffix can be
@@ -256,7 +256,7 @@ namespace Lucene.Net.Analysis.Nl
             }
         }
 
-        /**
+        /*
          * undouble vowel
          * If the words ends CVD, where C is a non-vowel, D is a non-vowel other than I, and V is double a, e, o or u, remove one of the vowels from V (for example, maan -> man, brood -> brod).
          *
@@ -282,7 +282,7 @@ namespace Lucene.Net.Analysis.Nl
             }
         }
 
-        /**
+        /*
          * Checks if a term could be stemmed.
          *
          * @return true if, and only if, the given term consists in letters.
@@ -296,7 +296,7 @@ namespace Lucene.Net.Analysis.Nl
             return true;
         }
 
-        /**
+        /*
          * Substitute ä, ë, ï, ö, ü, á , é, í, ó, ú
          */
         private void substitute(StringBuilder buffer)

Modified: lucene.net/trunk/src/contrib/Analyzers/Payloads/TokenOffsetPayloadTokenFilter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Payloads/TokenOffsetPayloadTokenFilter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Payloads/TokenOffsetPayloadTokenFilter.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Payloads/TokenOffsetPayloadTokenFilter.cs Fri Oct  5 21:22:51 2012
@@ -25,8 +25,8 @@ using Lucene.Net.Index;
 namespace Lucene.Net.Analysis.Payloads
 {
     /// <summary>
-    /// Adds the <see cref="Token.SetStartOffset(int)"/>
-    /// and <see cref="Token.SetEndOffset(int)"/>
+    /// Adds the <see cref="Token.StartOffset"/>
+    /// and <see cref="Token.EndOffset"/>
     /// First 4 bytes are the start
     /// </summary>
     public class TokenOffsetPayloadTokenFilter : TokenFilter

Modified: lucene.net/trunk/src/contrib/Analyzers/Position/PositionFilter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Position/PositionFilter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Position/PositionFilter.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Position/PositionFilter.cs Fri Oct  5 21:22:51 2012
@@ -27,22 +27,22 @@ using Lucene.Net.Analysis.Tokenattribute
 
 namespace Lucene.Net.Analysis.Position
 {
-    /** Set the positionIncrement of all tokens to the "positionIncrement",
+    /* Set the positionIncrement of all tokens to the "positionIncrement",
      * except the first return token which retains its original positionIncrement value.
      * The default positionIncrement value is zero.
      */
     public sealed class PositionFilter : TokenFilter
     {
 
-        /** Position increment to assign to all but the first token - default = 0 */
+        /* Position increment to assign to all but the first token - default = 0 */
         private int positionIncrement = 0;
 
-        /** The first token must have non-zero positionIncrement **/
+        /* The first token must have non-zero positionIncrement **/
         private bool firstTokenPositioned = false;
 
         private IPositionIncrementAttribute posIncrAtt;
 
-        /**
+        /*
          * Constructs a PositionFilter that assigns a position increment of zero to
          * all but the first token from the given input stream.
          * 
@@ -54,7 +54,7 @@ namespace Lucene.Net.Analysis.Position
             posIncrAtt = AddAttribute<IPositionIncrementAttribute>();
         }
 
-        /**
+        /*
          * Constructs a PositionFilter that assigns the given position increment to
          * all but the first token from the given input stream.
          * 

Modified: lucene.net/trunk/src/contrib/Analyzers/Query/QueryAutoStopWordAnalyzer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Query/QueryAutoStopWordAnalyzer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Query/QueryAutoStopWordAnalyzer.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Query/QueryAutoStopWordAnalyzer.cs Fri Oct  5 21:22:51 2012
@@ -31,7 +31,7 @@ using Version = Lucene.Net.Util.Version;
 
 namespace Lucene.Net.Analysis.Query
 {
-/**
+/*
  * An {@link Analyzer} used primarily at query time to wrap another analyzer and provide a layer of protection
  * which prevents very common words from being passed into queries. 
  * <p>
@@ -53,7 +53,7 @@ public class QueryAutoStopWordAnalyzer :
   public const float defaultMaxDocFreqPercent = 0.4f;
   private readonly Version matchVersion;
 
-  /**
+  /*
    * Initializes this analyzer with the Analyzer object that actually produces the tokens
    *
    * @param _delegate The choice of {@link Analyzer} that is used to produce the token stream which needs filtering
@@ -65,7 +65,7 @@ public class QueryAutoStopWordAnalyzer :
     this.matchVersion = matchVersion;
   }
 
-  /**
+  /*
    * Automatically adds stop words for all fields with terms exceeding the defaultMaxDocFreqPercent
    *
    * @param reader The {@link IndexReader} which will be consulted to identify potential stop words that
@@ -78,7 +78,7 @@ public class QueryAutoStopWordAnalyzer :
     return AddStopWords(reader, defaultMaxDocFreqPercent);
   }
 
-  /**
+  /*
    * Automatically adds stop words for all fields with terms exceeding the maxDocFreqPercent
    *
    * @param reader     The {@link IndexReader} which will be consulted to identify potential stop words that
@@ -99,7 +99,7 @@ public class QueryAutoStopWordAnalyzer :
     return numStopWords;
   }
 
-  /**
+  /*
    * Automatically adds stop words for all fields with terms exceeding the maxDocFreqPercent
    *
    * @param reader        The {@link IndexReader} which will be consulted to identify potential stop words that
@@ -120,7 +120,7 @@ public class QueryAutoStopWordAnalyzer :
     return numStopWords;
   }
 
-  /**
+  /*
    * Automatically adds stop words for the given field with terms exceeding the maxPercentDocs
    *
    * @param reader         The {@link IndexReader} which will be consulted to identify potential stop words that
@@ -136,7 +136,7 @@ public class QueryAutoStopWordAnalyzer :
     return AddStopWords(reader, fieldName, (int) (reader.NumDocs() * maxPercentDocs));
   }
 
-  /**
+  /*
    * Automatically adds stop words for the given field with terms exceeding the maxPercentDocs
    *
    * @param reader     The {@link IndexReader} which will be consulted to identify potential stop words that
@@ -261,7 +261,7 @@ public class QueryAutoStopWordAnalyzer :
     return streams.WithStopFilter;
   }
 
-  /**
+  /*
    * Provides information on which stop words have been identified for a field
    *
    * @param fieldName The field for which stop words identified in "addStopWords"
@@ -279,7 +279,7 @@ public class QueryAutoStopWordAnalyzer :
     return result;
   }
 
-  /**
+  /*
    * Provides information on which stop words have been identified for all fields
    *
    * @return the stop words (as terms)

Modified: lucene.net/trunk/src/contrib/Analyzers/Reverse/ReverseStringFilter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Reverse/ReverseStringFilter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Reverse/ReverseStringFilter.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Reverse/ReverseStringFilter.cs Fri Oct  5 21:22:51 2012
@@ -27,10 +27,10 @@ using Lucene.Net.Analysis.Tokenattribute
 
 namespace Lucene.Net.Analysis.Reverse
 {
-    /**
+    /*
      * Reverse token string, for example "country" => "yrtnuoc".
      * <p>
-     * If <code>marker</code> is supplied, then tokens will be also prepended by
+     * If <c>marker</c> is supplied, then tokens will be also prepended by
      * that character. For example, with a marker of &#x5C;u0001, "country" =>
      * "&#x5C;u0001yrtnuoc". This is useful when implementing efficient leading
      * wildcards search.
@@ -43,27 +43,27 @@ namespace Lucene.Net.Analysis.Reverse
         private readonly char marker;
         private const char NOMARKER = '\uFFFF';
 
-        /**
+        /*
          * Example marker character: U+0001 (START OF HEADING) 
          */
         public const char START_OF_HEADING_MARKER = '\u0001';
 
-        /**
+        /*
          * Example marker character: U+001F (INFORMATION SEPARATOR ONE)
          */
         public const char INFORMATION_SEPARATOR_MARKER = '\u001F';
 
-        /**
+        /*
          * Example marker character: U+EC00 (PRIVATE USE AREA: EC00) 
          */
         public const char PUA_EC00_MARKER = '\uEC00';
 
-        /**
+        /*
          * Example marker character: U+200F (RIGHT-TO-LEFT MARK)
          */
         public const char RTL_DIRECTION_MARKER = '\u200F';
 
-        /**
+        /*
          * Create a new ReverseStringFilter that reverses all tokens in the 
          * supplied {@link TokenStream}.
          * <p>
@@ -78,11 +78,11 @@ namespace Lucene.Net.Analysis.Reverse
 
         }
 
-        /**
+        /*
          * Create a new ReverseStringFilter that reverses and marks all tokens in the
          * supplied {@link TokenStream}.
          * <p>
-         * The reversed tokens will be prepended (marked) by the <code>marker</code>
+         * The reversed tokens will be prepended (marked) by the <c>marker</c>
          * character.
          * </p>
          * 

Modified: lucene.net/trunk/src/contrib/Analyzers/Ru/RussianAnalyzer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Ru/RussianAnalyzer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Ru/RussianAnalyzer.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Ru/RussianAnalyzer.cs Fri Oct  5 21:22:51 2012
@@ -81,7 +81,7 @@ namespace Lucene.Net.Analysis.Ru
         {
         }
 
-        /**
+        /*
          * Builds an analyzer with the given stop words.
          * @deprecated use {@link #RussianAnalyzer(Version, Set)} instead
          */
@@ -91,7 +91,7 @@ namespace Lucene.Net.Analysis.Ru
 
         }
 
-        /**
+        /*
          * Builds an analyzer with the given stop words
          * 
          * @param matchVersion
@@ -105,7 +105,7 @@ namespace Lucene.Net.Analysis.Ru
             this.matchVersion = matchVersion;
         }
 
-        /**
+        /*
          * Builds an analyzer with the given stop words.
          * TODO: create a Set version of this ctor
          * @deprecated use {@link #RussianAnalyzer(Version, Set)} instead
@@ -115,7 +115,7 @@ namespace Lucene.Net.Analysis.Ru
         {
         }
 
-        /**
+        /*
          * Creates a {@link TokenStream} which tokenizes all the text in the 
          * provided {@link Reader}.
          *
@@ -140,7 +140,7 @@ namespace Lucene.Net.Analysis.Ru
             protected internal TokenStream result;
         };
 
-        /**
+        /*
          * Returns a (possibly reused) {@link TokenStream} which tokenizes all the text 
          * in the provided {@link Reader}.
          *

Modified: lucene.net/trunk/src/contrib/Analyzers/Ru/RussianLetterTokenizer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Ru/RussianLetterTokenizer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Ru/RussianLetterTokenizer.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Ru/RussianLetterTokenizer.cs Fri Oct  5 21:22:51 2012
@@ -47,7 +47,7 @@ namespace Lucene.Net.Analysis.Ru
         {
         }
 
-        /**
+        /*
          * Collects only characters which satisfy
          * {@link Character#isLetter(char)}.
          */

Modified: lucene.net/trunk/src/contrib/Analyzers/Ru/RussianStemFilter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Ru/RussianStemFilter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Ru/RussianStemFilter.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Ru/RussianStemFilter.cs Fri Oct  5 21:22:51 2012
@@ -25,7 +25,7 @@ using Lucene.Net.Analysis.Tokenattribute
 
 namespace Lucene.Net.Analysis.Ru
 {
-    /**
+    /*
     * A {@link TokenFilter} that stems Russian words. 
     * <p>
     * The implementation was inspired by GermanStemFilter.
@@ -35,7 +35,7 @@ namespace Lucene.Net.Analysis.Ru
     */
     public sealed class RussianStemFilter : TokenFilter
     {
-        /**
+        /*
          * The actual token in the input stream.
          */
         private RussianStemmer stemmer = null;
@@ -48,7 +48,7 @@ namespace Lucene.Net.Analysis.Ru
             stemmer = new RussianStemmer();
             termAtt = AddAttribute<ITermAttribute>();
         }
-        /**
+        /*
          * Returns the next token in the stream, or null at EOS
          */
         public sealed override bool IncrementToken()
@@ -71,7 +71,7 @@ namespace Lucene.Net.Analysis.Ru
         // I don't get the point of this.  All methods in java are private, so they can't be
         // overridden...You can't really subclass any of its behavior.  I've commented it out,
         // as it doesn't compile as is. - cc
-        ////**
+        ////*
         // * Set a alternative/custom {@link RussianStemmer} for this filter.
         // */
         //public void SetStemmer(RussianStemmer stemmer)

Modified: lucene.net/trunk/src/contrib/Analyzers/Ru/RussianStemmer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Ru/RussianStemmer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Ru/RussianStemmer.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Ru/RussianStemmer.cs Fri Oct  5 21:22:51 2012
@@ -24,7 +24,7 @@ using System.Text;
 
 namespace Lucene.Net.Analysis.Ru
 {
-    /**
+    /*
  * Russian stemming algorithm implementation (see http://snowball.sourceforge.net for detailed description).
  */
     public class RussianStemmer
@@ -248,14 +248,14 @@ namespace Lucene.Net.Analysis.Ru
                                                           new[] {O, S, T, SOFT}
                                                       };
 
-        /**
+        /*
          * RussianStemmer constructor comment.
          */
         public RussianStemmer()
         {
         }
 
-        /**
+        /*
          * Adjectival ending is an adjective ending,
          * optionally preceded by participle ending.
          * Creation date: (17/03/2002 12:14:58 AM)
@@ -276,7 +276,7 @@ namespace Lucene.Net.Analysis.Ru
             return true;
         }
 
-        /**
+        /*
          * Derivational endings
          * Creation date: (17/03/2002 12:14:58 AM)
          * @param stemmingZone java.lang.StringBuilder
@@ -302,7 +302,7 @@ namespace Lucene.Net.Analysis.Ru
             }
         }
 
-        /**
+        /*
          * Finds ending among given ending class and returns the length of ending found(0, if not found).
          * Creation date: (17/03/2002 8:18:34 PM)
          */
@@ -342,7 +342,7 @@ namespace Lucene.Net.Analysis.Ru
             return findEnding(stemmingZone, stemmingZone.Length - 1, theEndingClass);
         }
 
-        /**
+        /*
          * Finds the ending among the given class of endings and removes it from stemming zone.
          * Creation date: (17/03/2002 8:18:34 PM)
          */
@@ -360,7 +360,7 @@ namespace Lucene.Net.Analysis.Ru
             }
         }
 
-        /**
+        /*
          * Finds the ending among the given class of endings, then checks if this ending was
          * preceded by any of given predecessors, and if so, removes it from stemming zone.
          * Creation date: (17/03/2002 8:18:34 PM)
@@ -390,7 +390,7 @@ namespace Lucene.Net.Analysis.Ru
 
         }
 
-        /**
+        /*
          * Marks positions of RV, R1 and R2 in a given word.
          * Creation date: (16/03/2002 3:40:11 PM)
          */
@@ -432,7 +432,7 @@ namespace Lucene.Net.Analysis.Ru
             R2 = i;
         }
 
-        /**
+        /*
          * Checks if character is a vowel..
          * Creation date: (16/03/2002 10:47:03 PM)
          * @return bool
@@ -448,7 +448,7 @@ namespace Lucene.Net.Analysis.Ru
             return false;
         }
 
-        /**
+        /*
          * Noun endings.
          * Creation date: (17/03/2002 12:14:58 AM)
          * @param stemmingZone java.lang.StringBuilder
@@ -458,7 +458,7 @@ namespace Lucene.Net.Analysis.Ru
             return findAndRemoveEnding(stemmingZone, nounEndings);
         }
 
-        /**
+        /*
          * Perfective gerund endings.
          * Creation date: (17/03/2002 12:14:58 AM)
          * @param stemmingZone java.lang.StringBuilder
@@ -472,7 +472,7 @@ namespace Lucene.Net.Analysis.Ru
                 || findAndRemoveEnding(stemmingZone, perfectiveGerundEndings2);
         }
 
-        /**
+        /*
          * Reflexive endings.
          * Creation date: (17/03/2002 12:14:58 AM)
          * @param stemmingZone java.lang.StringBuilder
@@ -482,7 +482,7 @@ namespace Lucene.Net.Analysis.Ru
             return findAndRemoveEnding(stemmingZone, reflexiveEndings);
         }
 
-        /**
+        /*
          * Insert the method's description here.
          * Creation date: (17/03/2002 12:14:58 AM)
          * @param stemmingZone java.lang.StringBuilder
@@ -501,7 +501,7 @@ namespace Lucene.Net.Analysis.Ru
             }
         }
 
-        /**
+        /*
          * Insert the method's description here.
          * Creation date: (17/03/2002 12:14:58 AM)
          * @param stemmingZone java.lang.StringBuilder
@@ -520,7 +520,7 @@ namespace Lucene.Net.Analysis.Ru
             }
         }
 
-        /**
+        /*
          * Finds the stem for given Russian word.
          * Creation date: (16/03/2002 3:36:48 PM)
          * @return java.lang.String
@@ -558,7 +558,7 @@ namespace Lucene.Net.Analysis.Ru
             return input.Substring(0, RV) + stemmingZone.ToString();
         }
 
-        /**
+        /*
          * Superlative endings.
          * Creation date: (17/03/2002 12:14:58 AM)
          * @param stemmingZone java.lang.StringBuilder
@@ -568,7 +568,7 @@ namespace Lucene.Net.Analysis.Ru
             return findAndRemoveEnding(stemmingZone, superlativeEndings);
         }
 
-        /**
+        /*
          * Undoubles N.
          * Creation date: (17/03/2002 12:14:58 AM)
          * @param stemmingZone java.lang.StringBuilder
@@ -589,7 +589,7 @@ namespace Lucene.Net.Analysis.Ru
             }
         }
 
-        /**
+        /*
          * Verb endings.
          * Creation date: (17/03/2002 12:14:58 AM)
          * @param stemmingZone java.lang.StringBuilder
@@ -603,7 +603,7 @@ namespace Lucene.Net.Analysis.Ru
                 || findAndRemoveEnding(stemmingZone, verbEndings2);
         }
 
-        /**
+        /*
          * Static method for stemming.
          */
         public static String StemWord(String theWord)

Modified: lucene.net/trunk/src/contrib/Analyzers/Shingle/ShingleAnalyzerWrapper.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Shingle/ShingleAnalyzerWrapper.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Shingle/ShingleAnalyzerWrapper.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Shingle/ShingleAnalyzerWrapper.cs Fri Oct  5 21:22:51 2012
@@ -22,7 +22,7 @@ using Version = Lucene.Net.Util.Version;
 
 namespace Lucene.Net.Analysis.Shingle
 {
-    /**
+    /*
  * A ShingleAnalyzerWrapper wraps a {@link ShingleFilter} around another {@link Analyzer}.
  * <p>
  * A shingle is another name for a token based n-gram.
@@ -48,7 +48,7 @@ namespace Lucene.Net.Analysis.Shingle
             this.maxShingleSize = maxShingleSize;
         }
 
-        /**
+        /*
          * Wraps {@link StandardAnalyzer}. 
          */
         public ShingleAnalyzerWrapper(Version matchVersion)
@@ -57,7 +57,7 @@ namespace Lucene.Net.Analysis.Shingle
             SetOverridesTokenStreamMethod<ShingleAnalyzerWrapper>();
         }
 
-        /**
+        /*
          * Wraps {@link StandardAnalyzer}. 
          */
         public ShingleAnalyzerWrapper(Version matchVersion, int nGramSize)

Modified: lucene.net/trunk/src/contrib/Analyzers/Shingle/ShingleFilter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Shingle/ShingleFilter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Shingle/ShingleFilter.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Shingle/ShingleFilter.cs Fri Oct  5 21:22:51 2012
@@ -24,7 +24,7 @@ using Lucene.Net.Util;
 
 namespace Lucene.Net.Analysis.Shingle
 {
-    /**
+    /*
  * <p>A ShingleFilter constructs shingles (token n-grams) from a token stream.
  * In other words, it creates combinations of tokens as a single token.
  *
@@ -42,36 +42,36 @@ namespace Lucene.Net.Analysis.Shingle
         private StringBuilder[] shingles;
         private String tokenType = "shingle";
 
-        /**
+        /*
          * filler token for when positionIncrement is more than 1
          */
         public static readonly char[] FILLER_TOKEN = { '_' };
 
 
-        /**
+        /*
          * default maximum shingle size is 2.
          */
         public const int DEFAULT_MAX_SHINGLE_SIZE = 2;
 
-        /**
+        /*
          * The string to use when joining adjacent tokens to form a shingle
          */
         public const String TOKEN_SEPARATOR = " ";
 
-        /**
+        /*
          * By default, we output unigrams (individual tokens) as well as shingles
          * (token n-grams).
          */
         private bool outputUnigrams = true;
 
-        /**
+        /*
          * maximum shingle size (number of tokens)
          */
         private int maxShingleSize;
 
-        /**
+        /*
          * Constructs a ShingleFilter with the specified single size from the
-         * {@link TokenStream} <code>input</code>
+         * {@link TokenStream} <c>input</c>
          *
          * @param input input stream
          * @param maxShingleSize maximum shingle size produced by the filter.
@@ -86,7 +86,7 @@ namespace Lucene.Net.Analysis.Shingle
             this.typeAtt = AddAttribute<ITypeAttribute>(); ;
         }
 
-        /**
+        /*
          * Construct a ShingleFilter with default shingle size.
          *
          * @param input input stream
@@ -96,7 +96,7 @@ namespace Lucene.Net.Analysis.Shingle
         {
         }
 
-        /**
+        /*
          * Construct a ShingleFilter with the specified token type for shingle tokens.
          *
          * @param input input stream
@@ -108,7 +108,7 @@ namespace Lucene.Net.Analysis.Shingle
             setTokenType(tokenType);
         }
 
-        /**
+        /*
          * Set the type of the shingle tokens produced by this filter.
          * (default: "shingle")
          *
@@ -119,7 +119,7 @@ namespace Lucene.Net.Analysis.Shingle
             this.tokenType = tokenType;
         }
 
-        /**
+        /*
          * Shall the output stream contain the input tokens (unigrams) as well as
          * shingles? (default: true.)
          *
@@ -131,7 +131,7 @@ namespace Lucene.Net.Analysis.Shingle
             this.outputUnigrams = outputUnigrams;
         }
 
-        /**
+        /*
          * Set the max shingle size (default: 2)
          *
          * @param maxShingleSize max size of output shingles
@@ -150,7 +150,7 @@ namespace Lucene.Net.Analysis.Shingle
             this.maxShingleSize = maxShingleSize;
         }
 
-        /**
+        /*
          * Clear the StringBuilders that are used for storing the output shingles.
          */
         private void ClearShingles()
@@ -242,7 +242,7 @@ namespace Lucene.Net.Analysis.Shingle
         private IPositionIncrementAttribute posIncrAtt;
         private ITypeAttribute typeAtt;
 
-        /**
+        /*
          * Get the next token from the input stream and push it on the token buffer.
          * If we encounter a token with position increment > 1, we put filler tokens
          * on the token buffer.
@@ -294,7 +294,7 @@ namespace Lucene.Net.Analysis.Shingle
             }
         }
 
-        /**
+        /*
          * Fill the output buffer with new shingles.
          *
          * @throws IOException if there's a problem getting the next token

Modified: lucene.net/trunk/src/contrib/Analyzers/Sinks/DateRecognizerSinkFilter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Sinks/DateRecognizerSinkFilter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Sinks/DateRecognizerSinkFilter.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Sinks/DateRecognizerSinkFilter.cs Fri Oct  5 21:22:51 2012
@@ -28,7 +28,7 @@ using Lucene.Net.Util;
 
 namespace Lucene.Net.Analysis.Sinks
 {
-    /**
+    /*
   * Attempts to parse the {@link org.apache.lucene.analysis.Token#termBuffer()} as a Date using a <see cref="System.IFormatProvider"/>.
   * If the value is a Date, it will add it to the sink.
   * <p/> 
@@ -41,7 +41,7 @@ namespace Lucene.Net.Analysis.Sinks
         protected IFormatProvider dateFormat;
         protected ITermAttribute termAtt;
 
-        /**
+        /*
          * Uses <see cref="System.Globalization.CultureInfo.CurrentCulture.DateTimeFormatInfo"/> as the <see cref="IFormatProvider"/> object.
          */
         public DateRecognizerSinkFilter()

Modified: lucene.net/trunk/src/contrib/Analyzers/Th/ThaiAnalyzer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Th/ThaiAnalyzer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Th/ThaiAnalyzer.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Th/ThaiAnalyzer.cs Fri Oct  5 21:22:51 2012
@@ -29,7 +29,7 @@ using Version=Lucene.Net.Util.Version;
 
 namespace Lucene.Net.Analysis.Th
 {
-    /**
+    /*
      * {@link Analyzer} for Thai language. It uses {@link java.text.BreakIterator} to break words.
      * @version 0.2
      *

Modified: lucene.net/trunk/src/contrib/Analyzers/Th/ThaiWordFilter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Th/ThaiWordFilter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Th/ThaiWordFilter.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Th/ThaiWordFilter.cs Fri Oct  5 21:22:51 2012
@@ -28,7 +28,7 @@ using Lucene.Net.Analysis.Tokenattribute
 
 namespace Lucene.Net.Analysis.Th
 {
-    /**
+    /*
     * {@link TokenFilter} that uses {@link java.text.BreakIterator} to break each 
      * Token that is Thai into separate Token(s) for each Thai word.
      * <p>WARNING: this filter may not work correctly with all JREs.

Modified: lucene.net/trunk/src/contrib/Analyzers/WordlistLoader.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/WordlistLoader.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/WordlistLoader.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/WordlistLoader.cs Fri Oct  5 21:22:51 2012
@@ -1,23 +1,23 @@
-///*
-// *
-// * Licensed to the Apache Software Foundation (ASF) under one
-// * or more contributor license agreements.  See the NOTICE file
-// * distributed with this work for additional information
-// * regarding copyright ownership.  The ASF licenses this file
-// * to you under the Apache License, Version 2.0 (the
-// * "License"); you may not use this file except in compliance
-// * with the License.  You may obtain a copy of the License at
-// *
-// *   http://www.apache.org/licenses/LICENSE-2.0
-// *
-// * Unless required by applicable law or agreed to in writing,
-// * software distributed under the License is distributed on an
-// * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// * KIND, either express or implied.  See the License for the
-// * specific language governing permissions and limitations
-// * under the License.
-// *
-//*/
+/*
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*/
 
 //using System;
 //using System.IO;

Modified: lucene.net/trunk/src/contrib/Core/Index/FieldEnumerator.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Core/Index/FieldEnumerator.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Core/Index/FieldEnumerator.cs (original)
+++ lucene.net/trunk/src/contrib/Core/Index/FieldEnumerator.cs Fri Oct  5 21:22:51 2012
@@ -206,6 +206,8 @@ namespace Lucene.Net.Index
             /// </summary>
             /// <param name="termEnum">The TermEnum to wrap.</param>
             /// <param name="termDocs">The TermDocs to wrap.</param>
+            /// <param name="field"> </param>
+            /// <param name="enclosing"> </param>
             public TermEnumerator(TermEnum termEnum, TermDocs termDocs, string field, FieldEnumerator<T> enclosing)
             {
                 this.termEnum = termEnum;

Modified: lucene.net/trunk/src/contrib/FastVectorHighlighter/FieldQuery.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/FastVectorHighlighter/FieldQuery.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/FastVectorHighlighter/FieldQuery.cs (original)
+++ lucene.net/trunk/src/contrib/FastVectorHighlighter/FieldQuery.cs Fri Oct  5 21:22:51 2012
@@ -311,7 +311,7 @@ namespace Lucene.Net.Search.Vectorhighli
             return termSetMap.Get(fieldMatch ? field : null);
         }
 
-        /**
+        /*
          * 
          * <param name="fieldName"></param>
          * <param name="term"></param>
@@ -333,7 +333,7 @@ namespace Lucene.Net.Search.Vectorhighli
             return null;
         }
 
-        /**
+        /*
          * 
          * <param name="fieldName"></param>
          * <param name="phraseCandidate"></param>

Modified: lucene.net/trunk/src/contrib/FastVectorHighlighter/ScoreOrderFragmentsBuilder.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/FastVectorHighlighter/ScoreOrderFragmentsBuilder.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/FastVectorHighlighter/ScoreOrderFragmentsBuilder.cs (original)
+++ lucene.net/trunk/src/contrib/FastVectorHighlighter/ScoreOrderFragmentsBuilder.cs Fri Oct  5 21:22:51 2012
@@ -23,7 +23,7 @@ using WeightedFragInfo = Lucene.Net.Sear
 
 namespace Lucene.Net.Search.Vectorhighlight
 {
-    /**
+    /*
  * An implementation of FragmentsBuilder that outputs score-order fragments.
  */
     public class ScoreOrderFragmentsBuilder : BaseFragmentsBuilder

Modified: lucene.net/trunk/src/contrib/FastVectorHighlighter/package.html
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/FastVectorHighlighter/package.html?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/FastVectorHighlighter/package.html (original)
+++ lucene.net/trunk/src/contrib/FastVectorHighlighter/package.html Fri Oct  5 21:22:51 2012
@@ -65,7 +65,7 @@ sample text.</p>
 
 <h3>Step 1.</h3>
 <p>In Step 1, Fast Vector Highlighter generates {@link org.apache.lucene.search.vectorhighlight.FieldQuery.QueryPhraseMap} from the user query.
-<code>QueryPhraseMap</code> consists of the following members:</p>
+<c>QueryPhraseMap</c> consists of the following members:</p>
 <pre>
 public class QueryPhraseMap {
   boolean terminal;
@@ -74,13 +74,13 @@ public class QueryPhraseMap {
   Map&lt;String, QueryPhraseMap&gt; subMap;
 } 
 </pre>
-<p><code>QueryPhraseMap</code> has subMap. The key of the subMap is a term 
-text in the user query and the value is a subsequent <code>QueryPhraseMap</code>.
-If the query is a term (not phrase), then the subsequent <code>QueryPhraseMap</code>
-is marked as terminal. If the query is a phrase, then the subsequent <code>QueryPhraseMap</code>
+<p><c>QueryPhraseMap</c> has subMap. The key of the subMap is a term 
+text in the user query and the value is a subsequent <c>QueryPhraseMap</c>.
+If the query is a term (not phrase), then the subsequent <c>QueryPhraseMap</c>
+is marked as terminal. If the query is a phrase, then the subsequent <c>QueryPhraseMap</c>
 is not a terminal and it has the next term text in the phrase.</p>
 
-<p>From the sample user query, the following <code>QueryPhraseMap</code> 
+<p>From the sample user query, the following <c>QueryPhraseMap</c> 
 will be generated:</p>
 <pre>
    QueryPhraseMap
@@ -96,8 +96,8 @@ will be generated:</p>
 <h3>Step 2.</h3>
 <p>In Step 2, Fast Vector Highlighter generates {@link org.apache.lucene.search.vectorhighlight.FieldTermStack}. Fast Vector Highlighter uses {@link org.apache.lucene.index.TermFreqVector} data
 (must be stored {@link org.apache.lucene.document.Field.TermVector#WITH_POSITIONS_OFFSETS})
-to generate it. <code>FieldTermStack</code> keeps the terms in the user query.
-Therefore, in this sample case, Fast Vector Highlighter generates the following <code>FieldTermStack</code>:</p>
+to generate it. <c>FieldTermStack</c> keeps the terms in the user query.
+Therefore, in this sample case, Fast Vector Highlighter generates the following <c>FieldTermStack</c>:</p>
 <pre>
    FieldTermStack
 +------------------+
@@ -111,7 +111,7 @@ where : "termText"(startOffset,endOffset
 </pre>
 <h3>Step 3.</h3>
 <p>In Step 3, Fast Vector Highlighter generates {@link org.apache.lucene.search.vectorhighlight.FieldPhraseList}
-by reference to <code>QueryPhraseMap</code> and <code>FieldTermStack</code>.</p>
+by reference to <c>QueryPhraseMap</c> and <c>FieldTermStack</c>.</p>
 <pre>
    FieldPhraseList
 +----------------+-----------------+---+
@@ -120,14 +120,14 @@ by reference to <code>QueryPhraseMap</co
 |"search library"|[(12,18),(26,33)]|w=1|
 +----------------+-----------------+---+
 </pre>
-<p>The type of each entry is <code>WeightedPhraseInfo</code> that consists of
+<p>The type of each entry is <c>WeightedPhraseInfo</c> that consists of
 an array of terms offsets and weight. The weight (Fast Vector Highlighter uses query boost to
 calculate the weight) will be taken into account when Fast Vector Highlighter creates
 {@link org.apache.lucene.search.vectorhighlight.FieldFragList} in the next step.</p>
 <h3>Step 4.</h3>
-<p>In Step 4, Fast Vector Highlighter creates <code>FieldFragList</code> by reference to
-<code>FieldPhraseList</code>. In this sample case, the following
-<code>FieldFragList</code> will be generated:</p>
+<p>In Step 4, Fast Vector Highlighter creates <c>FieldFragList</c> by reference to
+<c>FieldPhraseList</c>. In this sample case, the following
+<c>FieldFragList</c> will be generated:</p>
 <pre>
    FieldFragList
 +---------------------------------+
@@ -137,7 +137,7 @@ calculate the weight) will be taken into
 +---------------------------------+
 </pre>
 <h3>Step 5.</h3>
-<p>In Step 5, by using <code>FieldFragList</code> and the field stored data,
+<p>In Step 5, by using <c>FieldFragList</c> and the field stored data,
 Fast Vector Highlighter creates highlighted snippets!</p>
 
 </body>

Modified: lucene.net/trunk/src/contrib/Highlighter/Highlighter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Highlighter/Highlighter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Highlighter/Highlighter.cs (original)
+++ lucene.net/trunk/src/contrib/Highlighter/Highlighter.cs Fri Oct  5 21:22:51 2012
@@ -64,7 +64,7 @@ namespace Lucene.Net.Search.Highlight
         /// Highlights chosen terms in a text, extracting the most relevant section.
         /// This is a convenience method that calls <see cref="GetBestFragment(TokenStream, string)"/>
         /// </summary>
-        /// <param name="analyzer">the analyzer that will be used to split <code>text</code> into chunks</param>
+        /// <param name="analyzer">the analyzer that will be used to split <c>text</c> into chunks</param>
         /// <param name="fieldName">Name of field used to influence analyzer's tokenization policy</param>
         /// <param name="text">text to highlight terms in</param>
         /// <returns>highlighted text fragment or null if no terms found</returns>
@@ -105,7 +105,7 @@ namespace Lucene.Net.Search.Highlight
         /// Highlights chosen terms in a text, extracting the most relevant sections.
         /// This is a convenience method that calls <see cref="GetBestFragments(TokenStream, string, int)"/>
         /// </summary>
-        /// <param name="analyzer">the analyzer that will be used to split <code>text</code> into chunks</param>
+        /// <param name="analyzer">the analyzer that will be used to split <c>text</c> into chunks</param>
         /// <param name="fieldName">the name of the field being highlighted (used by analyzer)</param>
         /// <param name="text">text to highlight terms in</param>
         /// <param name="maxNumFragments">the maximum number of fragments.</param>

Modified: lucene.net/trunk/src/contrib/Highlighter/QueryScorer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Highlighter/QueryScorer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Highlighter/QueryScorer.cs (original)
+++ lucene.net/trunk/src/contrib/Highlighter/QueryScorer.cs Fri Oct  5 21:22:51 2012
@@ -131,7 +131,7 @@ namespace Lucene.Net.Search.Highlight
             skipInitExtractor = true;
         }
 
-        /// <seealso cref="IScorer.GetFragmentScore()"/>
+        /// <seealso cref="IScorer.FragmentScore"/>
         public float FragmentScore
         {
             get { return totalScore; }

Modified: lucene.net/trunk/src/contrib/Highlighter/QueryTermScorer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Highlighter/QueryTermScorer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Highlighter/QueryTermScorer.cs (original)
+++ lucene.net/trunk/src/contrib/Highlighter/QueryTermScorer.cs Fri Oct  5 21:22:51 2012
@@ -30,7 +30,7 @@ using Lucene.Net.Support;
 
 namespace Lucene.Net.Search.Highlight
 {
-    /**
+    /*
  * {@link Scorer} implementation which scores text fragments by the number of
  * unique query terms found. This class uses the {@link QueryTermExtractor}
  * class to determine the query terms and their boosts to be used.
@@ -48,7 +48,7 @@ namespace Lucene.Net.Search.Highlight
 
         private ITermAttribute termAtt;
 
-        /**
+        /*
          * 
          * @param query a Lucene query (ideally rewritten using query.rewrite before
          *        being passed to this class and the searcher)
@@ -59,7 +59,7 @@ namespace Lucene.Net.Search.Highlight
         {
         }
 
-        /**
+        /*
          * 
          * @param query a Lucene query (ideally rewritten using query.rewrite before
          *        being passed to this class and the searcher)
@@ -71,7 +71,7 @@ namespace Lucene.Net.Search.Highlight
         {
         }
 
-        /**
+        /*
          * 
          * @param query a Lucene query (ideally rewritten using query.rewrite before
          *        being passed to this class and the searcher)
@@ -176,7 +176,7 @@ namespace Lucene.Net.Search.Highlight
             // this class has no special operations to perform at end of processing
         }
 
-        /**
+        /*
          * 
          * @return The highest weighted term (useful for passing to GradientFormatter
          *         to set top end of coloring scale.

Modified: lucene.net/trunk/src/contrib/Highlighter/SimpleFragmenter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Highlighter/SimpleFragmenter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Highlighter/SimpleFragmenter.cs (original)
+++ lucene.net/trunk/src/contrib/Highlighter/SimpleFragmenter.cs Fri Oct  5 21:22:51 2012
@@ -39,7 +39,7 @@ namespace Lucene.Net.Search.Highlight
         {
         }
 
-        /**
+        /*
          * 
          * @param fragmentSize size in number of characters of each fragment
          */

Modified: lucene.net/trunk/src/contrib/Highlighter/SimpleHTMLEncoder.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Highlighter/SimpleHTMLEncoder.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Highlighter/SimpleHTMLEncoder.cs (original)
+++ lucene.net/trunk/src/contrib/Highlighter/SimpleHTMLEncoder.cs Fri Oct  5 21:22:51 2012
@@ -32,7 +32,7 @@ namespace Lucene.Net.Search.Highlight
             return HtmlEncode(originalText);
         }
 
-        /**
+        /*
          * Encode string into HTML
          */
         public static String HtmlEncode(String plainText)

Modified: lucene.net/trunk/src/contrib/Highlighter/WeightedSpanTerm.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Highlighter/WeightedSpanTerm.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Highlighter/WeightedSpanTerm.cs (original)
+++ lucene.net/trunk/src/contrib/Highlighter/WeightedSpanTerm.cs Fri Oct  5 21:22:51 2012
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

Modified: lucene.net/trunk/src/contrib/Highlighter/WeightedSpanTermExtractor.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Highlighter/WeightedSpanTermExtractor.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Highlighter/WeightedSpanTermExtractor.cs (original)
+++ lucene.net/trunk/src/contrib/Highlighter/WeightedSpanTermExtractor.cs Fri Oct  5 21:22:51 2012
@@ -353,7 +353,7 @@ namespace Lucene.Net.Search.Highlight
         }
 
         /// <summary>
-        /// Necessary to implement matches for queries against <code>defaultField</code>
+        /// Necessary to implement matches for queries against <c>defaultField</c>
         /// </summary>
         private bool FieldNameComparator(String fieldNameToCheck)
         {



Mime
View raw message