lucenenet-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From aro...@apache.org
Subject svn commit: r411501 [13/30] - in /incubator/lucene.net/trunk/C#/src: ./ Demo/DeleteFiles/ Demo/DemoLib/ Demo/DemoLib/HTML/ Demo/IndexFiles/ Demo/IndexHtml/ Demo/SearchFiles/ Lucene.Net/ Lucene.Net/Analysis/ Lucene.Net/Analysis/Standard/ Lucene.Net/Docu...
Date Sun, 04 Jun 2006 02:41:25 GMT
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Lucene.Net.xml
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Lucene.Net.xml?rev=411501&r1=411500&r2=411501&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Lucene.Net.xml (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Lucene.Net.xml Sat Jun  3 19:41:13 2006
@@ -4,530 +4,20 @@
         <name>Lucene.Net</name>
     </assembly>
     <members>
-        <member name="T:Lucene.Net.Analysis.DE.GermanAnalyzer">
-            <summary> Analyzer for German language. Supports an external list of stopwords (words that
-            will not be indexed at all) and an external list of exclusions (word that will
-            not be stemmed, but indexed).
-            A default set of stopwords is used unless an alternative list is specified, the
-            exclusion list is empty by default.
-            
-            </summary>
-            <author>  Gerhard Schwarz
-            </author>
-            <version>  $Id: GermanAnalyzer.java,v 1.16 2004/05/30 20:24:20 otis Exp $
-            </version>
-        </member>
-        <member name="T:Lucene.Net.Analysis.Analyzer">
-            <summary>An Analyzer builds TokenStreams, which analyze text.  It thus represents a
-            policy for extracting index terms from text.
-            <p>
-            Typical implementations first build a Tokenizer, which breaks the stream of
-            characters from the Reader into raw Tokens.  One or more TokenFilters may
-            then be applied to the output of the Tokenizer.
-            </p>
-            <p>
-            WARNING: You must override one of the methods defined by this class in your
-            subclass or the Analyzer will enter an infinite loop.
-            </p>
-            </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.Analyzer.TokenStream(System.String,System.IO.TextReader)">
-            <summary>Creates a TokenStream which tokenizes all the text in the provided
-            Reader.  Default implementation forwards to tokenStream(Reader) for 
-            compatibility with older version.  Override to allow Analyzer to choose 
-            strategy based on document and/or Field.  Must be able to handle null
-            Field name for backward compatibility. 
-            </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.Analyzer.TokenStream(System.IO.TextReader)">
-            <summary>Creates a TokenStream which tokenizes all the text in the provided
-            Reader.  Provided for backward compatibility only.
-            </summary>
-            <deprecated> use TokenStream(String, Reader) instead.
-            </deprecated>
-            <seealso cref="!:Reader)">
-            </seealso>
-        </member>
-        <member name="F:Lucene.Net.Analysis.DE.GermanAnalyzer.GERMAN_STOP_WORDS">
-            <summary> List of typical german stopwords.</summary>
-        </member>
-        <member name="F:Lucene.Net.Analysis.DE.GermanAnalyzer.stopSet">
-            <summary> Contains the stopwords used with the StopFilter.</summary>
-        </member>
-        <member name="F:Lucene.Net.Analysis.DE.GermanAnalyzer.exclusionSet">
-            <summary> Contains words that should be indexed but not stemmed.</summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.DE.GermanAnalyzer.#ctor">
-            <summary> Builds an analyzer.</summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.DE.GermanAnalyzer.#ctor(System.String[])">
-            <summary> Builds an analyzer with the given stop words.</summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.DE.GermanAnalyzer.#ctor(System.Collections.Hashtable)">
-            <summary> Builds an analyzer with the given stop words.</summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.DE.GermanAnalyzer.#ctor(System.IO.FileInfo)">
-            <summary> Builds an analyzer with the given stop words.</summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.DE.GermanAnalyzer.SetStemExclusionTable(System.String[])">
-            <summary> Builds an exclusionlist from an array of Strings.</summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.DE.GermanAnalyzer.SetStemExclusionTable(System.Collections.Hashtable)">
-            <summary> Builds an exclusionlist from a Hashtable.</summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.DE.GermanAnalyzer.SetStemExclusionTable(System.IO.FileInfo)">
-            <summary> Builds an exclusionlist from the words contained in the given file.</summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.DE.GermanAnalyzer.TokenStream(System.String,System.IO.TextReader)">
-            <summary> Creates a TokenStream which tokenizes all the text in the provided Reader.
-            
-            </summary>
-            <returns> A TokenStream build from a StandardTokenizer filtered with
-            StandardFilter, LowerCaseFilter, StopFilter, GermanStemFilter
-            </returns>
-        </member>
-        <member name="T:Lucene.Net.Analysis.DE.GermanStemFilter">
-            <summary> A filter that stems German words. It supports a table of words that should
-            not be stemmed at all. The stemmer used can be changed at runtime after the
-            filter object is created (as long as it is a GermanStemmer).
-            
-            </summary>
-            <author>     Gerhard Schwarz
-            </author>
-            <version>    $Id: GermanStemFilter.java,v 1.8 2004/03/30 15:54:48 otis Exp $
-            </version>
-        </member>
-        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Analysis.TokenFilter" -->
-        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Analysis.TokenStream" -->
-        <member name="M:Lucene.Net.Analysis.TokenStream.Next">
-            <summary>Returns the next token in the stream, or null at EOS. </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.TokenStream.Close">
-            <summary>Releases resources associated with this stream. </summary>
-        </member>
-        <member name="F:Lucene.Net.Analysis.TokenFilter.input">
-            <summary>The source of tokens for this filter. </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.TokenFilter.#ctor">
-            <summary>Call TokenFilter(TokenStream) instead.</summary>
-            <deprecated> 
-            </deprecated>
-        </member>
-        <member name="M:Lucene.Net.Analysis.TokenFilter.#ctor(Lucene.Net.Analysis.TokenStream)">
-            <summary>Construct a token stream filtering the given input. </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.TokenFilter.Close">
-            <summary>Close the input TokenStream. </summary>
-        </member>
-        <member name="F:Lucene.Net.Analysis.DE.GermanStemFilter.token">
-            <summary> The actual token in the input stream.</summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.DE.GermanStemFilter.#ctor(Lucene.Net.Analysis.TokenStream,System.Collections.Hashtable)">
-            <summary> Builds a GermanStemFilter that uses an exclusiontable.</summary>
-            <deprecated> Use {@link #GermanStemFilter(Lucene.Net.Analysis.TokenStream, java.util.Set)} instead.
-            </deprecated>
-        </member>
-        <member name="M:Lucene.Net.Analysis.DE.GermanStemFilter.Next">
-            <returns>  Returns the next token in the stream, or null at EOS
-            </returns>
-        </member>
-        <member name="M:Lucene.Net.Analysis.DE.GermanStemFilter.SetStemmer(Lucene.Net.Analysis.DE.GermanStemmer)">
-            <summary> Set an alternative/custom GermanStemmer for this filter.</summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.DE.GermanStemFilter.SetExclusionTable(System.Collections.Hashtable)">
-            <summary> Set an alternative exclusion list for this filter.</summary>
-            <deprecated> Use {@link #SetExclusionSet(java.util.Set)} instead.
-            </deprecated>
-        </member>
-        <member name="M:Lucene.Net.Analysis.DE.GermanStemFilter.SetExclusionSet(System.Collections.Hashtable)">
-            <summary> Set an alternative exclusion list for this filter.</summary>
-        </member>
-        <member name="T:Lucene.Net.Analysis.DE.GermanStemmer">
-            <summary> A stemmer for German words. The algorithm is based on the report
-            "A Fast and Simple Stemming Algorithm for German Words" by Jörg
-            Caumanns (joerg.caumanns@isst.fhg.de).
-            
-            </summary>
-            <author>     Gerhard Schwarz
-            </author>
-            <version>    $Id: GermanStemmer.java,v 1.11 2004/05/30 20:24:20 otis Exp $
-            </version>
-        </member>
-        <member name="F:Lucene.Net.Analysis.DE.GermanStemmer.sb">
-            <summary> Buffer for the terms while stemming them.</summary>
-        </member>
-        <member name="F:Lucene.Net.Analysis.DE.GermanStemmer.substCount">
-            <summary> Amount of characters that are removed with <tt>substitute()</tt> while stemming.</summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.DE.GermanStemmer.Stem(System.String)">
-            <summary> Stems the given term to a unique <tt>discriminator</tt>.
-            
-            </summary>
-            <param name="term"> The term that should be stemmed.
-            </param>
-            <returns>      Discriminator for <tt>term</tt>
-            </returns>
-        </member>
-        <member name="M:Lucene.Net.Analysis.DE.GermanStemmer.IsStemmable(System.String)">
-            <summary> Checks if a term could be stemmed.
-            
-            </summary>
-            <returns>  true if, and only if, the given term consists in letters.
-            </returns>
-        </member>
-        <member name="M:Lucene.Net.Analysis.DE.GermanStemmer.Strip(System.Text.StringBuilder)">
-            <summary> Suffix stripping (stemming) on the current term. The stripping is reduced
-            to the seven "base" suffixes "e", "s", "n", "t", "em", "er" and "nd",
-            from which all regular suffixes are built. The simplification causes
-            some overstemming, and way more irregular stems, but still provides unique
-            discriminators in most of those cases.
-            The algorithm is context free, except of the length restrictions.
-            </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.DE.GermanStemmer.Optimize(System.Text.StringBuilder)">
-            <summary> Performs some optimizations on the term. These optimizations are
-            contextual.
-            </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.DE.GermanStemmer.RemoveParticleDenotion(System.Text.StringBuilder)">
-            <summary> Removes a particle denotion ("ge") from a term.</summary>
-        </member>
-        <!-- Badly formed XML comment ignored for member "M:Lucene.Net.Analysis.DE.GermanStemmer.Substitute(System.Text.StringBuilder)" -->
-        <member name="M:Lucene.Net.Analysis.DE.GermanStemmer.Resubstitute(System.Text.StringBuilder)">
-            <summary> Undoes the changes made by substitute(). That are character pairs and
-            character combinations. Umlauts will remain as their corresponding vowel,
-            as "ß" remains as "ss".
-            </summary>
-        </member>
-        <member name="T:Lucene.Net.Analysis.DE.WordlistLoader">
-            <summary> Loader for text files that represent a list of stopwords.
-            
-            </summary>
-            <author>  Gerhard Schwarz
-            </author>
-            <version>  $Id: WordlistLoader.java,v 1.10 2004/03/30 15:54:48 otis Exp $
-            
-            </version>
-            <todo>  this is not specific to German, it should be moved up </todo>
-        </member>
-        <member name="M:Lucene.Net.Analysis.DE.WordlistLoader.GetWordSet(System.IO.FileInfo)">
-            <summary> Loads a text file and adds every line as an entry to a HashSet (omitting
-            leading and trailing whitespace). Every line of the file should contain only 
-            one word. The words need to be in lowercase if you make use of an
-            Analyzer which uses LowerCaseFilter (like GermanAnalyzer).
-            
-            </summary>
-            <param name="wordfile">File containing the wordlist
-            </param>
-            <returns> A HashSet with the file's words
-            </returns>
-        </member>
-        <member name="M:Lucene.Net.Analysis.DE.WordlistLoader.GetWordtable(System.String,System.String)">
-            <param name="path">     Path to the wordlist
-            </param>
-            <param name="wordfile"> Name of the wordlist
-            
-            </param>
-            <deprecated> Use {@link #GetWordSet(System.IO.FileInfo)} instead
-            </deprecated>
-        </member>
-        <member name="M:Lucene.Net.Analysis.DE.WordlistLoader.GetWordtable(System.String)">
-            <param name="wordfile"> Complete path to the wordlist
-            
-            </param>
-            <deprecated> Use {@link #GetWordSet(System.IO.FileInfo)} instead
-            </deprecated>
-        </member>
-        <member name="M:Lucene.Net.Analysis.DE.WordlistLoader.GetWordtable(System.IO.FileInfo)">
-            <param name="wordfile"> File object that points to the wordlist
-            
-            </param>
-            <deprecated> Use {@link #GetWordSet(System.IO.FileInfo)} instead
-            </deprecated>
-        </member>
-        <member name="M:Lucene.Net.Analysis.DE.WordlistLoader.MakeWordTable(System.Collections.Hashtable)">
-            <summary> Builds a wordlist table, using words as both keys and values
-            for backward compatibility.
-            
-            </summary>
-            <param name="wordSet">  stopword set
-            </param>
-        </member>
-        <member name="T:Lucene.Net.Analysis.RU.RussianAnalyzer">
-            <summary> Analyzer for Russian language. Supports an external list of stopwords (words that
-            will not be indexed at all).
-            A default set of stopwords is used unless an alternative list is specified.
-            
-            </summary>
-            <author>   Boris Okner, b.okner@rogers.com
-            </author>
-            <version>  $Id: RussianAnalyzer.java,v 1.7 2004/03/29 22:48:01 cutting Exp $
-            </version>
-        </member>
-        <member name="F:Lucene.Net.Analysis.RU.RussianAnalyzer.RUSSIAN_STOP_WORDS">
-            <summary> List of typical Russian stopwords.</summary>
-        </member>
-        <member name="F:Lucene.Net.Analysis.RU.RussianAnalyzer.stopSet">
-            <summary> Contains the stopwords used with the StopFilter.</summary>
-        </member>
-        <member name="F:Lucene.Net.Analysis.RU.RussianAnalyzer.charset">
-            <summary> Charset for Russian letters.
-            Represents encoding for 32 lowercase Russian letters.
-            Predefined charsets can be taken from RussianCharSets class
-            </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianAnalyzer.#ctor(System.Char[])">
-            <summary> Builds an analyzer.</summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianAnalyzer.#ctor(System.Char[],System.String[])">
-            <summary> Builds an analyzer with the given stop words.</summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianAnalyzer.#ctor(System.Char[],System.Collections.Hashtable)">
-            <summary> Builds an analyzer with the given stop words.</summary>
-            <todo>  create a Set version of this ctor </todo>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianAnalyzer.TokenStream(System.String,System.IO.TextReader)">
-            <summary> Creates a TokenStream which tokenizes all the text in the provided Reader.
-            
-            </summary>
-            <returns>  A TokenStream build from a RussianLetterTokenizer filtered with
-            RussianLowerCaseFilter, StopFilter, and RussianStemFilter
-            </returns>
-        </member>
-        <member name="T:Lucene.Net.Analysis.RU.RussianCharsets">
-            <summary> RussianCharsets class contains encodings schemes (charsets) and toLowerCase() method implementation
-            for russian characters in Unicode, KOI8 and CP1252.
-            Each encoding scheme contains lowercase (positions 0-31) and uppercase (position 32-63) characters.
-            One should be able to add other encoding schemes (like ISO-8859-5 or customized) by adding a new charset
-            and adding logic to toLowerCase() method for that charset.
-            
-            </summary>
-            <author>   Boris Okner, b.okner@rogers.com
-            </author>
-            <version>  $Id: RussianCharsets.java,v 1.3 2004/03/29 22:48:01 cutting Exp $
-            </version>
-        </member>
-        <member name="T:Lucene.Net.Analysis.RU.RussianLetterTokenizer">
-            <summary> A RussianLetterTokenizer is a tokenizer that extends LetterTokenizer by additionally looking up letters
-            in a given "russian charset". The problem with LetterTokenizer is that it uses the Character.isLetter() method,
-            which doesn't know how to detect letters in encodings like CP1252 and KOI8
-            (well-known problems with 0xD7 and 0xF7 chars)
-            
-            </summary>
-            <author>   Boris Okner, b.okner@rogers.com
-            </author>
-            <version>  $Id: RussianLetterTokenizer.java,v 1.3 2004/03/29 22:48:01 cutting Exp $
-            </version>
-        </member>
-        <member name="T:Lucene.Net.Analysis.CharTokenizer">
-            <summary>An abstract base class for simple, character-oriented tokenizers.</summary>
-        </member>
-        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Analysis.Tokenizer" -->
-        <member name="F:Lucene.Net.Analysis.Tokenizer.input">
-            <summary>The text source for this Tokenizer. </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.Tokenizer.#ctor">
-            <summary>Construct a tokenizer with null input. </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.Tokenizer.#ctor(System.IO.TextReader)">
-            <summary>Construct a token stream processing the given input. </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.Tokenizer.Close">
-            <summary>By default, closes the input Reader. </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.CharTokenizer.IsTokenChar(System.Char)">
-            <summary>Returns true iff a character should be included in a token.  This
-            tokenizer generates as tokens adjacent sequences of characters which
-            satisfy this predicate.  Characters for which this is false are used to
-            define token boundaries and are not included in tokens. 
-            </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.CharTokenizer.Normalize(System.Char)">
-            <summary>Called on each token character to normalize it before it is added to the
-            token.  The default implementation does nothing.  Subclasses may use this
-            to, e.g., lowercase tokens. 
-            </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.CharTokenizer.Next">
-            <summary>Returns the next token in the stream, or null at EOS. </summary>
-        </member>
-        <member name="F:Lucene.Net.Analysis.RU.RussianLetterTokenizer.charset">
-            <summary>Construct a new LetterTokenizer. </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianLetterTokenizer.IsTokenChar(System.Char)">
-            <summary> Collects only characters which satisfy
-            {@link Character#isLetter(char)}.
-            </summary>
-        </member>
-        <member name="T:Lucene.Net.Analysis.RU.RussianLowerCaseFilter">
-            <summary> Normalizes token text to lower case, analyzing given ("russian") charset.
-            
-            </summary>
-            <author>   Boris Okner, b.okner@rogers.com
-            </author>
-            <version>  $Id: RussianLowerCaseFilter.java,v 1.4 2004/03/29 22:48:01 cutting Exp $
-            </version>
-        </member>
-        <member name="T:Lucene.Net.Analysis.RU.RussianStemFilter">
-            <summary> A filter that stems Russian words. The implementation was inspired by GermanStemFilter.
-            The input should be filtered by RussianLowerCaseFilter before passing it to RussianStemFilter ,
-            because RussianStemFilter only works  with lowercase part of any "russian" charset.
-            
-            </summary>
-            <author>     Boris Okner, b.okner@rogers.com
-            </author>
-            <version>    $Id: RussianStemFilter.java,v 1.5 2004/03/29 22:48:01 cutting Exp $
-            </version>
-        </member>
-        <member name="F:Lucene.Net.Analysis.RU.RussianStemFilter.token">
-            <summary> The actual token in the input stream.</summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianStemFilter.Next">
-            <returns>  Returns the next token in the stream, or null at EOS
-            </returns>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianStemFilter.SetStemmer(Lucene.Net.Analysis.RU.RussianStemmer)">
-            <summary> Set an alternative/custom RussianStemmer for this filter.</summary>
-        </member>
-        <member name="T:Lucene.Net.Analysis.RU.RussianStemmer">
-            <summary> Russian stemming algorithm implementation (see http://snowball.sourceforge.net for detailed description).
+        <member name="T:Lucene.Net.Analysis.Standard.CharStream">
+            <summary> This interface describes a character stream that maintains line and
+            column number positions of the characters.  It also has the capability
+            to backup the stream to some extent.  An implementation of this
+            interface is used in the TokenManager implementation generated by
+            JavaCCParser.
             
+            All the methods except backup can be implemented in any fashion. backup
+            needs to be implemented correctly for the correct operation of the lexer.
+            Rest of the methods are all used to get information like line number,
+            column number and the String that constitutes a token and are not used
+            by the lexer. Hence their implementation won't affect the generated lexer's
+            operation.
             </summary>
-            <author>   Boris Okner, b.okner@rogers.com
-            </author>
-            <version>  $Id: RussianStemmer.java,v 1.5 2004/03/29 22:48:01 cutting Exp $
-            </version>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianStemmer.#ctor">
-            <summary> RussianStemmer constructor comment.</summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianStemmer.#ctor(System.Char[])">
-            <summary> RussianStemmer constructor comment.</summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianStemmer.Adjectival(System.Text.StringBuilder)">
-            <summary> Adjectival ending is an adjective ending,
-            optionally preceded by participle ending.
-            Creation date: (17/03/2002 12:14:58 AM)
-            </summary>
-            <param name="stemmingZone">java.lang.StringBuffer
-            </param>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianStemmer.Derivational(System.Text.StringBuilder)">
-            <summary> Derivational endings
-            Creation date: (17/03/2002 12:14:58 AM)
-            </summary>
-            <param name="stemmingZone">java.lang.StringBuffer
-            </param>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianStemmer.FindEnding(System.Text.StringBuilder,System.Int32,System.Char[][])">
-            <summary> Finds ending among given ending class and returns the length of ending found(0, if not found).
-            Creation date: (17/03/2002 8:18:34 PM)
-            </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianStemmer.FindAndRemoveEnding(System.Text.StringBuilder,System.Char[][])">
-            <summary> Finds the ending among the given class of endings and removes it from stemming zone.
-            Creation date: (17/03/2002 8:18:34 PM)
-            </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianStemmer.FindAndRemoveEnding(System.Text.StringBuilder,System.Char[][],System.Char[][])">
-            <summary> Finds the ending among the given class of endings, then checks if this ending was
-            preceded by any of the given predecessors, and if so, removes it from stemming zone.
-            Creation date: (17/03/2002 8:18:34 PM)
-            </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianStemmer.MarkPositions(System.String)">
-            <summary> Marks positions of RV, R1 and R2 in a given word.
-            Creation date: (16/03/2002 3:40:11 PM)
-            </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianStemmer.IsVowel(System.Char)">
-            <summary> Checks if character is a vowel..
-            Creation date: (16/03/2002 10:47:03 PM)
-            </summary>
-            <returns> boolean
-            </returns>
-            <param name="letter">char
-            </param>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianStemmer.Noun(System.Text.StringBuilder)">
-            <summary> Noun endings.
-            Creation date: (17/03/2002 12:14:58 AM)
-            </summary>
-            <param name="stemmingZone">java.lang.StringBuffer
-            </param>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianStemmer.PerfectiveGerund(System.Text.StringBuilder)">
-            <summary> Perfective gerund endings.
-            Creation date: (17/03/2002 12:14:58 AM)
-            </summary>
-            <param name="stemmingZone">java.lang.StringBuffer
-            </param>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianStemmer.Reflexive(System.Text.StringBuilder)">
-            <summary> Reflexive endings.
-            Creation date: (17/03/2002 12:14:58 AM)
-            </summary>
-            <param name="stemmingZone">java.lang.StringBuffer
-            </param>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianStemmer.RemoveI(System.Text.StringBuilder)">
-            <summary> Insert the method's description here.
-            Creation date: (17/03/2002 12:14:58 AM)
-            </summary>
-            <param name="stemmingZone">java.lang.StringBuffer
-            </param>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianStemmer.RemoveSoft(System.Text.StringBuilder)">
-            <summary> Insert the method's description here.
-            Creation date: (17/03/2002 12:14:58 AM)
-            </summary>
-            <param name="stemmingZone">java.lang.StringBuffer
-            </param>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianStemmer.SetCharset(System.Char[])">
-            <summary> Insert the method's description here.
-            Creation date: (16/03/2002 10:58:42 PM)
-            </summary>
-            <param name="newCharset">char[]
-            </param>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianStemmer.SetEndings">
-            <summary> Set ending definition as in Russian stemming algorithm.
-            Creation date: (16/03/2002 11:16:36 PM)
-            </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianStemmer.Stem(System.String)">
-            <summary> Finds the stem for given Russian word.
-            Creation date: (16/03/2002 3:36:48 PM)
-            </summary>
-            <returns> java.lang.String
-            </returns>
-            <param name="input">java.lang.String
-            </param>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianStemmer.Superlative(System.Text.StringBuilder)">
-            <summary> Superlative endings.
-            Creation date: (17/03/2002 12:14:58 AM)
-            </summary>
-            <param name="stemmingZone">java.lang.StringBuffer
-            </param>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianStemmer.UndoubleN(System.Text.StringBuilder)">
-            <summary> Undoubles N.
-            Creation date: (17/03/2002 12:14:58 AM)
-            </summary>
-            <param name="stemmingZone">java.lang.StringBuffer
-            </param>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianStemmer.Verb(System.Text.StringBuilder)">
-            <summary> Verb endings.
-            Creation date: (17/03/2002 12:14:58 AM)
-            </summary>
-            <param name="stemmingZone">java.lang.StringBuffer
-            </param>
-        </member>
-        <member name="M:Lucene.Net.Analysis.RU.RussianStemmer.Stem(System.String,System.Char[])">
-            <summary> Static method for stemming with different charsets</summary>
         </member>
         <member name="M:Lucene.Net.Analysis.Standard.CharStream.ReadChar">
             <summary> Returns the next character from the selected input.  The method
@@ -539,14 +29,14 @@
             <summary> Returns the column position of the character last read.</summary>
             <deprecated> 
             </deprecated>
-            <seealso cref="!:#getEndColumn">
+            <seealso cref="!:getEndColumn">
             </seealso>
         </member>
         <member name="M:Lucene.Net.Analysis.Standard.CharStream.GetLine">
             <summary> Returns the line number of the character last read.</summary>
             <deprecated> 
             </deprecated>
-            <seealso cref="!:#getEndLine">
+            <seealso cref="!:getEndLine">
             </seealso>
         </member>
         <member name="M:Lucene.Net.Analysis.Standard.CharStream.GetEndColumn">
@@ -691,23 +181,70 @@
         </member>
         <member name="T:Lucene.Net.Analysis.Standard.StandardAnalyzer">
             <summary> Filters {@link StandardTokenizer} with {@link StandardFilter}, {@link
-            LowerCaseFilter} and {@link StopFilter}.
+            LowerCaseFilter} and {@link StopFilter}, using a list of English stop words.
             
             </summary>
-            <version>  $Id: StandardAnalyzer.java,v 1.8 2004/03/29 22:48:01 cutting Exp $
+            <version>  $Id: StandardAnalyzer.java 219090 2005-07-14 20:36:28Z dnaber $
             </version>
         </member>
+        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Analysis.Analyzer" -->
+        <member name="M:Lucene.Net.Analysis.Analyzer.TokenStream(System.String,System.IO.TextReader)">
+            <summary>Creates a TokenStream which tokenizes all the text in the provided
+            Reader.  Default implementation forwards to tokenStream(Reader) for 
+            compatibility with older version.  Override to allow Analyzer to choose 
+            strategy based on document and/or field.  Must be able to handle null
+            field name for backward compatibility. 
+            </summary>
+        </member>
+        <member name="M:Lucene.Net.Analysis.Analyzer.TokenStream(System.IO.TextReader)">
+            <summary>Creates a TokenStream which tokenizes all the text in the provided
+            Reader.  Provided for backward compatibility only.
+            </summary>
+            <deprecated> use tokenStream(String, Reader) instead.
+            </deprecated>
+            <seealso cref="!:TokenStream(String, Reader)">
+            </seealso>
+        </member>
+        <member name="M:Lucene.Net.Analysis.Analyzer.GetPositionIncrementGap(System.String)">
+            <summary> Invoked before indexing a Field instance if
+            terms have already been added to that field.  This allows custom
+            analyzers to place an automatic position increment gap between
+            Field instances using the same field name.  The default value
+            position increment gap is 0.  With a 0 position increment gap and
+            the typical default token position increment of 1, all terms in a field,
+            including across Field instances, are in successive positions, allowing
+            exact PhraseQuery matches, for instance, across Field instance boundaries.
+            
+            </summary>
+            <param name="fieldName">Field name being indexed.
+            </param>
+            <returns> position increment gap, added to the next token emitted from {@link #TokenStream(String,Reader)}
+            </returns>
+        </member>
         <member name="F:Lucene.Net.Analysis.Standard.StandardAnalyzer.STOP_WORDS">
             <summary>An array containing some common English words that are usually not
             useful for searching. 
             </summary>
         </member>
         <member name="M:Lucene.Net.Analysis.Standard.StandardAnalyzer.#ctor">
-            <summary>Builds an analyzer. </summary>
+            <summary>Builds an analyzer with the default stop words ({@link #STOP_WORDS}). </summary>
+        </member>
+        <member name="M:Lucene.Net.Analysis.Standard.StandardAnalyzer.#ctor(System.Collections.Hashtable)">
+            <summary>Builds an analyzer with the given stop words. </summary>
         </member>
         <member name="M:Lucene.Net.Analysis.Standard.StandardAnalyzer.#ctor(System.String[])">
             <summary>Builds an analyzer with the given stop words. </summary>
         </member>
+        <member name="M:Lucene.Net.Analysis.Standard.StandardAnalyzer.#ctor(System.IO.FileInfo)">
+            <summary>Builds an analyzer with the stop words from the given file.</summary>
+            <seealso cref="!:WordlistLoader.GetWordSet(File)">
+            </seealso>
+        </member>
+        <member name="M:Lucene.Net.Analysis.Standard.StandardAnalyzer.#ctor(System.IO.TextReader)">
+            <summary>Builds an analyzer with the stop words from the given reader.</summary>
+            <seealso cref="!:WordlistLoader.GetWordSet(Reader)">
+            </seealso>
+        </member>
         <member name="M:Lucene.Net.Analysis.Standard.StandardAnalyzer.TokenStream(System.String,System.IO.TextReader)">
             <summary>Constructs a {@link StandardTokenizer} filtered by a {@link
             StandardFilter}, a {@link LowerCaseFilter} and a {@link StopFilter}. 
@@ -716,11 +253,46 @@
         <member name="T:Lucene.Net.Analysis.Standard.StandardFilter">
             <summary>Normalizes tokens extracted with {@link StandardTokenizer}. </summary>
         </member>
+        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Analysis.TokenFilter" -->
+        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Analysis.TokenStream" -->
+        <member name="M:Lucene.Net.Analysis.TokenStream.Next">
+            <summary>Returns the next token in the stream, or null at EOS. </summary>
+        </member>
+        <member name="M:Lucene.Net.Analysis.TokenStream.Close">
+            <summary>Releases resources associated with this stream. </summary>
+        </member>
+        <member name="F:Lucene.Net.Analysis.TokenFilter.input">
+            <summary>The source of tokens for this filter. </summary>
+        </member>
+        <member name="M:Lucene.Net.Analysis.TokenFilter.#ctor">
+            <summary>Call TokenFilter(TokenStream) instead.</summary>
+            <deprecated> 
+            </deprecated>
+        </member>
+        <member name="M:Lucene.Net.Analysis.TokenFilter.#ctor(Lucene.Net.Analysis.TokenStream)">
+            <summary>Construct a token stream filtering the given input. </summary>
+        </member>
+        <member name="M:Lucene.Net.Analysis.TokenFilter.Close">
+            <summary>Close the input TokenStream. </summary>
+        </member>
         <member name="M:Lucene.Net.Analysis.Standard.StandardFilter.#ctor(Lucene.Net.Analysis.TokenStream)">
             <summary>Construct filtering <i>in</i>. </summary>
         </member>
         <!-- Badly formed XML comment ignored for member "M:Lucene.Net.Analysis.Standard.StandardFilter.Next" -->
         <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Analysis.Standard.StandardTokenizer" -->
+        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Analysis.Tokenizer" -->
+        <member name="F:Lucene.Net.Analysis.Tokenizer.input">
+            <summary>The text source for this Tokenizer. </summary>
+        </member>
+        <member name="M:Lucene.Net.Analysis.Tokenizer.#ctor">
+            <summary>Construct a tokenizer with null input. </summary>
+        </member>
+        <member name="M:Lucene.Net.Analysis.Tokenizer.#ctor(System.IO.TextReader)">
+            <summary>Construct a token stream processing the given input. </summary>
+        </member>
+        <member name="M:Lucene.Net.Analysis.Tokenizer.Close">
+            <summary>By default, closes the input Reader. </summary>
+        </member>
         <member name="M:Lucene.Net.Analysis.Standard.StandardTokenizer.#ctor(System.IO.TextReader)">
             <summary>Constructs a tokenizer for this Reader. </summary>
         </member>
@@ -770,23 +342,23 @@
         <member name="F:Lucene.Net.Analysis.Standard.Token.next">
             <summary> A reference to the next regular (non-special) token from the input
             stream.  If this is the last token from the input stream, or if the
-            token manager has not read tokens beyond this one, this Field is
+            token manager has not read tokens beyond this one, this field is
             set to null.  This is true only if this token is also a regular
             token.  Otherwise, see below for a description of the contents of
-            this Field.
+            this field.
             </summary>
         </member>
         <member name="F:Lucene.Net.Analysis.Standard.Token.specialToken">
-            <summary> This Field is used to access special tokens that occur prior to this
+            <summary> This field is used to access special tokens that occur prior to this
             token, but after the immediately preceding regular (non-special) token.
-            If there are no such special tokens, this Field is set to null.
-            When there are more than one such special token, this Field refers
+            If there are no such special tokens, this field is set to null.
+            When there are more than one such special token, this field refers
             to the last of these special tokens, which in turn refers to the next
-            previous special token through its specialToken Field, and so on
-            until the first special token (whose specialToken Field is null).
+            previous special token through its specialToken field, and so on
+            until the first special token (whose specialToken field is null).
             The next fields of special tokens refer to other special tokens that
             immediately follow it (without an intervening regular token).  If there
-            is no such token, this Field is null.
+            is no such token, this field is null.
             </summary>
         </member>
         <member name="M:Lucene.Net.Analysis.Standard.Token.ToString">
@@ -850,6 +422,54 @@
             from this method for such cases in the release version of your parser.
             </summary>
         </member>
+        <member name="T:Lucene.Net.Analysis.CharTokenizer">
+            <summary>An abstract base class for simple, character-oriented tokenizers.</summary>
+        </member>
+        <member name="M:Lucene.Net.Analysis.CharTokenizer.IsTokenChar(System.Char)">
+            <summary>Returns true iff a character should be included in a token.  This
+            tokenizer generates as tokens adjacent sequences of characters which
+            satisfy this predicate.  Characters for which this is false are used to
+            define token boundaries and are not included in tokens. 
+            </summary>
+        </member>
+        <member name="M:Lucene.Net.Analysis.CharTokenizer.Normalize(System.Char)">
+            <summary>Called on each token character to normalize it before it is added to the
+            token.  The default implementation does nothing.  Subclasses may use this
+            to, e.g., lowercase tokens. 
+            </summary>
+        </member>
+        <member name="M:Lucene.Net.Analysis.CharTokenizer.Next">
+            <summary>Returns the next token in the stream, or null at EOS. </summary>
+        </member>
+        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Analysis.ISOLatin1AccentFilter" -->
+        <member name="M:Lucene.Net.Analysis.ISOLatin1AccentFilter.RemoveAccents(System.String)">
+            <summary> To replace accented characters in a String by unaccented equivalents.</summary>
+        </member>
+        <member name="T:Lucene.Net.Analysis.KeywordAnalyzer">
+            <summary> "Tokenizes" the entire stream as a single token. This is useful
+            for data like zip codes, ids, and some product names.
+            </summary>
+        </member>
+        <member name="T:Lucene.Net.Analysis.KeywordTokenizer">
+            <summary> Emits the entire input as a single token.</summary>
+        </member>
+        <member name="T:Lucene.Net.Analysis.LengthFilter">
+            <summary> Removes words that are too long and too short from the stream.
+            
+            </summary>
+            <author>  David Spencer
+            </author>
+            <version>  $Id: LengthFilter.java 347992 2005-11-21 21:41:43Z dnaber $
+            </version>
+        </member>
+        <member name="M:Lucene.Net.Analysis.LengthFilter.#ctor(Lucene.Net.Analysis.TokenStream,System.Int32,System.Int32)">
+            <summary> Build a filter that removes words that are too long or too
+            short from the text.
+            </summary>
+        </member>
+        <member name="M:Lucene.Net.Analysis.LengthFilter.Next">
+            <summary> Returns the next input Token whose termText() is the right len</summary>
+        </member>
         <member name="T:Lucene.Net.Analysis.LetterTokenizer">
             <summary>A LetterTokenizer is a tokenizer that divides text at non-letters.  That's
             to say, it defines tokens as maximal strings of adjacent letters, as defined
@@ -870,7 +490,7 @@
             <summary> Normalizes token text to lower case.
             
             </summary>
-            <version>  $Id: LowerCaseFilter.java,v 1.4 2004/03/29 22:48:00 cutting Exp $
+            <version>  $Id: LowerCaseFilter.java 150259 2004-03-29 22:48:07Z cutting $
             </version>
         </member>
         <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Analysis.LowerCaseTokenizer" -->
@@ -882,13 +502,7 @@
             {@link Character#isLetter(char)}.
             </summary>
         </member>
-        <member name="T:Lucene.Net.Analysis.PerFieldAnalyzerWrapper">
-            <summary> This analyzer is used to facilitate scenarios where different
-            fields require different analysis techniques.  Use {@link #addAnalyzer}
-            to add a non-default analyzer on a Field name basis.
-            See TestPerFieldAnalyzerWrapper.java for example usage.
-            </summary>
-        </member>
+        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Analysis.PerFieldAnalyzerWrapper" -->
         <member name="M:Lucene.Net.Analysis.PerFieldAnalyzerWrapper.#ctor(Lucene.Net.Analysis.Analyzer)">
             <summary> Constructs with default analyzer.
             
@@ -898,12 +512,12 @@
             </param>
         </member>
         <member name="M:Lucene.Net.Analysis.PerFieldAnalyzerWrapper.AddAnalyzer(System.String,Lucene.Net.Analysis.Analyzer)">
-            <summary> Defines an analyzer to use for the specified Field.
+            <summary> Defines an analyzer to use for the specified field.
             
             </summary>
-            <param name="fieldName">Field name requiring a non-default analyzer.
+            <param name="fieldName">field name requiring a non-default analyzer
             </param>
-            <param name="analyzer">non-default analyzer to use for Field
+            <param name="analyzer">non-default analyzer to use for field
             </param>
         </member>
         <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Analysis.PorterStemFilter" -->
@@ -921,13 +535,13 @@
         </member>
         <member name="M:Lucene.Net.Analysis.PorterStemmer.Reset">
             <summary> reset() resets the stemmer so it can stem another word.  If you invoke
-            the stemmer by calling add(char) and then stem(), you must call reset()
+            the stemmer by calling add(char) and then Stem(), you must call reset()
             before starting another word.
             </summary>
         </member>
         <member name="M:Lucene.Net.Analysis.PorterStemmer.Add(System.Char)">
             <summary> Add a character to the word being stemmed.  When you are finished
-            adding characters, you can call stem(void) to process the word.
+            adding characters, you can call Stem(void) to process the word.
             </summary>
         </member>
         <member name="M:Lucene.Net.Analysis.PorterStemmer.ToString">
@@ -995,9 +609,22 @@
         <member name="M:Lucene.Net.Analysis.StopAnalyzer.#ctor">
             <summary>Builds an analyzer which removes words in ENGLISH_STOP_WORDS. </summary>
         </member>
+        <member name="M:Lucene.Net.Analysis.StopAnalyzer.#ctor(System.Collections.Hashtable)">
+            <summary>Builds an analyzer with the stop words from the given set.</summary>
+        </member>
         <member name="M:Lucene.Net.Analysis.StopAnalyzer.#ctor(System.String[])">
             <summary>Builds an analyzer which removes words in the provided array. </summary>
         </member>
+        <member name="M:Lucene.Net.Analysis.StopAnalyzer.#ctor(System.IO.FileInfo)">
+            <summary>Builds an analyzer with the stop words from the given file.</summary>
+            <seealso cref="!:WordlistLoader.GetWordSet(File)">
+            </seealso>
+        </member>
+        <member name="M:Lucene.Net.Analysis.StopAnalyzer.#ctor(System.IO.TextReader)">
+            <summary>Builds an analyzer with the stop words from the given reader.</summary>
+            <seealso cref="!:WordlistLoader.GetWordSet(Reader)">
+            </seealso>
+        </member>
         <member name="M:Lucene.Net.Analysis.StopAnalyzer.TokenStream(System.String,System.IO.TextReader)">
             <summary>Filters LowerCaseTokenizer with StopFilter. </summary>
         </member>
@@ -1005,6 +632,9 @@
             <summary> Removes stop words from a token stream.</summary>
         </member>
         <member name="M:Lucene.Net.Analysis.StopFilter.#ctor(Lucene.Net.Analysis.TokenStream,System.String[])">
+            <summary> Construct a token stream filtering the given input.</summary>
+        </member>
+        <member name="M:Lucene.Net.Analysis.StopFilter.#ctor(Lucene.Net.Analysis.TokenStream,System.String[],System.Boolean)">
             <summary> Constructs a filter which removes words from the input
             TokenStream that are named in the array of words.
             </summary>
@@ -1017,6 +647,34 @@
             <deprecated> Use {@link #StopFilter(TokenStream, Set)} instead
             </deprecated>
         </member>
+        <member name="M:Lucene.Net.Analysis.StopFilter.#ctor(Lucene.Net.Analysis.TokenStream,System.Collections.Hashtable,System.Boolean)">
+            <summary> Constructs a filter which removes words from the input
+            TokenStream that are named in the Hashtable.
+            If ignoreCase is true, all keys in the stopTable should already
+            be lowercased.
+            </summary>
+            <deprecated> Use {@link #StopFilter(TokenStream, Set)} instead
+            </deprecated>
+        </member>
+        <member name="M:Lucene.Net.Analysis.StopFilter.#ctor(Lucene.Net.Analysis.TokenStream,System.Collections.Hashtable,System.Boolean,System.Int32)">
+            <summary> Construct a token stream filtering the given input.</summary>
+            <param name="input">
+            </param>
+            <param name="stopWords">The set of Stop Words, as Strings.  If ignoreCase is true, all strings should be lower cased
+            </param>
+            <param name="ignoreCase">-Ignore case when stopping.  The stopWords set must be setup to contain only lower case words 
+            </param>
+        </member>
+        <member name="M:Lucene.Net.Analysis.StopFilter.#ctor(Lucene.Net.Analysis.TokenStream,System.Collections.Hashtable,System.Int32)">
+            <summary> Constructs a filter which removes words from the input
+            TokenStream that are named in the Set.
+            It is crucial that an efficient Set implementation is used
+            for maximum performance.
+            
+            </summary>
+            <seealso cref="!:MakeStopSet(java.lang.String[])">
+            </seealso>
+        </member>
         <member name="M:Lucene.Net.Analysis.StopFilter.MakeStopTable(System.String[])">
             <summary> Builds a Hashtable from an array of stop words,
             appropriate for passing into the StopFilter constructor.
@@ -1027,19 +685,40 @@
             <deprecated> Use {@link #MakeStopSet(String[])} instead.
             </deprecated>
         </member>
+        <member name="M:Lucene.Net.Analysis.StopFilter.makeStopTable(System.String[],System.Boolean)">
+            <summary> Builds a Hashtable from an array of stop words,
+            appropriate for passing into the StopFilter constructor.
+            This permits this table construction to be cached once when
+            an Analyzer is constructed.
+            </summary>
+            <deprecated> Use {@link #MakeStopSet(java.lang.String[], boolean)}  instead.
+            </deprecated>
+        </member>
         <member name="M:Lucene.Net.Analysis.StopFilter.MakeStopSet(System.String[])">
             <summary> Builds a Set from an array of stop words,
             appropriate for passing into the StopFilter constructor.
             This permits this stopWords construction to be cached once when
             an Analyzer is constructed.
+            
             </summary>
+            <seealso cref="!:MakeStopSet(java.lang.String[], boolean) passing false to ignoreCase">
+            </seealso>
+        </member>
+        <member name="M:Lucene.Net.Analysis.StopFilter.MakeStopSet(System.String[],System.Boolean)">
+            <summary> </summary>
+            <param name="stopWords">
+            </param>
+            <param name="ignoreCase">If true, all words are lower cased first.  
+            </param>
+            <returns> a Set containing the words
+            </returns>
         </member>
         <member name="M:Lucene.Net.Analysis.StopFilter.Next">
             <summary> Returns the next input Token whose termText() is not a stop word.</summary>
         </member>
         <member name="T:Lucene.Net.Analysis.Token">
-            <summary>A Token is an occurence of a term from the text of a Field.  It consists of
-            a term's text, the start and end offset of the term in the text of the Field,
+            <summary>A Token is an occurence of a term from the text of a field.  It consists of
+            a term's text, the start and end offset of the term in the text of the field,
             and a type string.
             The start and end offsets permit applications to re-associate a token with
             its source text, e.g., to display highlighted query terms in a document
@@ -1056,7 +735,7 @@
         <!-- Badly formed XML comment ignored for member "M:Lucene.Net.Analysis.Token.SetPositionIncrement(System.Int32)" -->
         <member name="M:Lucene.Net.Analysis.Token.GetPositionIncrement">
             <summary>Returns the position increment of this Token.</summary>
-            <seealso cref="!:#setPositionIncrement">
+            <seealso cref="!:setPositionIncrement">
             </seealso>
         </member>
         <member name="M:Lucene.Net.Analysis.Token.TermText">
@@ -1094,6 +773,70 @@
             {@link Character#isWhitespace(char)}.
             </summary>
         </member>
+        <member name="T:Lucene.Net.Analysis.WordlistLoader">
+            <summary> Loader for text files that represent a list of stopwords.
+            
+            </summary>
+            <author>  Gerhard Schwarz
+            </author>
+            <version>  $Id: WordlistLoader.java 192989 2005-06-22 19:59:03Z dnaber $
+            </version>
+        </member>
+        <member name="M:Lucene.Net.Analysis.WordlistLoader.GetWordSet(System.IO.FileInfo)">
+            <summary> Loads a text file and adds every line as an entry to a HashSet (omitting
+            leading and trailing whitespace). Every line of the file should contain only
+            one word. The words need to be in lowercase if you make use of an
+            Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
+            
+            </summary>
+            <param name="wordfile">File containing the wordlist
+            </param>
+            <returns> A HashSet with the file's words
+            </returns>
+        </member>
+        <member name="M:Lucene.Net.Analysis.WordlistLoader.GetWordSet(System.IO.TextReader)">
+            <summary> Reads lines from a Reader and adds every line as an entry to a HashSet (omitting
+            leading and trailing whitespace). Every line of the Reader should contain only
+            one word. The words need to be in lowercase if you make use of an
+            Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
+            
+            </summary>
+            <param name="reader">Reader containing the wordlist
+            </param>
+            <returns> A HashSet with the reader's words
+            </returns>
+        </member>
+        <member name="M:Lucene.Net.Analysis.WordlistLoader.GetWordtable(System.String,System.String)">
+            <param name="path">     Path to the wordlist
+            </param>
+            <param name="wordfile"> Name of the wordlist
+            
+            </param>
+            <deprecated> Use {@link #GetWordSet(File)} instead
+            </deprecated>
+        </member>
+        <member name="M:Lucene.Net.Analysis.WordlistLoader.GetWordtable(System.String)">
+            <param name="wordfile"> Complete path to the wordlist
+            
+            </param>
+            <deprecated> Use {@link #GetWordSet(File)} instead
+            </deprecated>
+        </member>
+        <member name="M:Lucene.Net.Analysis.WordlistLoader.GetWordtable(System.IO.FileInfo)">
+            <param name="wordfile"> File object that points to the wordlist
+            
+            </param>
+            <deprecated> Use {@link #GetWordSet(File)} instead
+            </deprecated>
+        </member>
+        <member name="M:Lucene.Net.Analysis.WordlistLoader.MakeWordTable(System.Collections.Hashtable)">
+            <summary> Builds a wordlist table, using words as both keys and values
+            for backward compatibility.
+            
+            </summary>
+            <param name="wordSet">  stopword set
+            </param>
+        </member>
         <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Documents.DateField" -->
         <member name="M:Lucene.Net.Documents.DateField.DateToString(System.DateTime)">
             <summary> Converts a Date to a string suitable for indexing.</summary>
@@ -1113,6 +856,86 @@
         <member name="M:Lucene.Net.Documents.DateField.StringToDate(System.String)">
             <summary>Converts a string-encoded date into a Date object. </summary>
         </member>
+        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Documents.DateTools" -->
+        <member name="M:Lucene.Net.Documents.DateTools.DateToString(System.DateTime,Lucene.Net.Documents.DateTools.Resolution)">
+            <summary> Converts a Date to a string suitable for indexing.
+            
+            </summary>
+            <param name="date">the date to be converted
+            </param>
+            <param name="resolution">the desired resolution, see
+            {@link #Round(Date, DateTools.Resolution)}
+            </param>
+            <returns> a string in format <code>yyyyMMddHHmmssSSS</code> or shorter,
+            depeding on <code>resolution</code>
+            </returns>
+        </member>
+        <member name="M:Lucene.Net.Documents.DateTools.TimeToString(System.Int64,Lucene.Net.Documents.DateTools.Resolution)">
+            <summary> Converts a millisecond time to a string suitable for indexing.
+            
+            </summary>
+            <param name="time">the date expressed as milliseconds since January 1, 1970, 00:00:00 GMT
+            </param>
+            <param name="resolution">the desired resolution, see
+            {@link #Round(long, DateTools.Resolution)}
+            </param>
+            <returns> a string in format <code>yyyyMMddHHmmssSSS</code> or shorter,
+            depeding on <code>resolution</code>
+            </returns>
+        </member>
+        <member name="M:Lucene.Net.Documents.DateTools.StringToTime(System.String)">
+            <summary> Converts a string produced by <code>timeToString</code> or
+            <code>DateToString</code> back to a time, represented as the
+            number of milliseconds since January 1, 1970, 00:00:00 GMT.
+            
+            </summary>
+            <param name="dateString">the date string to be converted
+            </param>
+            <returns> the number of milliseconds since January 1, 1970, 00:00:00 GMT
+            </returns>
+            <throws>  ParseException if <code>dateString</code> is not in the  </throws>
+            <summary>  expected format 
+            </summary>
+        </member>
+        <member name="M:Lucene.Net.Documents.DateTools.StringToDate(System.String)">
+            <summary> Converts a string produced by <code>timeToString</code> or
+            <code>DateToString</code> back to a time, represented as a
+            Date object.
+            
+            </summary>
+            <param name="dateString">the date string to be converted
+            </param>
+            <returns> the parsed time as a Date object 
+            </returns>
+            <throws>  ParseException if <code>dateString</code> is not in the  </throws>
+            <summary>  expected format 
+            </summary>
+        </member>
+        <member name="M:Lucene.Net.Documents.DateTools.Round(System.DateTime,Lucene.Net.Documents.DateTools.Resolution)">
+            <summary> Limit a date's resolution. For example, the date <code>2004-09-21 13:50:11</code>
+            will be changed to <code>2004-09-01 00:00:00</code> when using
+            <code>Resolution.MONTH</code>. 
+            
+            </summary>
+            <param name="resolution">The desired resolution of the date to be returned
+            </param>
+            <returns> the date with all values more precise than <code>resolution</code>
+            set to 0 or 1
+            </returns>
+        </member>
+        <member name="M:Lucene.Net.Documents.DateTools.Round(System.Int64,Lucene.Net.Documents.DateTools.Resolution)">
+            <summary> Limit a date's resolution. For example, the date <code>1095767411000</code>
+            (which represents 2004-09-21 13:50:11) will be changed to 
+            <code>1093989600000</code> (2004-09-01 00:00:00) when using
+            <code>Resolution.MONTH</code>.
+            
+            </summary>
+            <param name="resolution">The desired resolution of the date to be returned
+            </param>
+            <returns> the date with all values more precise than <code>resolution</code>
+            set to 0 or 1, expressed as milliseconds since January 1, 1970, 00:00:00 GMT
+            </returns>
+        </member>
         <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Documents.Document" -->
         <member name="M:Lucene.Net.Documents.Document.#ctor">
             <summary>Constructs a new document with no fields. </summary>
@@ -1120,7 +943,7 @@
         <!-- Badly formed XML comment ignored for member "M:Lucene.Net.Documents.Document.SetBoost(System.Single)" -->
         <!-- Badly formed XML comment ignored for member "M:Lucene.Net.Documents.Document.GetBoost" -->
         <member name="M:Lucene.Net.Documents.Document.Add(Lucene.Net.Documents.Field)">
-            <summary> <p>Adds a Field to a document.  Several fields may be added with
+            <summary> <p>Adds a field to a document.  Several fields may be added with
             the same name.  In this case, if the fields are indexed, their text is
             treated as though appended for the purposes of search.</p>
             <p> Note that add like the removeField(s) methods only makes sense 
@@ -1131,9 +954,9 @@
             </summary>
         </member>
         <member name="M:Lucene.Net.Documents.Document.RemoveField(System.String)">
-            <summary> <p>Removes Field with the specified name from the document.
-            If multiple fields exist with this name, this method removes the first Field that has been added.
-            If there is no Field with the specified name, the document remains unchanged.</p>
+            <summary> <p>Removes field with the specified name from the document.
+            If multiple fields exist with this name, this method removes the first field that has been added.
+            If there is no field with the specified name, the document remains unchanged.</p>
             <p> Note that the removeField(s) methods like the add method only make sense 
             prior to adding a document to an index. These methods cannot
             be used to change the content of an existing index! In order to achieve this,
@@ -1143,7 +966,7 @@
         </member>
         <member name="M:Lucene.Net.Documents.Document.RemoveFields(System.String)">
             <summary> <p>Removes all fields with the given name from the document.
-            If there is no Field with the specified name, the document remains unchanged.</p>
+            If there is no field with the specified name, the document remains unchanged.</p>
             <p> Note that the removeField(s) methods like the add method only make sense 
             prior to adding a document to an index. These methods cannot
             be used to change the content of an existing index! In order to achieve this,
@@ -1152,15 +975,16 @@
             </summary>
         </member>
         <member name="M:Lucene.Net.Documents.Document.GetField(System.String)">
-            <summary>Returns a Field with the given name if any exist in this document, or
+            <summary>Returns a field with the given name if any exist in this document, or
             null.  If multiple fields exists with this name, this method returns the
             first value added.
             </summary>
         </member>
         <member name="M:Lucene.Net.Documents.Document.Get(System.String)">
-            <summary>Returns the string value of the Field with the given name if any exist in
+            <summary>Returns the string value of the field with the given name if any exist in
             this document, or null.  If multiple fields exist with this name, this
-            method returns the first value added.
+            method returns the first value added. If only binary fields with this name
+            exist, returns null.
             </summary>
         </member>
         <member name="M:Lucene.Net.Documents.Document.Fields">
@@ -1171,26 +995,49 @@
             This method can return <code>null</code>.
             
             </summary>
-            <param name="name">the name of the Field
+            <param name="name">the name of the field
             </param>
             <returns> a <code>Field[]</code> array
             </returns>
         </member>
         <member name="M:Lucene.Net.Documents.Document.GetValues(System.String)">
-            <summary> Returns an array of values of the Field specified as the method parameter.
+            <summary> Returns an array of values of the field specified as the method parameter.
             This method can return <code>null</code>.
             
             </summary>
-            <param name="name">the name of the Field
+            <param name="name">the name of the field
             </param>
-            <returns> a <code>String[]</code> of Field values
+            <returns> a <code>String[]</code> of field values
+            </returns>
+        </member>
+        <member name="M:Lucene.Net.Documents.Document.GetBinaryValues(System.String)">
+            <summary> Returns an array of byte arrays for of the fields that have the name specified
+            as the method parameter. This method will return <code>null</code> if no
+            binary fields with the specified name are available.
+            
+            </summary>
+            <param name="name">the name of the field
+            </param>
+            <returns> a  <code>byte[][]</code> of binary field values.
+            </returns>
+        </member>
+        <member name="M:Lucene.Net.Documents.Document.GetBinaryValue(System.String)">
+            <summary> Returns an array of bytes for the first (or only) field that has the name
+            specified as the method parameter. This method will return <code>null</code>
+            if no binary fields with the specified name are available.
+            There may be non-binary fields with the same name.
+            
+            </summary>
+            <param name="name">the name of the field.
+            </param>
+            <returns> a <code>byte[]</code> containing the binary field value.
             </returns>
         </member>
         <member name="M:Lucene.Net.Documents.Document.ToString">
             <summary>Prints the fields of a document for human consumption. </summary>
         </member>
         <member name="T:Lucene.Net.Documents.Field">
-            <summary>A Field is a section of a Document.  Each Field has two parts, a name and a
+            <summary>A field is a section of a Document.  Each field has two parts, a name and a
             value.  Values may be free text, provided as a String or as a Reader, or they
             may be atomic keywords, which are not further processed.  Such keywords may
             be used to represent dates, urls, etc.  Fields are optionally stored in the
@@ -1203,117 +1050,369 @@
             <summary>Constructs a String-valued Field that is not tokenized, but is indexed
             and stored.  Useful for non-text fields, e.g. date or url.  
             </summary>
+            <deprecated> use {@link #Field(String, String, Field.Store, Field.Index)
+            Field(name, value, Field.Store.YES, Field.Index.UN_TOKENIZED)} instead 
+            </deprecated>
         </member>
         <member name="M:Lucene.Net.Documents.Field.UnIndexed(System.String,System.String)">
             <summary>Constructs a String-valued Field that is not tokenized nor indexed,
-            but is stored in the index, for return with hits. 
+            but is stored in the index, for return with hits.
             </summary>
+            <deprecated> use {@link #Field(String, String, Field.Store, Field.Index)
+            Field(name, value, Field.Store.YES, Field.Index.NO)} instead 
+            </deprecated>
         </member>
         <member name="M:Lucene.Net.Documents.Field.Text(System.String,System.String)">
             <summary>Constructs a String-valued Field that is tokenized and indexed,
             and is stored in the index, for return with hits.  Useful for short text
-            fields, like "title" or "subject". Term vector will not be stored for this Field. 
+            fields, like "title" or "subject". Term vector will not be stored for this field.
             </summary>
+            <deprecated> use {@link #Field(String, String, Field.Store, Field.Index)
+            Field(name, value, Field.Store.YES, Field.Index.TOKENIZED)} instead 
+            </deprecated>
         </member>
         <member name="M:Lucene.Net.Documents.Field.Keyword(System.String,System.DateTime)">
             <summary>Constructs a Date-valued Field that is not tokenized and is indexed,
-            and stored in the index, for return with hits. 
+            and stored in the index, for return with hits.
             </summary>
+            <deprecated> use {@link #Field(String, String, Field.Store, Field.Index)
+            Field(name, value, Field.Store.YES, Field.Index.UN_TOKENIZED)} instead 
+            </deprecated>
         </member>
         <member name="M:Lucene.Net.Documents.Field.Text(System.String,System.String,System.Boolean)">
             <summary>Constructs a String-valued Field that is tokenized and indexed,
             and is stored in the index, for return with hits.  Useful for short text
-            fields, like "title" or "subject". 
+            fields, like "title" or "subject".
             </summary>
+            <deprecated> use {@link #Field(String, String, Field.Store, Field.Index, Field.TermVector)
+            Field(name, value, Field.Store.YES, Field.Index.TOKENIZED, storeTermVector)} instead 
+            </deprecated>
         </member>
         <member name="M:Lucene.Net.Documents.Field.UnStored(System.String,System.String)">
             <summary>Constructs a String-valued Field that is tokenized and indexed,
-            but that is not stored in the index.  Term vector will not be stored for this Field. 
+            but that is not stored in the index.  Term vector will not be stored for this field.
             </summary>
+            <deprecated> use {@link #Field(String, String, Field.Store, Field.Index)
+            Field(name, value, Field.Store.NO, Field.Index.TOKENIZED)} instead 
+            </deprecated>
         </member>
         <member name="M:Lucene.Net.Documents.Field.UnStored(System.String,System.String,System.Boolean)">
             <summary>Constructs a String-valued Field that is tokenized and indexed,
-            but that is not stored in the index. 
+            but that is not stored in the index.
             </summary>
+            <deprecated> use {@link #Field(String, String, Field.Store, Field.Index, Field.TermVector)
+            Field(name, value, Field.Store.NO, Field.Index.TOKENIZED, storeTermVector)} instead 
+            </deprecated>
         </member>
         <member name="M:Lucene.Net.Documents.Field.Text(System.String,System.IO.TextReader)">
             <summary>Constructs a Reader-valued Field that is tokenized and indexed, but is
             not stored in the index verbatim.  Useful for longer text fields, like
-            "body". Term vector will not be stored for this Field. 
+            "body". Term vector will not be stored for this field.
             </summary>
+            <deprecated> use {@link #Field(String, Reader) Field(name, value)} instead 
+            </deprecated>
         </member>
         <member name="M:Lucene.Net.Documents.Field.Text(System.String,System.IO.TextReader,System.Boolean)">
             <summary>Constructs a Reader-valued Field that is tokenized and indexed, but is
             not stored in the index verbatim.  Useful for longer text fields, like
-            "body". 
+            "body".
             </summary>
+            <deprecated> use {@link #Field(String, Reader, Field.TermVector)
+            Field(name, value, storeTermVector)} instead 
+            </deprecated>
         </member>
         <member name="M:Lucene.Net.Documents.Field.Name">
-            <summary>The name of the Field (e.g., "date", "subject", "title", or "body")
-            as an interned string. 
+            <summary>Returns the name of the field as an interned string.
+            For example "date", "title", "body", ...
             </summary>
         </member>
         <member name="M:Lucene.Net.Documents.Field.StringValue">
-            <summary>The value of the Field as a String, or null.  If null, the Reader value
-            is used.  Exactly one of stringValue() and readerValue() must be set. 
+            <summary>The value of the field as a String, or null.  If null, the Reader value
+            or binary value is used.  Exactly one of stringValue(), readerValue(), and
+            binaryValue() must be set. 
             </summary>
         </member>
         <member name="M:Lucene.Net.Documents.Field.ReaderValue">
-            <summary>The value of the Field as a Reader, or null.  If null, the String value
-            is used.  Exactly one of stringValue() and readerValue() must be set. 
+            <summary>The value of the field as a Reader, or null.  If null, the String value
+            or binary value is  used.  Exactly one of stringValue(), readerValue(),
+            and binaryValue() must be set. 
+            </summary>
+        </member>
+        <member name="M:Lucene.Net.Documents.Field.BinaryValue">
+            <summary>The value of the field in Binary, or null.  If null, the Reader or
+            String value is used.  Exactly one of stringValue(), readerValue() and
+            binaryValue() must be set. 
+            </summary>
+        </member>
+        <member name="M:Lucene.Net.Documents.Field.#ctor(System.String,System.String,Lucene.Net.Documents.Field.Store,Lucene.Net.Documents.Field.Index)">
+            <summary> Create a field by specifying its name, value and how it will
+            be saved in the index. Term vectors will not be stored in the index.
+            
+            </summary>
+            <param name="name">The name of the field
+            </param>
+            <param name="value">The string to process
+            </param>
+            <param name="store">Whether <code>value</code> should be stored in the index
+            </param>
+            <param name="index">Whether the field should be indexed, and if so, if it should
+            be tokenized before indexing 
+            </param>
+            <throws>  NullPointerException if name or value is <code>null</code> </throws>
+            <throws>  IllegalArgumentException if the field is neither stored nor indexed  </throws>
+        </member>
+        <member name="M:Lucene.Net.Documents.Field.#ctor(System.String,System.String,Lucene.Net.Documents.Field.Store,Lucene.Net.Documents.Field.Index,Lucene.Net.Documents.Field.TermVector)">
+            <summary> Create a field by specifying its name, value and how it will
+            be saved in the index.
+            
+            </summary>
+            <param name="name">The name of the field
+            </param>
+            <param name="value">The string to process
+            </param>
+            <param name="store">Whether <code>value</code> should be stored in the index
+            </param>
+            <param name="index">Whether the field should be indexed, and if so, if it should
+            be tokenized before indexing 
+            </param>
+            <param name="termVector">Whether term vector should be stored
+            </param>
+            <throws>  NullPointerException if name or value is <code>null</code> </throws>
+            <throws>  IllegalArgumentException in any of the following situations: </throws>
+            <summary> <ul> 
+            <li>the field is neither stored nor indexed</li> 
+            <li>the field is not indexed but termVector is <code>TermVector.YES</code></li>
+            </ul> 
+            </summary>
+        </member>
+        <member name="M:Lucene.Net.Documents.Field.#ctor(System.String,System.IO.TextReader)">
+            <summary> Create a tokenized and indexed field that is not stored. Term vectors will
+            not be stored.
+            
             </summary>
+            <param name="name">The name of the field
+            </param>
+            <param name="reader">The reader with the content
+            </param>
+            <throws>  NullPointerException if name or reader is <code>null</code> </throws>
+        </member>
+        <member name="M:Lucene.Net.Documents.Field.#ctor(System.String,System.IO.TextReader,Lucene.Net.Documents.Field.TermVector)">
+            <summary> Create a tokenized and indexed field that is not stored, optionally with 
+            storing term vectors.
+            
+            </summary>
+            <param name="name">The name of the field
+            </param>
+            <param name="reader">The reader with the content
+            </param>
+            <param name="termVector">Whether term vector should be stored
+            </param>
+            <throws>  NullPointerException if name or reader is <code>null</code> </throws>
         </member>
         <member name="M:Lucene.Net.Documents.Field.#ctor(System.String,System.String,System.Boolean,System.Boolean,System.Boolean)">
-            <summary>Create a Field by specifying all parameters except for <code>storeTermVector</code>,
+            <summary>Create a field by specifying all parameters except for <code>storeTermVector</code>,
             which is set to <code>false</code>.
+            
+            </summary>
+            <deprecated> use {@link #Field(String, String, Field.Store, Field.Index)} instead
+            </deprecated>
+        </member>
+        <member name="M:Lucene.Net.Documents.Field.#ctor(System.String,System.Byte[],Lucene.Net.Documents.Field.Store)">
+            <summary> Create a stored field with binary value. Optionally the value may be compressed.
+            
             </summary>
+            <param name="name">The name of the field
+            </param>
+            <param name="value">The binary value
+            </param>
+            <param name="store">How <code>value</code> should be stored (compressed or not.)
+            </param>
         </member>
         <member name="M:Lucene.Net.Documents.Field.#ctor(System.String,System.String,System.Boolean,System.Boolean,System.Boolean,System.Boolean)">
             <summary> </summary>
-            <param name="name">The name of the Field
+            <param name="name">The name of the field
             </param>
             <param name="string">The string to process
             </param>
-            <param name="store">true if the Field should store the string
+            <param name="store">true if the field should store the string
             </param>
-            <param name="index">true if the Field should be indexed
+            <param name="index">true if the field should be indexed
             </param>
-            <param name="token">true if the Field should be tokenized
+            <param name="token">true if the field should be tokenized
             </param>
             <param name="storeTermVector">true if we should store the Term Vector info
+            
             </param>
+            <deprecated> use {@link #Field(String, String, Field.Store, Field.Index, Field.TermVector)} instead
+            </deprecated>
         </member>
         <member name="M:Lucene.Net.Documents.Field.IsStored">
-            <summary>True iff the value of the Field is to be stored in the index for return
-            with search hits.  It is an error for this to be true if a Field is
+            <summary>True iff the value of the field is to be stored in the index for return
+            with search hits.  It is an error for this to be true if a field is
             Reader-valued. 
             </summary>
         </member>
         <member name="M:Lucene.Net.Documents.Field.IsIndexed">
-            <summary>True iff the value of the Field is to be indexed, so that it may be
+            <summary>True iff the value of the field is to be indexed, so that it may be
             searched on. 
             </summary>
         </member>
         <member name="M:Lucene.Net.Documents.Field.IsTokenized">
-            <summary>True iff the value of the Field should be tokenized as text prior to
+            <summary>True iff the value of the field should be tokenized as text prior to
             indexing.  Un-tokenized fields are indexed as a single word and may not be
             Reader-valued. 
             </summary>
         </member>
+        <member name="M:Lucene.Net.Documents.Field.IsCompressed">
+            <summary>True if the value of the field is stored and compressed within the index </summary>
+        </member>
         <member name="M:Lucene.Net.Documents.Field.IsTermVectorStored">
-            <summary>True iff the term or terms used to index this Field are stored as a term
+            <summary>True iff the term or terms used to index this field are stored as a term
             vector, available from {@link IndexReader#GetTermFreqVector(int,String)}.
-            These methods do not provide access to the original content of the Field,
+            These methods do not provide access to the original content of the field,
             only to terms used to index it. If the original content must be
             preserved, use the <code>stored</code> attribute instead.
             
             </summary>
-            <seealso cref="!:String)">
+            <seealso cref="M:Lucene.Net.Index.IndexReader.GetTermFreqVector(System.Int32,System.String)">
             </seealso>
         </member>
+        <member name="M:Lucene.Net.Documents.Field.IsStoreOffsetWithTermVector">
+            <summary> True iff terms are stored as term vector together with their offsets 
+            (start and end positon in source text).
+            </summary>
+        </member>
+        <member name="M:Lucene.Net.Documents.Field.IsStorePositionWithTermVector">
+            <summary> True iff terms are stored as term vector together with their token positions.</summary>
+        </member>
+        <member name="M:Lucene.Net.Documents.Field.IsBinary">
+            <summary>True iff the value of the filed is stored as binary </summary>
+        </member>
+        <member name="M:Lucene.Net.Documents.Field.GetOmitNorms">
+            <summary>True if norms are omitted for this indexed field </summary>
+        </member>
+        <member name="M:Lucene.Net.Documents.Field.SetOmitNorms(System.Boolean)">
+            <summary>Expert:
+            
+            If set, omit normalization factors associated with this indexed field.
+            This effectively disables indexing boosts and length normalization for this field.
+            </summary>
+        </member>
         <member name="M:Lucene.Net.Documents.Field.ToString">
             <summary>Prints a Field for human consumption. </summary>
         </member>
+        <member name="T:Lucene.Net.Util.Parameter">
+            <summary> A serializable Enum class.</summary>
+        </member>
+        <member name="M:Lucene.Net.Util.Parameter.ReadResolve">
+            <summary> Resolves the deserialized instance to the local reference for accurate
+            equals() and == comparisons.
+            
+            </summary>
+            <returns> a reference to Parameter as resolved in the local VM
+            </returns>
+            <throws>  ObjectStreamException </throws>
+        </member>
+        <member name="F:Lucene.Net.Documents.Field.Store.COMPRESS">
+            <summary>Store the original field value in the index in a compressed form. This is
+            useful for long documents and for binary valued fields.
+            </summary>
+        </member>
+        <member name="F:Lucene.Net.Documents.Field.Store.YES">
+            <summary>Store the original field value in the index. This is useful for short texts
+            like a document's title which should be displayed with the results. The
+            value is stored in its original form, i.e. no analyzer is used before it is
+            stored. 
+            </summary>
+        </member>
+        <member name="F:Lucene.Net.Documents.Field.Store.NO">
+            <summary>Do not store the field value in the index. </summary>
+        </member>
+        <member name="F:Lucene.Net.Documents.Field.Index.NO">
+            <summary>Do not index the field value. This field can thus not be searched,
+            but one can still access its contents provided it is 
+            {@link Field.Store stored}. 
+            </summary>
+        </member>
+        <member name="F:Lucene.Net.Documents.Field.Index.TOKENIZED">
+            <summary>Index the field's value so it can be searched. An Analyzer will be used
+            to tokenize and possibly further normalize the text before its
+            terms will be stored in the index. This is useful for common text.
+            </summary>
+        </member>
+        <member name="F:Lucene.Net.Documents.Field.Index.UN_TOKENIZED">
+            <summary>Index the field's value without using an Analyzer, so it can be searched.
+            As no analyzer is used the value will be stored as a single term. This is
+            useful for unique Ids like product numbers.
+            </summary>
+        </member>
+        <member name="F:Lucene.Net.Documents.Field.Index.NO_NORMS">
+            <summary>Index the field's value without an Analyzer, and disable
+            the storing of norms.  No norms means that index-time boosting
+            and field length normalization will be disabled.  The benefit is
+            less memory usage as norms take up one byte per indexed field
+            for every document in the index.
+            </summary>
+        </member>
+        <member name="F:Lucene.Net.Documents.Field.TermVector.NO">
+            <summary>Do not store term vectors. </summary>
+        </member>
+        <member name="F:Lucene.Net.Documents.Field.TermVector.YES">
+            <summary>Store the term vectors of each document. A term vector is a list
+            of the document's terms and their number of occurences in that document. 
+            </summary>
+        </member>
+        <member name="F:Lucene.Net.Documents.Field.TermVector.WITH_POSITIONS">
+            <summary> Store the term vector + token position information
+            
+            </summary>
+            <seealso cref="F:Lucene.Net.Documents.Field.TermVector.YES">
+            </seealso>
+        </member>
+        <member name="F:Lucene.Net.Documents.Field.TermVector.WITH_OFFSETS">
+            <summary> Store the term vector + Token offset information
+            
+            </summary>
+            <seealso cref="F:Lucene.Net.Documents.Field.TermVector.YES">
+            </seealso>
+        </member>
+        <member name="F:Lucene.Net.Documents.Field.TermVector.WITH_POSITIONS_OFFSETS">
+            <summary> Store the term vector + Token position and offset information
+            
+            </summary>

[... 5212 lines stripped ...]


Mime
View raw message