lucenenet-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From d...@apache.org
Subject svn commit: r756927 [3/4] - in /incubator/lucene.net/trunk/C#/src/Lucene.Net: ./ Analysis/Standard/ Index/ Store/
Date Sat, 21 Mar 2009 12:51:45 GMT
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Lucene.Net.xml
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Lucene.Net.xml?rev=756927&r1=756926&r2=756927&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Lucene.Net.xml (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Lucene.Net.xml Sat Mar 21 12:51:41 2009
@@ -4,314 +4,505 @@
         <name>Lucene.Net</name>
     </assembly>
     <members>
-        <member name="T:Lucene.Net.Analysis.Standard.CharStream">
-            <summary> This interface describes a character stream that maintains line and
-            column number positions of the characters.  It also has the capability
-            to backup the stream to some extent.  An implementation of this
-            interface is used in the TokenManager implementation generated by
-            JavaCCParser.
+        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Store.VerifyingLockFactory" -->
+        <member name="T:Lucene.Net.Store.LockFactory">
+            <summary> <p>Base class for Locking implementation.  {@link Directory} uses
+            instances of this class to implement locking.</p>
+            
+            <p>Note that there are some useful tools to verify that
+            your LockFactory is working correctly: {@link
+            VerifyingLockFactory}, {@link LockStressTest}, {@link
+            LockVerifyServer}.</p>
             
-            All the methods except backup can be implemented in any fashion. backup
-            needs to be implemented correctly for the correct operation of the lexer.
-            Rest of the methods are all used to get information like line number,
-            column number and the String that constitutes a token and are not used
-            by the lexer. Hence their implementation won't affect the generated lexer's
-            operation.
-            </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.Standard.CharStream.ReadChar">
-            <summary> Returns the next character from the selected input.  The method
-            of selecting the input is the responsibility of the class
-            implementing this interface.  Can throw any java.io.IOException.
-            </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.Standard.CharStream.GetEndColumn">
-            <summary> Returns the column number of the last character for current token (being
-            matched after the last call to BeginTOken).
-            </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.Standard.CharStream.GetEndLine">
-            <summary> Returns the line number of the last character for current token (being
-            matched after the last call to BeginTOken).
             </summary>
+            <seealso cref="T:Lucene.Net.Store.LockVerifyServer">
+            </seealso>
+            <seealso cref="T:Lucene.Net.Store.LockStressTest">
+            </seealso>
+            <seealso cref="T:Lucene.Net.Store.VerifyingLockFactory">
+            </seealso>
         </member>
-        <member name="M:Lucene.Net.Analysis.Standard.CharStream.GetBeginColumn">
-            <summary> Returns the column number of the first character for current token (being
-            matched after the last call to BeginTOken).
+        <member name="M:Lucene.Net.Store.LockFactory.SetLockPrefix(System.String)">
+            <summary> Set the prefix in use for all locks created in this
+            LockFactory.  This is normally called once, when a
+            Directory gets this LockFactory instance.  However, you
+            can also call this (after this instance is assigned to
+            a Directory) to override the prefix in use.  This
+            is helpful if you're running Lucene on machines that
+            have different mount points for the same shared
+            directory.
             </summary>
         </member>
-        <member name="M:Lucene.Net.Analysis.Standard.CharStream.GetBeginLine">
-            <summary> Returns the line number of the first character for current token (being
-            matched after the last call to BeginTOken).
-            </summary>
+        <member name="M:Lucene.Net.Store.LockFactory.GetLockPrefix">
+            <summary> Get the prefix in use for all locks created in this LockFactory.</summary>
         </member>
-        <member name="M:Lucene.Net.Analysis.Standard.CharStream.Backup(System.Int32)">
-            <summary> Backs up the input stream by amount steps. Lexer calls this method if it
-            had already read some characters, but could not use them to match a
-            (longer) token. So, they will be used again as the prefix of the next
-            token and it is the implemetation's responsibility to do this right.
-            </summary>
+        <member name="M:Lucene.Net.Store.LockFactory.MakeLock(System.String)">
+            <summary> Return a new Lock instance identified by lockName.</summary>
+            <param name="lockName">name of the lock to be created.
+            </param>
         </member>
-        <member name="M:Lucene.Net.Analysis.Standard.CharStream.BeginToken">
-            <summary> Returns the next character that marks the beginning of the next token.
-            All characters must remain in the buffer between two successive calls
-            to this method to implement backup correctly.
+        <member name="M:Lucene.Net.Store.LockFactory.ClearLock(System.String)">
+            <summary> Attempt to clear (forcefully unlock and remove) the
+            specified lock.  Only call this at a time when you are
+            certain this lock is no longer in use.
             </summary>
+            <param name="lockName">name of the lock to be cleared.
+            </param>
         </member>
-        <member name="M:Lucene.Net.Analysis.Standard.CharStream.GetImage">
-            <summary> Returns a string made up of characters from the marked token beginning 
-            to the current buffer position. Implementations have the choice of returning
-            anything that they want to. For example, for efficiency, one might decide
-            to just return null, which is a valid implementation.
-            </summary>
+        <member name="M:Lucene.Net.Store.VerifyingLockFactory.#ctor(System.Byte,Lucene.Net.Store.LockFactory,System.String,System.Int32)">
+            <param name="id">should be a unique id across all clients
+            </param>
+            <param name="lf">the LockFactory that we are testing
+            </param>
+            <param name="host">host or IP where {@link LockVerifyServer}
+            is running
+            </param>
+            <param name="port">the port {@link LockVerifyServer} is
+            listening on
+            </param>
         </member>
-        <member name="M:Lucene.Net.Analysis.Standard.CharStream.GetSuffix(System.Int32)">
-            <summary> Returns an array of characters that make up the suffix of length 'len' for
-            the currently matched token. This is used to build up the matched string
-            for use in actions in the case of MORE. A simple and inefficient
-            implementation of this is as follows :
-            
-            {
-            String t = GetImage();
-            return t.substring(t.length() - len, t.length()).toCharArray();
-            }
+        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Store.Lock" -->
+        <member name="F:Lucene.Net.Store.Lock.LOCK_OBTAIN_WAIT_FOREVER">
+            <summary>Pass this value to {@link #Obtain(long)} to try
+            forever to obtain the lock. 
             </summary>
         </member>
-        <member name="M:Lucene.Net.Analysis.Standard.CharStream.Done">
-            <summary> The lexer calls this function to indicate that it is done with the stream
-            and hence implementations can free any resources held by this class.
-            Again, the body of this function can be just empty and it will not
-            affect the lexer's operation.
+        <member name="F:Lucene.Net.Store.Lock.LOCK_POLL_INTERVAL">
+            <summary>How long {@link #Obtain(long)} waits, in milliseconds,
+            in between attempts to acquire the lock. 
             </summary>
         </member>
-        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Analysis.Standard.FastCharStream" -->
-        <member name="M:Lucene.Net.Analysis.Standard.FastCharStream.#ctor(System.IO.TextReader)">
-            <summary>Constructs from a Reader. </summary>
-        </member>
-        <member name="T:Lucene.Net.Analysis.Standard.ParseException">
-            <summary> This exception is thrown when parse errors are encountered.
-            You can explicitly create objects of this exception type by
-            calling the method generateParseException in the generated
-            parser.
-            
-            You can modify this class to customize your error reporting
-            mechanisms so long as you retain the public fields.
+        <member name="M:Lucene.Net.Store.Lock.Obtain">
+            <summary>Attempts to obtain exclusive access and immediately return
+            upon success or failure.
             </summary>
+            <returns> true iff exclusive access is obtained
+            </returns>
         </member>
-        <member name="M:Lucene.Net.Analysis.Standard.ParseException.#ctor(Lucene.Net.Analysis.Standard.Token,System.Int32[][],System.String[])">
-            <summary> This constructor is used by the method "generateParseException"
-            in the generated parser.  Calling this constructor generates
-            a new object of this type with the fields "currentToken",
-            "expectedTokenSequences", and "tokenImage" set.  The boolean
-            flag "specialConstructor" is also set to true to indicate that
-            this constructor was used to create this object.
-            This constructor calls its super class with the empty string
-            to force the "toString" method of parent class "Throwable" to
-            print the error message in the form:
-            ParseException: &lt;result of getMessage&gt;
+        <member name="F:Lucene.Net.Store.Lock.failureReason">
+            <summary> If a lock obtain() call fails, this failureReason may be set
+            with the "root cause" Exception as to why the lock was
+            not obtained.
             </summary>
         </member>
-        <member name="M:Lucene.Net.Analysis.Standard.ParseException.#ctor">
-            <summary> The following constructors are for use by you for whatever
-            purpose you can think of.  Constructing the exception in this
-            manner makes the exception behave in the normal way - i.e., as
-            documented in the class "Throwable".  The fields "errorToken",
-            "expectedTokenSequences", and "tokenImage" do not contain
-            relevant information.  The JavaCC generated code does not use
-            these constructors.
+        <member name="M:Lucene.Net.Store.Lock.Obtain(System.Int64)">
+            <summary>Attempts to obtain an exclusive lock within amount of
+            time given. Polls once per {@link #LOCK_POLL_INTERVAL}
+            (currently 1000) milliseconds until lockWaitTimeout is
+            passed.
+            </summary>
+            <param name="lockWaitTimeout">length of time to wait in
+            milliseconds or {@link
+            #LOCK_OBTAIN_WAIT_FOREVER} to retry forever
+            </param>
+            <returns> true if lock was obtained
+            </returns>
+            <throws>  LockObtainFailedException if lock wait times out </throws>
+            <throws>  IllegalArgumentException if lockWaitTimeout is </throws>
+            <summary>         out of bounds
             </summary>
+            <throws>  IOException if obtain() throws IOException </throws>
         </member>
-        <member name="F:Lucene.Net.Analysis.Standard.ParseException.specialConstructor">
-            <summary> This variable determines which constructor was used to create
-            this object and thereby affects the semantics of the
-            "getMessage" method (see below).
-            </summary>
+        <member name="M:Lucene.Net.Store.Lock.Release">
+            <summary>Releases exclusive access. </summary>
         </member>
-        <member name="F:Lucene.Net.Analysis.Standard.ParseException.currentToken">
-            <summary> This is the last token that has been consumed successfully.  If
-            this object has been created due to a parse error, the token
-            followng this token will (therefore) be the first error token.
+        <member name="M:Lucene.Net.Store.Lock.IsLocked">
+            <summary>Returns true if the resource is currently locked.  Note that one must
+            still call {@link #Obtain()} before using the resource. 
             </summary>
         </member>
-        <member name="F:Lucene.Net.Analysis.Standard.ParseException.expectedTokenSequences">
-            <summary> Each entry in this array is an array of integers.  Each array
-            of integers represents a sequence of tokens (by their ordinal
-            values) that is expected at this point of the parse.
-            </summary>
+        <member name="T:Lucene.Net.Store.Lock.With">
+            <summary>Utility class for executing code with exclusive access. </summary>
         </member>
-        <member name="F:Lucene.Net.Analysis.Standard.ParseException.tokenImage">
-            <summary> This is a reference to the "tokenImage" array of the generated
-            parser within which the parse error occurred.  This array is
-            defined in the generated ...Constants interface.
-            </summary>
+        <member name="M:Lucene.Net.Store.Lock.With.#ctor(Lucene.Net.Store.Lock,System.Int64)">
+            <summary>Constructs an executor that will grab the named lock. </summary>
         </member>
-        <member name="F:Lucene.Net.Analysis.Standard.ParseException.eol">
-            <summary> The end of line string for this machine.</summary>
+        <member name="M:Lucene.Net.Store.Lock.With.DoBody">
+            <summary>Code to execute with exclusive access. </summary>
         </member>
-        <member name="M:Lucene.Net.Analysis.Standard.ParseException.Add_escapes(System.String)">
-            <summary> Used to convert raw characters to their escaped version
-            when these raw version cannot be used as part of an ASCII
-            string literal.
+        <member name="M:Lucene.Net.Store.Lock.With.Run">
+            <summary>Calls {@link #doBody} while <i>lock</i> is obtained.  Blocks if lock
+            cannot be obtained immediately.  Retries to obtain lock once per second
+            until it is obtained, or until it has tried ten times. Lock is released when
+            {@link #doBody} exits.
             </summary>
-        </member>
-        <member name="P:Lucene.Net.Analysis.Standard.ParseException.Message">
-            <summary> This method has the standard behavior when this object has been
-            created using the standard constructors.  Otherwise, it uses
-            "currentToken" and "expectedTokenSequences" to generate a parse
-            error message and returns it.  If this object has been created
-            due to a parse error, and you do not catch it (it gets thrown
-            from the parser), then this method is called during the printing
-            of the final stack trace, and hence the correct error message
-            gets displayed.
+            <throws>  LockObtainFailedException if lock could not </throws>
+            <summary> be obtained
             </summary>
+            <throws>  IOException if {@link Lock#obtain} throws IOException </throws>
         </member>
-        <member name="T:Lucene.Net.Analysis.Standard.StandardAnalyzer">
-            <summary> Filters {@link StandardTokenizer} with {@link StandardFilter}, {@link
-            LowerCaseFilter} and {@link StopFilter}, using a list of English stop words.
+        <member name="T:Lucene.Net.Store.RAMInputStream">
+            <summary> A memory-resident {@link IndexInput} implementation.
             
             </summary>
-            <version>  $Id: StandardAnalyzer.java 219090 2005-07-14 20:36:28Z dnaber $
+            <version>  $Id: RAMInputStream.java 598693 2007-11-27 17:01:21Z mikemccand $
             </version>
         </member>
-        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Analysis.Analyzer" -->
-        <member name="M:Lucene.Net.Analysis.Analyzer.TokenStream(System.String,System.IO.TextReader)">
-            <summary>Creates a TokenStream which tokenizes all the text in the provided
-            Reader.  Default implementation forwards to tokenStream(Reader) for 
-            compatibility with older version.  Override to allow Analyzer to choose 
-            strategy based on document and/or field.  Must be able to handle null
-            field name for backward compatibility. 
+        <member name="T:Lucene.Net.Store.IndexInput">
+            <summary>Abstract base class for input from a file in a {@link Directory}.  A
+            random-access input stream.  Used for all Lucene index input operations.
             </summary>
+            <seealso cref="T:Lucene.Net.Store.Directory">
+            </seealso>
         </member>
-        <member name="M:Lucene.Net.Analysis.Analyzer.GetPositionIncrementGap(System.String)">
-            <summary> Invoked before indexing a Field instance if
-            terms have already been added to that field.  This allows custom
-            analyzers to place an automatic position increment gap between
-            Field instances using the same field name.  The default value
-            position increment gap is 0.  With a 0 position increment gap and
-            the typical default token position increment of 1, all terms in a field,
-            including across Field instances, are in successive positions, allowing
-            exact PhraseQuery matches, for instance, across Field instance boundaries.
-            
-            </summary>
-            <param name="fieldName">Field name being indexed.
+        <member name="M:Lucene.Net.Store.IndexInput.ReadByte">
+            <summary>Reads and returns a single byte.</summary>
+            <seealso cref="M:Lucene.Net.Store.IndexOutput.WriteByte(System.Byte)">
+            </seealso>
+        </member>
+        <member name="M:Lucene.Net.Store.IndexInput.ReadBytes(System.Byte[],System.Int32,System.Int32)">
+            <summary>Reads a specified number of bytes into an array at the specified offset.</summary>
+            <param name="b">the array to read bytes into
             </param>
-            <returns> position increment gap, added to the next token emitted from {@link #TokenStream(String,Reader)}
-            </returns>
+            <param name="offset">the offset in the array to start storing bytes
+            </param>
+            <param name="len">the number of bytes to read
+            </param>
+            <seealso cref="M:Lucene.Net.Store.IndexOutput.WriteBytes(System.Byte[],System.Int32)">
+            </seealso>
         </member>
-        <member name="F:Lucene.Net.Analysis.Standard.StandardAnalyzer.STOP_WORDS">
-            <summary>An array containing some common English words that are usually not
-            useful for searching. 
+        <member name="M:Lucene.Net.Store.IndexInput.ReadBytes(System.Byte[],System.Int32,System.Int32,System.Boolean)">
+            <summary>Reads a specified number of bytes into an array at the
+            specified offset with control over whether the read
+            should be buffered (callers who have their own buffer
+            should pass in "false" for useBuffer).  Currently only
+            {@link BufferedIndexInput} respects this parameter.
             </summary>
+            <param name="b">the array to read bytes into
+            </param>
+            <param name="offset">the offset in the array to start storing bytes
+            </param>
+            <param name="len">the number of bytes to read
+            </param>
+            <param name="useBuffer">set to false if the caller will handle
+            buffering.
+            </param>
+            <seealso cref="M:Lucene.Net.Store.IndexOutput.WriteBytes(System.Byte[],System.Int32)">
+            </seealso>
         </member>
-        <member name="M:Lucene.Net.Analysis.Standard.StandardAnalyzer.#ctor">
-            <summary>Builds an analyzer with the default stop words ({@link #STOP_WORDS}). </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.Standard.StandardAnalyzer.#ctor(System.Collections.Hashtable)">
-            <summary>Builds an analyzer with the given stop words. </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.Standard.StandardAnalyzer.#ctor(System.String[])">
-            <summary>Builds an analyzer with the given stop words. </summary>
+        <member name="M:Lucene.Net.Store.IndexInput.ReadInt">
+            <summary>Reads four bytes and returns an int.</summary>
+            <seealso cref="M:Lucene.Net.Store.IndexOutput.WriteInt(System.Int32)">
+            </seealso>
         </member>
-        <member name="M:Lucene.Net.Analysis.Standard.StandardAnalyzer.#ctor(System.IO.FileInfo)">
-            <summary>Builds an analyzer with the stop words from the given file.</summary>
-            <seealso cref="!:WordlistLoader.GetWordSet(File)">
+        <member name="M:Lucene.Net.Store.IndexInput.ReadVInt">
+            <summary>Reads an int stored in variable-length format.  Reads between one and
+            five bytes.  Smaller values take fewer bytes.  Negative numbers are not
+            supported.
+            </summary>
+            <seealso cref="M:Lucene.Net.Store.IndexOutput.WriteVInt(System.Int32)">
             </seealso>
         </member>
-        <member name="M:Lucene.Net.Analysis.Standard.StandardAnalyzer.#ctor(System.IO.TextReader)">
-            <summary>Builds an analyzer with the stop words from the given reader.</summary>
-            <seealso cref="!:WordlistLoader.GetWordSet(Reader)">
+        <member name="M:Lucene.Net.Store.IndexInput.ReadLong">
+            <summary>Reads eight bytes and returns a long.</summary>
+            <seealso cref="!:IndexOutput#WriteLong(long)">
             </seealso>
         </member>
-        <member name="M:Lucene.Net.Analysis.Standard.StandardAnalyzer.TokenStream(System.String,System.IO.TextReader)">
-            <summary>Constructs a {@link StandardTokenizer} filtered by a {@link
-            StandardFilter}, a {@link LowerCaseFilter} and a {@link StopFilter}. 
+        <member name="M:Lucene.Net.Store.IndexInput.ReadVLong">
+            <summary>Reads a long stored in variable-length format.  Reads between one and
+            nine bytes.  Smaller values take fewer bytes.  Negative numbers are not
+            supported. 
             </summary>
         </member>
-        <member name="T:Lucene.Net.Analysis.Standard.StandardFilter">
-            <summary>Normalizes tokens extracted with {@link StandardTokenizer}. </summary>
+        <member name="M:Lucene.Net.Store.IndexInput.ReadString">
+            <summary>Reads a string.</summary>
+            <seealso cref="M:Lucene.Net.Store.IndexOutput.WriteString(System.String)">
+            </seealso>
         </member>
-        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Analysis.TokenFilter" -->
-        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Analysis.TokenStream" -->
-        <member name="M:Lucene.Net.Analysis.TokenStream.Next">
-            <summary>Returns the next token in the stream, or null at EOS. </summary>
+        <member name="M:Lucene.Net.Store.IndexInput.ReadChars(System.Char[],System.Int32,System.Int32)">
+            <summary>Reads UTF-8 encoded characters into an array.</summary>
+            <param name="buffer">the array to read characters into
+            </param>
+            <param name="start">the offset in the array to start storing characters
+            </param>
+            <param name="length">the number of characters to read
+            </param>
+            <seealso cref="M:Lucene.Net.Store.IndexOutput.WriteChars(System.String,System.Int32,System.Int32)">
+            </seealso>
         </member>
-        <member name="M:Lucene.Net.Analysis.TokenStream.Close">
-            <summary>Releases resources associated with this stream. </summary>
+        <member name="M:Lucene.Net.Store.IndexInput.SkipChars(System.Int32)">
+            <summary> Expert
+            
+            Similar to {@link #ReadChars(char[], int, int)} but does not do any conversion operations on the bytes it is reading in.  It still
+            has to invoke {@link #ReadByte()} just as {@link #ReadChars(char[], int, int)} does, but it does not need a buffer to store anything
+            and it does not have to do any of the bitwise operations, since we don't actually care what is in the byte except to determine
+            how many more bytes to read
+            </summary>
+            <param name="length">The number of chars to read
+            </param>
         </member>
-        <member name="F:Lucene.Net.Analysis.TokenFilter.input">
-            <summary>The source of tokens for this filter. </summary>
+        <member name="M:Lucene.Net.Store.IndexInput.Close">
+            <summary>Closes the stream to further operations. </summary>
         </member>
-        <member name="M:Lucene.Net.Analysis.TokenFilter.#ctor(Lucene.Net.Analysis.TokenStream)">
-            <summary>Construct a token stream filtering the given input. </summary>
+        <member name="M:Lucene.Net.Store.IndexInput.GetFilePointer">
+            <summary>Returns the current position in this file, where the next read will
+            occur.
+            </summary>
+            <seealso cref="M:Lucene.Net.Store.IndexInput.Seek(System.Int64)">
+            </seealso>
         </member>
-        <member name="M:Lucene.Net.Analysis.TokenFilter.Close">
-            <summary>Close the input TokenStream. </summary>
+        <member name="M:Lucene.Net.Store.IndexInput.Seek(System.Int64)">
+            <summary>Sets current position in this file, where the next read will occur.</summary>
+            <seealso cref="M:Lucene.Net.Store.IndexInput.GetFilePointer">
+            </seealso>
         </member>
-        <member name="M:Lucene.Net.Analysis.Standard.StandardFilter.#ctor(Lucene.Net.Analysis.TokenStream)">
-            <summary>Construct filtering <i>in</i>. </summary>
+        <member name="M:Lucene.Net.Store.IndexInput.Length">
+            <summary>The number of bytes in the file. </summary>
         </member>
-        <!-- Badly formed XML comment ignored for member "M:Lucene.Net.Analysis.Standard.StandardFilter.Next" -->
-        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Analysis.Standard.StandardTokenizer" -->
-        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Analysis.Tokenizer" -->
-        <member name="F:Lucene.Net.Analysis.Tokenizer.input">
-            <summary>The text source for this Tokenizer. </summary>
+        <!-- Badly formed XML comment ignored for member "M:Lucene.Net.Store.IndexInput.Clone" -->
+        <member name="T:Lucene.Net.Search.Spans.SpanNearQuery">
+            <summary>Matches spans which are near one another.  One can specify <i>slop</i>, the
+            maximum number of intervening unmatched positions, as well as whether
+            matches are required to be in-order. 
+            </summary>
         </member>
-        <member name="M:Lucene.Net.Analysis.Tokenizer.#ctor">
-            <summary>Construct a tokenizer with null input. </summary>
+        <member name="T:Lucene.Net.Search.Spans.SpanQuery">
+            <summary>Base class for span-based queries. </summary>
         </member>
-        <member name="M:Lucene.Net.Analysis.Tokenizer.#ctor(System.IO.TextReader)">
-            <summary>Construct a token stream processing the given input. </summary>
+        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Search.Query" -->
+        <member name="M:Lucene.Net.Search.Query.SetBoost(System.Single)">
+            <summary>Sets the boost for this query clause to <code>b</code>.  Documents
+            matching this clause will (in addition to the normal weightings) have
+            their score multiplied by <code>b</code>.
+            </summary>
         </member>
-        <member name="M:Lucene.Net.Analysis.Tokenizer.Close">
-            <summary>By default, closes the input Reader. </summary>
+        <member name="M:Lucene.Net.Search.Query.GetBoost">
+            <summary>Gets the boost for this clause.  Documents matching
+            this clause will (in addition to the normal weightings) have their score
+            multiplied by <code>b</code>.   The boost is 1.0 by default.
+            </summary>
         </member>
-        <member name="M:Lucene.Net.Analysis.Standard.StandardTokenizer.#ctor(System.IO.TextReader)">
-            <summary>Constructs a tokenizer for this Reader. </summary>
+        <!-- Badly formed XML comment ignored for member "M:Lucene.Net.Search.Query.ToString(System.String)" -->
+        <member name="M:Lucene.Net.Search.Query.ToString">
+            <summary>Prints a query to a string. </summary>
         </member>
-        <!-- Badly formed XML comment ignored for member "M:Lucene.Net.Analysis.Standard.StandardTokenizer.Next" -->
-        <member name="M:Lucene.Net.Analysis.Standard.StandardTokenizer.Close">
-            <summary>By default, closes the input Reader. </summary>
+        <!-- Badly formed XML comment ignored for member "M:Lucene.Net.Search.Query.CreateWeight(Lucene.Net.Search.Searcher)" -->
+        <member name="M:Lucene.Net.Search.Query.Weight(Lucene.Net.Search.Searcher)">
+            <summary>Expert: Constructs and initializes a Weight for a top-level query. </summary>
         </member>
-        <member name="M:Lucene.Net.Analysis.Standard.StandardTokenizerTokenManager.Close">
-            <summary>By default, closes the input Reader. </summary>
+        <member name="M:Lucene.Net.Search.Query.Rewrite(Lucene.Net.Index.IndexReader)">
+            <summary>Expert: called to re-write queries into primitive queries. For example,
+            a PrefixQuery will be rewritten into a BooleanQuery that consists
+            of TermQuerys.
+            </summary>
+        </member>
+        <member name="M:Lucene.Net.Search.Query.Combine(Lucene.Net.Search.Query[])">
+            <summary>Expert: called when re-writing queries under MultiSearcher.
+            
+            Create a single query suitable for use by all subsearchers (in 1-1
+            correspondence with queries). This is an optimization of the OR of
+            all queries. We handle the common optimization cases of equal
+            queries and overlapping clauses of boolean OR queries (as generated
+            by MultiTermQuery.rewrite() and RangeQuery.rewrite()).
+            Be careful overriding this method as queries[0] determines which
+            method will be called and is not necessarily of the same type as
+            the other queries.
+            </summary>
+        </member>
+        <member name="M:Lucene.Net.Search.Query.ExtractTerms(System.Collections.Hashtable)">
+            <summary> Expert: adds all terms occurring in this query to the terms set. Only
+            works if this query is in its {@link #rewrite rewritten} form.
+            
+            </summary>
+            <throws>  UnsupportedOperationException if this query is not yet rewritten </throws>
+        </member>
+        <!-- Badly formed XML comment ignored for member "M:Lucene.Net.Search.Query.MergeBooleanQueries(Lucene.Net.Search.Query[])" -->
+        <member name="M:Lucene.Net.Search.Query.GetSimilarity(Lucene.Net.Search.Searcher)">
+            <summary>Expert: Returns the Similarity implementation to be used for this query.
+            Subclasses may override this method to specify their own Similarity
+            implementation, perhaps one that delegates through that of the Searcher.
+            By default the Searcher's Similarity implementation is returned.
+            </summary>
+        </member>
+        <member name="M:Lucene.Net.Search.Query.Clone">
+            <summary>Returns a clone of this query. </summary>
+        </member>
+        <member name="M:Lucene.Net.Search.Spans.SpanQuery.GetSpans(Lucene.Net.Index.IndexReader)">
+            <summary>Expert: Returns the matches for this query in an index.  Used internally
+            to search for spans. 
+            </summary>
+        </member>
+        <member name="M:Lucene.Net.Search.Spans.SpanQuery.GetField">
+            <summary>Returns the name of the field matched by this query.</summary>
+        </member>
+        <member name="M:Lucene.Net.Search.Spans.SpanQuery.GetTerms">
+            <summary>Returns a collection of all terms matched by this query.</summary>
+            <deprecated> use extractTerms instead
+            </deprecated>
+            <seealso cref="!:Query#ExtractTerms(Set)">
+            </seealso>
+        </member>
+        <member name="M:Lucene.Net.Search.Spans.SpanNearQuery.#ctor(Lucene.Net.Search.Spans.SpanQuery[],System.Int32,System.Boolean)">
+            <summary>Construct a SpanNearQuery.  Matches spans matching a span from each
+            clause, with up to <code>slop</code> total unmatched positions between
+            them.  When <code>inOrder</code> is true, the spans from each clause
+            must be ordered as in <code>clauses</code>. 
+            </summary>
+        </member>
+        <member name="M:Lucene.Net.Search.Spans.SpanNearQuery.GetClauses">
+            <summary>Return the clauses whose spans are matched. </summary>
+        </member>
+        <member name="M:Lucene.Net.Search.Spans.SpanNearQuery.GetSlop">
+            <summary>Return the maximum number of intervening unmatched positions permitted.</summary>
+        </member>
+        <member name="M:Lucene.Net.Search.Spans.SpanNearQuery.IsInOrder">
+            <summary>Return true if matches are required to be in-order.</summary>
+        </member>
+        <member name="M:Lucene.Net.Search.Spans.SpanNearQuery.GetTerms">
+            <summary>Returns a collection of all terms matched by this query.</summary>
+            <deprecated> use extractTerms instead
+            </deprecated>
+            <seealso cref="!:#ExtractTerms(Set)">
+            </seealso>
+        </member>
+        <member name="M:Lucene.Net.Search.Spans.SpanNearQuery.Equals(System.Object)">
+            <summary>Returns true iff <code>o</code> is equal to this. </summary>
+        </member>
+        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Search.Sort" -->
+        <member name="F:Lucene.Net.Search.Sort.RELEVANCE">
+            <summary> Represents sorting by computed relevance. Using this sort criteria returns
+            the same results as calling
+            {@link Searcher#Search(Query) Searcher#search()}without a sort criteria,
+            only with slightly more overhead.
+            </summary>
+        </member>
+        <member name="F:Lucene.Net.Search.Sort.INDEXORDER">
+            <summary>Represents sorting by index order. </summary>
+        </member>
+        <member name="M:Lucene.Net.Search.Sort.#ctor">
+            <summary> Sorts by computed relevance. This is the same sort criteria as calling
+            {@link Searcher#Search(Query) Searcher#search()}without a sort criteria,
+            only with slightly more overhead.
+            </summary>
+        </member>
+        <member name="M:Lucene.Net.Search.Sort.#ctor(System.String)">
+            <summary> Sorts by the terms in <code>field</code> then by index order (document
+            number). The type of value in <code>field</code> is determined
+            automatically.
+            
+            </summary>
+            <seealso cref="!:SortField#AUTO">
+            </seealso>
+        </member>
+        <member name="M:Lucene.Net.Search.Sort.#ctor(System.String,System.Boolean)">
+            <summary> Sorts possibly in reverse by the terms in <code>field</code> then by
+            index order (document number). The type of value in <code>field</code> is
+            determined automatically.
+            
+            </summary>
+            <seealso cref="!:SortField#AUTO">
+            </seealso>
+        </member>
+        <member name="M:Lucene.Net.Search.Sort.#ctor(System.String[])">
+            <summary> Sorts in succession by the terms in each field. The type of value in
+            <code>field</code> is determined automatically.
+            
+            </summary>
+            <seealso cref="!:SortField#AUTO">
+            </seealso>
+        </member>
+        <member name="M:Lucene.Net.Search.Sort.#ctor(Lucene.Net.Search.SortField)">
+            <summary>Sorts by the criteria in the given SortField. </summary>
+        </member>
+        <member name="M:Lucene.Net.Search.Sort.#ctor(Lucene.Net.Search.SortField[])">
+            <summary>Sorts in succession by the criteria in each SortField. </summary>
+        </member>
+        <member name="M:Lucene.Net.Search.Sort.SetSort(System.String)">
+            <summary> Sets the sort to the terms in <code>field</code> then by index order
+            (document number).
+            </summary>
+        </member>
+        <member name="M:Lucene.Net.Search.Sort.SetSort(System.String,System.Boolean)">
+            <summary> Sets the sort to the terms in <code>field</code> possibly in reverse,
+            then by index order (document number).
+            </summary>
+        </member>
+        <member name="M:Lucene.Net.Search.Sort.SetSort(System.String[])">
+            <summary>Sets the sort to the terms in each field in succession. </summary>
+        </member>
+        <member name="M:Lucene.Net.Search.Sort.SetSort(Lucene.Net.Search.SortField)">
+            <summary>Sets the sort to the given criteria. </summary>
+        </member>
+        <member name="M:Lucene.Net.Search.Sort.SetSort(Lucene.Net.Search.SortField[])">
+            <summary>Sets the sort to the given criteria in succession. </summary>
+        </member>
+        <member name="M:Lucene.Net.Search.Sort.GetSort">
+            <summary> Representation of the sort criteria.</summary>
+            <returns> Array of SortField objects used in this sort criteria
+            </returns>
+        </member>
+        <member name="T:Lucene.Net.Search.Filter">
+            <summary>Abstract base class providing a mechanism to restrict searches to a subset
+            of an index. 
+            </summary>
+        </member>
+        <member name="M:Lucene.Net.Search.Filter.Bits(Lucene.Net.Index.IndexReader)">
+            <summary>Returns a BitSet with true for documents which should be permitted in
+            search results, and false for those that should not. 
+            </summary>
+        </member>
+        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Search.ConstantScoreRangeQuery" -->
+        <member name="M:Lucene.Net.Search.ConstantScoreRangeQuery.GetField">
+            <summary>Returns the field name for this query </summary>
+        </member>
+        <member name="M:Lucene.Net.Search.ConstantScoreRangeQuery.GetLowerVal">
+            <summary>Returns the value of the lower endpoint of this range query, null if open ended </summary>
+        </member>
+        <member name="M:Lucene.Net.Search.ConstantScoreRangeQuery.GetUpperVal">
+            <summary>Returns the value of the upper endpoint of this range query, null if open ended </summary>
+        </member>
+        <member name="M:Lucene.Net.Search.ConstantScoreRangeQuery.IncludesLower">
+            <summary>Returns <code>true</code> if the lower endpoint is inclusive </summary>
+        </member>
+        <member name="M:Lucene.Net.Search.ConstantScoreRangeQuery.IncludesUpper">
+            <summary>Returns <code>true</code> if the upper endpoint is inclusive </summary>
+        </member>
+        <member name="M:Lucene.Net.Search.ConstantScoreRangeQuery.ToString(System.String)">
+            <summary>Prints a user-readable version of this query. </summary>
+        </member>
+        <member name="M:Lucene.Net.Search.ConstantScoreRangeQuery.Equals(System.Object)">
+            <summary>Returns true if <code>o</code> is equal to this. </summary>
+        </member>
+        <member name="M:Lucene.Net.Search.ConstantScoreRangeQuery.GetHashCode">
+            <summary>Returns a hash code value for this object.</summary>
         </member>
-        <member name="T:Lucene.Net.Analysis.Standard.Token">
+        <member name="T:Lucene.Net.QueryParsers.Token">
             <summary> Describes the input token stream.</summary>
         </member>
-        <member name="F:Lucene.Net.Analysis.Standard.Token.kind">
+        <member name="F:Lucene.Net.QueryParsers.Token.kind">
             <summary> An integer that describes the kind of this token.  This numbering
             system is determined by JavaCCParser, and a table of these numbers is
             stored in the file ...Constants.java.
             </summary>
         </member>
-        <member name="F:Lucene.Net.Analysis.Standard.Token.beginLine">
+        <member name="F:Lucene.Net.QueryParsers.Token.beginLine">
             <summary> beginLine and beginColumn describe the position of the first character
             of this token; endLine and endColumn describe the position of the
             last character of this token.
             </summary>
         </member>
-        <member name="F:Lucene.Net.Analysis.Standard.Token.beginColumn">
+        <member name="F:Lucene.Net.QueryParsers.Token.beginColumn">
             <summary> beginLine and beginColumn describe the position of the first character
             of this token; endLine and endColumn describe the position of the
             last character of this token.
             </summary>
         </member>
-        <member name="F:Lucene.Net.Analysis.Standard.Token.endLine">
+        <member name="F:Lucene.Net.QueryParsers.Token.endLine">
             <summary> beginLine and beginColumn describe the position of the first character
             of this token; endLine and endColumn describe the position of the
             last character of this token.
             </summary>
         </member>
-        <member name="F:Lucene.Net.Analysis.Standard.Token.endColumn">
+        <member name="F:Lucene.Net.QueryParsers.Token.endColumn">
             <summary> beginLine and beginColumn describe the position of the first character
             of this token; endLine and endColumn describe the position of the
             last character of this token.
             </summary>
         </member>
-        <member name="F:Lucene.Net.Analysis.Standard.Token.image">
+        <member name="F:Lucene.Net.QueryParsers.Token.image">
             <summary> The string image of the token.</summary>
         </member>
-        <member name="F:Lucene.Net.Analysis.Standard.Token.next">
+        <member name="F:Lucene.Net.QueryParsers.Token.next">
             <summary> A reference to the next regular (non-special) token from the input
             stream.  If this is the last token from the input stream, or if the
             token manager has not read tokens beyond this one, this field is
@@ -320,7 +511,7 @@
             this field.
             </summary>
         </member>
-        <member name="F:Lucene.Net.Analysis.Standard.Token.specialToken">
+        <member name="F:Lucene.Net.QueryParsers.Token.specialToken">
             <summary> This field is used to access special tokens that occur prior to this
             token, but after the immediately preceding regular (non-special) token.
             If there are no such special tokens, this field is set to null.
@@ -333,10 +524,10 @@
             is no such token, this field is null.
             </summary>
         </member>
-        <member name="M:Lucene.Net.Analysis.Standard.Token.ToString">
+        <member name="M:Lucene.Net.QueryParsers.Token.ToString">
             <summary> Returns the image.</summary>
         </member>
-        <member name="M:Lucene.Net.Analysis.Standard.Token.NewToken(System.Int32)">
+        <member name="M:Lucene.Net.QueryParsers.Token.NewToken(System.Int32)">
             <summary> Returns a new Token object, by default. However, if you want, you
             can create and return subclass objects based on the value of ofKind.
             Simply add the cases to the switch for all those special cases.
@@ -349,913 +540,965 @@
             variable to the appropriate type and use it in your lexical actions.
             </summary>
         </member>
-        <member name="F:Lucene.Net.Analysis.Standard.TokenMgrError.LEXICAL_ERROR">
-            <summary> Lexical error occured.</summary>
-        </member>
-        <member name="F:Lucene.Net.Analysis.Standard.TokenMgrError.STATIC_LEXER_ERROR">
-            <summary> An attempt wass made to create a second instance of a static token manager.</summary>
-        </member>
-        <member name="F:Lucene.Net.Analysis.Standard.TokenMgrError.INVALID_LEXICAL_STATE">
-            <summary> Tried to change to an invalid lexical state.</summary>
-        </member>
-        <member name="F:Lucene.Net.Analysis.Standard.TokenMgrError.LOOP_DETECTED">
-            <summary> Detected (and bailed out of) an infinite loop in the token manager.</summary>
-        </member>
-        <member name="F:Lucene.Net.Analysis.Standard.TokenMgrError.errorCode">
-            <summary> Indicates the reason why the exception is thrown. It will have
-            one of the above 4 values.
+        <member name="T:Lucene.Net.Index.SortedTermVectorMapper">
+            <summary> Store a sorted collection of {@link Lucene.Net.Index.TermVectorEntry}s.  Collects all term information
+            into a single, SortedSet.
+            <br/>
+            NOTE: This Mapper ignores all Field information for the Document.  This means that if you are using offset/positions you will not
+            know what Fields they correlate with.
+            <br/>
+            This is not thread-safe  
             </summary>
         </member>
-        <member name="M:Lucene.Net.Analysis.Standard.TokenMgrError.addEscapes(System.String)">
-            <summary> Replaces unprintable characters by their espaced (or unicode escaped)
-            equivalents in the given string
+        <member name="T:Lucene.Net.Index.TermVectorMapper">
+            <summary> The TermVectorMapper can be used to map Term Vectors into your own
+            structure instead of the parallel array structure used by
+            {@link Lucene.Net.Index.IndexReader#GetTermFreqVector(int,String)}.
+            <p/>
+            It is up to the implementation to make sure it is thread-safe.
+            
+            
+            
             </summary>
         </member>
-        <member name="M:Lucene.Net.Analysis.Standard.TokenMgrError.LexicalError(System.Boolean,System.Int32,System.Int32,System.Int32,System.String,System.Char)">
-            <summary> Returns a detailed message for the Error when it is thrown by the
-            token manager to indicate a lexical error.
-            Parameters : 
-            EOFSeen     : indicates if EOF caused the lexicl error
-            curLexState : lexical state in which this error occured
-            errorLine   : line number when the error occured
-            errorColumn : column number when the error occured
-            errorAfter  : prefix that was seen before this error occured
-            curchar     : the offending character
-            Note: You can customize the lexical error message by modifying this method.
-            </summary>
+        <member name="M:Lucene.Net.Index.TermVectorMapper.#ctor(System.Boolean,System.Boolean)">
+            <summary> </summary>
+            <param name="ignoringPositions">true if this mapper should tell Lucene to ignore positions even if they are stored
+            </param>
+            <param name="ignoringOffsets">similar to ignoringPositions
+            </param>
         </member>
-        <member name="P:Lucene.Net.Analysis.Standard.TokenMgrError.Message">
-            <summary> You can also modify the body of this method to customize your error messages.
-            For example, cases like LOOP_DETECTED and INVALID_LEXICAL_STATE are not
-            of end-users concern, so you can return something like : 
-            
-            "Internal Error : Please file a bug report .... "
+        <member name="M:Lucene.Net.Index.TermVectorMapper.SetExpectations(System.String,System.Int32,System.Boolean,System.Boolean)">
+            <summary> Tell the mapper what to expect in regards to field, number of terms, offset and position storage.
+            This method will be called once before retrieving the vector for a field.
             
-            from this method for such cases in the release version of your parser.
+            This method will be called before {@link #Map(String,int,TermVectorOffsetInfo[],int[])}.
             </summary>
+            <param name="field">The field the vector is for
+            </param>
+            <param name="numTerms">The number of terms that need to be mapped
+            </param>
+            <param name="storeOffsets">true if the mapper should expect offset information
+            </param>
+            <param name="storePositions">true if the mapper should expect positions info
+            </param>
         </member>
-        <member name="T:Lucene.Net.Analysis.CharTokenizer">
-            <summary>An abstract base class for simple, character-oriented tokenizers.</summary>
+        <member name="M:Lucene.Net.Index.TermVectorMapper.Map(System.String,System.Int32,Lucene.Net.Index.TermVectorOffsetInfo[],System.Int32[])">
+            <summary> Map the Term Vector information into your own structure</summary>
+            <param name="term">The term to add to the vector
+            </param>
+            <param name="frequency">The frequency of the term in the document
+            </param>
+            <param name="offsets">null if the offset is not specified, otherwise the offset into the field of the term
+            </param>
+            <param name="positions">null if the position is not specified, otherwise the position in the field of the term
+            </param>
         </member>
-        <member name="M:Lucene.Net.Analysis.CharTokenizer.IsTokenChar(System.Char)">
-            <summary>Returns true iff a character should be included in a token.  This
-            tokenizer generates as tokens adjacent sequences of characters which
-            satisfy this predicate.  Characters for which this is false are used to
-            define token boundaries and are not included in tokens. 
+        <member name="M:Lucene.Net.Index.TermVectorMapper.IsIgnoringPositions">
+            <summary> Indicate to Lucene that even if there are positions stored, this mapper is not interested in them and they
+            can be skipped over.  Derived classes should set this to true if they want to ignore positions.  The default
+            is false, meaning positions will be loaded if they are stored.
             </summary>
+            <returns> false
+            </returns>
         </member>
-        <member name="M:Lucene.Net.Analysis.CharTokenizer.Normalize(System.Char)">
-            <summary>Called on each token character to normalize it before it is added to the
-            token.  The default implementation does nothing.  Subclasses may use this
-            to, e.g., lowercase tokens. 
-            </summary>
+        <member name="M:Lucene.Net.Index.TermVectorMapper.IsIgnoringOffsets">
+            <summary> </summary>
+            <seealso cref="!:IsIgnoringPositions() Same principle as &lt;@link #IsIgnoringPositions()&gt;, but applied to offsets.  false by default.">
+            </seealso>
+            <returns> false
+            </returns>
         </member>
-        <member name="M:Lucene.Net.Analysis.CharTokenizer.Next">
-            <summary>Returns the next token in the stream, or null at EOS. </summary>
+        <member name="M:Lucene.Net.Index.TermVectorMapper.SetDocumentNumber(System.Int32)">
+            <summary> Passes down the index of the document whose term vector is currently being mapped,
+            once for each top level call to a term vector reader.
+            <p/>
+            Default implementation IGNORES the document number.  Override if your implementation needs the document number.
+            <p/> 
+            NOTE: Document numbers are internal to Lucene and subject to change depending on indexing operations.
+            
+            </summary>
+            <param name="documentNumber">index of document currently being mapped
+            </param>
         </member>
-        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Analysis.ISOLatin1AccentFilter" -->
-        <member name="M:Lucene.Net.Analysis.ISOLatin1AccentFilter.RemoveAccents(System.String)">
-            <summary> To replace accented characters in a String by unaccented equivalents.</summary>
+        <member name="F:Lucene.Net.Index.SortedTermVectorMapper.ALL">
+            <summary> Stand-in name for the field in {@link TermVectorEntry}.</summary>
         </member>
-        <member name="T:Lucene.Net.Analysis.KeywordAnalyzer">
-            <summary> "Tokenizes" the entire stream as a single token. This is useful
-            for data like zip codes, ids, and some product names.
-            </summary>
+        <member name="M:Lucene.Net.Index.SortedTermVectorMapper.#ctor(System.Collections.Generic.IComparer{System.Object})">
+            <summary> </summary>
+            <param name="comparator">A Comparator for sorting {@link TermVectorEntry}s
+            </param>
         </member>
-        <member name="T:Lucene.Net.Analysis.KeywordTokenizer">
-            <summary> Emits the entire input as a single token.</summary>
+        <member name="M:Lucene.Net.Index.SortedTermVectorMapper.Map(System.String,System.Int32,Lucene.Net.Index.TermVectorOffsetInfo[],System.Int32[])">
+            <summary> </summary>
+            <param name="term">The term to map
+            </param>
+            <param name="frequency">The frequency of the term
+            </param>
+            <param name="offsets">Offset information, may be null
+            </param>
+            <param name="positions">Position information, may be null
+            </param>
         </member>
-        <member name="T:Lucene.Net.Analysis.LengthFilter">
-            <summary> Removes words that are too long and too short from the stream.
+        <member name="M:Lucene.Net.Index.SortedTermVectorMapper.GetTermVectorEntrySet">
+            <summary> The TermVectorEntrySet.  A SortedSet of {@link TermVectorEntry} objects.  Sort is by the comparator passed into the constructor.
+            <br/>
+            This set will be empty until after the mapping process takes place.
             
             </summary>
-            <author>  David Spencer
-            </author>
-            <version>  $Id: LengthFilter.java 347992 2005-11-21 21:41:43Z dnaber $
-            </version>
-        </member>
-        <member name="M:Lucene.Net.Analysis.LengthFilter.#ctor(Lucene.Net.Analysis.TokenStream,System.Int32,System.Int32)">
-            <summary> Build a filter that removes words that are too long or too
-            short from the text.
-            </summary>
+            <returns> The SortedSet of {@link TermVectorEntry}.
+            </returns>
         </member>
-        <member name="M:Lucene.Net.Analysis.LengthFilter.Next">
-            <summary> Returns the next input Token whose termText() is the right len</summary>
+        <member name="F:Lucene.Net.Index.SegmentInfos.FORMAT">
+            <summary>The file format version, a negative number. </summary>
         </member>
-        <member name="T:Lucene.Net.Analysis.LetterTokenizer">
-            <summary>A LetterTokenizer is a tokenizer that divides text at non-letters.  That's
-            to say, it defines tokens as maximal strings of adjacent letters, as defined
-            by java.lang.Character.isLetter() predicate.
-            Note: this does a decent job for most European languages, but does a terrible
-            job for some Asian languages, where words are not separated by spaces. 
+        <member name="F:Lucene.Net.Index.SegmentInfos.FORMAT_LOCKLESS">
+            <summary>This format adds details used for lockless commits.  It differs
+            slightly from the previous format in that file names
+            are never re-used (write once).  Instead, each file is
+            written to the next generation.  For example,
+            segments_1, segments_2, etc.  This allows us to not use
+            a commit lock.  See <a
+            href="http://lucene.apache.org/java/docs/fileformats.html">file
+            formats</a> for details.
+            </summary>
+        </member>
+        <member name="F:Lucene.Net.Index.SegmentInfos.FORMAT_SINGLE_NORM_FILE">
+            <summary>This format adds a "hasSingleNormFile" flag into each segment info.
+            See <a href="http://issues.apache.org/jira/browse/LUCENE-756">LUCENE-756</a>
+            for details.
+            </summary>
+        </member>
+        <member name="F:Lucene.Net.Index.SegmentInfos.FORMAT_SHARED_DOC_STORE">
+            <summary>This format allows multiple segments to share a single
+            vectors and stored fields file. 
             </summary>
         </member>
-        <member name="M:Lucene.Net.Analysis.LetterTokenizer.#ctor(System.IO.TextReader)">
-            <summary>Construct a new LetterTokenizer. </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.LetterTokenizer.IsTokenChar(System.Char)">
-            <summary>Collects only characters which satisfy
-            {@link Character#isLetter(char)}.
+        <member name="F:Lucene.Net.Index.SegmentInfos.version">
+            <summary> counts how often the index has been changed by adding or deleting docs.
+            starting with the current time in milliseconds forces the creation of unique version numbers.
             </summary>
         </member>
-        <member name="T:Lucene.Net.Analysis.LowerCaseFilter">
-            <summary> Normalizes token text to lower case.
+        <member name="F:Lucene.Net.Index.SegmentInfos.infoStream">
+            <summary> If non-null, information about loading segments_N files</summary>
+            <seealso cref="!:setInfoStream.">
+            </seealso>
+        </member>
+        <member name="M:Lucene.Net.Index.SegmentInfos.GetCurrentSegmentGeneration(System.String[])">
+            <summary> Get the generation (N) of the current segments_N file
+            from a list of files.
             
             </summary>
-            <version>  $Id: LowerCaseFilter.java 150259 2004-03-29 22:48:07Z cutting $
-            </version>
-        </member>
-        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Analysis.LowerCaseTokenizer" -->
-        <member name="M:Lucene.Net.Analysis.LowerCaseTokenizer.#ctor(System.IO.TextReader)">
-            <summary>Construct a new LowerCaseTokenizer. </summary>
+            <param name="files">-- array of file names to check
+            </param>
         </member>
-        <member name="M:Lucene.Net.Analysis.LowerCaseTokenizer.Normalize(System.Char)">
-            <summary>Collects only characters which satisfy
-            {@link Character#isLetter(char)}.
+        <member name="M:Lucene.Net.Index.SegmentInfos.GetCurrentSegmentGeneration(Lucene.Net.Store.Directory)">
+            <summary> Get the generation (N) of the current segments_N file
+            in the directory.
+            
             </summary>
+            <param name="directory">-- directory to search for the latest segments_N file
+            </param>
         </member>
-        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Analysis.PerFieldAnalyzerWrapper" -->
-        <member name="M:Lucene.Net.Analysis.PerFieldAnalyzerWrapper.#ctor(Lucene.Net.Analysis.Analyzer)">
-            <summary> Constructs with default analyzer.
+        <member name="M:Lucene.Net.Index.SegmentInfos.GetCurrentSegmentFileName(System.String[])">
+            <summary> Get the filename of the current segments_N file
+            from a list of files.
             
             </summary>
-            <param name="defaultAnalyzer">Any fields not specifically
-            defined to use a different analyzer will use the one provided here.
+            <param name="files">-- array of file names to check
             </param>
         </member>
-        <member name="M:Lucene.Net.Analysis.PerFieldAnalyzerWrapper.AddAnalyzer(System.String,Lucene.Net.Analysis.Analyzer)">
-            <summary> Defines an analyzer to use for the specified field.
+        <member name="M:Lucene.Net.Index.SegmentInfos.GetCurrentSegmentFileName(Lucene.Net.Store.Directory)">
+            <summary> Get the filename of the current segments_N file
+            in the directory.
             
             </summary>
-            <param name="fieldName">field name requiring a non-default analyzer
-            </param>
-            <param name="analyzer">non-default analyzer to use for field
+            <param name="directory">-- directory to search for the latest segments_N file
             </param>
         </member>
-        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Analysis.PorterStemFilter" -->
-        <member name="M:Lucene.Net.Analysis.PorterStemFilter.Next">
-            <summary>Returns the next input Token, after being stemmed </summary>
+        <member name="M:Lucene.Net.Index.SegmentInfos.GetCurrentSegmentFileName">
+            <summary> Get the segments_N filename in use by this segment infos.</summary>
         </member>
-        <member name="T:Lucene.Net.Analysis.PorterStemmer">
-            <summary> 
-            Stemmer, implementing the Porter Stemming Algorithm
-            
-            The Stemmer class transforms a word into its root form.  The input
-            word can be provided a character at time (by calling add()), or at once
-            by calling one of the various stem(something) methods.
+        <member name="M:Lucene.Net.Index.SegmentInfos.GenerationFromSegmentsFileName(System.String)">
+            <summary> Parse the generation off the segments file name and
+            return it.
             </summary>
         </member>
-        <member name="M:Lucene.Net.Analysis.PorterStemmer.Reset">
-            <summary> reset() resets the stemmer so it can stem another word.  If you invoke
-            the stemmer by calling add(char) and then Stem(), you must call reset()
-            before starting another word.
-            </summary>
+        <member name="M:Lucene.Net.Index.SegmentInfos.GetNextSegmentFileName">
+            <summary> Get the next segments_N filename that will be written.</summary>
         </member>
-        <member name="M:Lucene.Net.Analysis.PorterStemmer.Add(System.Char)">
-            <summary> Add a character to the word being stemmed.  When you are finished
-            adding characters, you can call Stem(void) to process the word.
+        <member name="M:Lucene.Net.Index.SegmentInfos.Read(Lucene.Net.Store.Directory,System.String)">
+            <summary> Read a particular segmentFileName.  Note that this may
+            throw an IOException if a commit is in process.
+            
             </summary>
+            <param name="directory">-- directory containing the segments file
+            </param>
+            <param name="segmentFileName">-- segment file to load
+            </param>
+            <throws>  CorruptIndexException if the index is corrupt </throws>
+            <throws>  IOException if there is a low-level IO error </throws>
         </member>
-        <member name="M:Lucene.Net.Analysis.PorterStemmer.ToString">
-            <summary> After a word has been stemmed, it can be retrieved by toString(),
-            or a reference to the internal buffer can be retrieved by getResultBuffer
-            and getResultLength (which is generally more efficient.)
+        <member name="M:Lucene.Net.Index.SegmentInfos.Read(Lucene.Net.Store.Directory)">
+            <summary> This version of read uses the retry logic (for lock-less
+            commits) to find the right segments file to load.
             </summary>
+            <throws>  CorruptIndexException if the index is corrupt </throws>
+            <throws>  IOException if there is a low-level IO error </throws>
         </member>
-        <member name="M:Lucene.Net.Analysis.PorterStemmer.GetResultLength">
-            <summary> Returns the length of the word resulting from the stemming process.</summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.PorterStemmer.GetResultBuffer">
-            <summary> Returns a reference to a character buffer containing the results of
-            the stemming process.  You also need to consult getResultLength()
-            to determine the length of the result.
+        <member name="M:Lucene.Net.Index.SegmentInfos.Clone">
+            <summary> Returns a copy of this instance, also copying each
+            SegmentInfo.
             </summary>
         </member>
-        <member name="M:Lucene.Net.Analysis.PorterStemmer.Stem(System.String)">
-            <summary> Stem a word provided as a String.  Returns the result as a String.</summary>
+        <member name="M:Lucene.Net.Index.SegmentInfos.GetVersion">
+            <summary> version number when this SegmentInfos was generated.</summary>
         </member>
-        <member name="M:Lucene.Net.Analysis.PorterStemmer.Stem(System.Char[])">
-            <summary>Stem a word contained in a char[].  Returns true if the stemming process
-            resulted in a word different from the input.  You can retrieve the
-            result with getResultLength()/getResultBuffer() or toString().
-            </summary>
+        <member name="M:Lucene.Net.Index.SegmentInfos.ReadCurrentVersion(Lucene.Net.Store.Directory)">
+            <summary> Current version number from segments file.</summary>
+            <throws>  CorruptIndexException if the index is corrupt </throws>
+            <throws>  IOException if there is a low-level IO error </throws>
         </member>
-        <member name="M:Lucene.Net.Analysis.PorterStemmer.Stem(System.Char[],System.Int32,System.Int32)">
-            <summary>Stem a word contained in a portion of a char[] array.  Returns
-            true if the stemming process resulted in a word different from
-            the input.  You can retrieve the result with
-            getResultLength()/getResultBuffer() or toString().
+        <member name="M:Lucene.Net.Index.SegmentInfos.SetInfoStream(System.IO.StreamWriter)">
+            <summary>If non-null, information about retries when loading
+            the segments file will be printed to this.
             </summary>
         </member>
-        <member name="M:Lucene.Net.Analysis.PorterStemmer.Stem(System.Char[],System.Int32)">
-            <summary>Stem a word contained in a leading portion of a char[] array.
-            Returns true if the stemming process resulted in a word different
-            from the input.  You can retrieve the result with
-            getResultLength()/getResultBuffer() or toString().
+        <member name="M:Lucene.Net.Index.SegmentInfos.SetDefaultGenFileRetryCount(System.Int32)">
+            <summary> Advanced: set how many times to try loading the
+            segments.gen file contents to determine current segment
+            generation.  This file is only referenced when the
+            primary method (listing the directory) fails.
             </summary>
         </member>
-        <member name="M:Lucene.Net.Analysis.PorterStemmer.Stem">
-            <summary>Stem the word placed into the Stemmer buffer through calls to add().
-            Returns true if the stemming process resulted in a word different
-            from the input.  You can retrieve the result with
-            getResultLength()/getResultBuffer() or toString().
-            </summary>
+        <member name="M:Lucene.Net.Index.SegmentInfos.GetDefaultGenFileRetryCount">
+            <seealso cref="!:setDefaultGenFileRetryCount">
+            </seealso>
         </member>
-        <member name="M:Lucene.Net.Analysis.PorterStemmer.Main(System.String[])">
-            <summary>Test program for demonstrating the Stemmer.  It reads a file and
-            stems each word, writing the result to standard out.
-            Usage: Stemmer file-name
+        <member name="M:Lucene.Net.Index.SegmentInfos.SetDefaultGenFileRetryPauseMsec(System.Int32)">
+            <summary> Advanced: set how many milliseconds to pause in between
+            attempts to load the segments.gen file.
             </summary>
         </member>
-        <member name="T:Lucene.Net.Analysis.SimpleAnalyzer">
-            <summary>An Analyzer that filters LetterTokenizer with LowerCaseFilter. </summary>
-        </member>
-        <member name="T:Lucene.Net.Analysis.StopAnalyzer">
-            <summary>Filters LetterTokenizer with LowerCaseFilter and StopFilter. </summary>
+        <member name="M:Lucene.Net.Index.SegmentInfos.GetDefaultGenFileRetryPauseMsec">
+            <seealso cref="!:setDefaultGenFileRetryPauseMsec">
+            </seealso>
         </member>
-        <member name="F:Lucene.Net.Analysis.StopAnalyzer.ENGLISH_STOP_WORDS">
-            <summary>An array containing some common English words that are not usually useful
-            for searching. 
+        <member name="M:Lucene.Net.Index.SegmentInfos.SetDefaultGenLookaheadCount(System.Int32)">
+            <summary> Advanced: set how many times to try incrementing the
+            gen when loading the segments file.  This only runs if
+            the primary (listing directory) and secondary (opening
+            segments.gen file) methods fail to find the segments
+            file.
             </summary>
         </member>
-        <member name="M:Lucene.Net.Analysis.StopAnalyzer.#ctor">
-            <summary>Builds an analyzer which removes words in ENGLISH_STOP_WORDS. </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.StopAnalyzer.#ctor(System.Collections.Hashtable)">
-            <summary>Builds an analyzer with the stop words from the given set.</summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.StopAnalyzer.#ctor(System.String[])">
-            <summary>Builds an analyzer which removes words in the provided array. </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.StopAnalyzer.#ctor(System.IO.FileInfo)">
-            <summary>Builds an analyzer with the stop words from the given file.</summary>
-            <seealso cref="!:WordlistLoader.GetWordSet(File)">
+        <member name="M:Lucene.Net.Index.SegmentInfos.GetDefaultGenLookahedCount">
+            <seealso cref="!:setDefaultGenLookaheadCount">
             </seealso>
         </member>
-        <member name="M:Lucene.Net.Analysis.StopAnalyzer.#ctor(System.IO.TextReader)">
-            <summary>Builds an analyzer with the stop words from the given reader.</summary>
-            <seealso cref="!:WordlistLoader.GetWordSet(Reader)">
+        <member name="M:Lucene.Net.Index.SegmentInfos.GetInfoStream">
+            <seealso cref="!:setInfoStream">
             </seealso>
         </member>
-        <member name="M:Lucene.Net.Analysis.StopAnalyzer.TokenStream(System.String,System.IO.TextReader)">
-            <summary>Filters LowerCaseTokenizer with StopFilter. </summary>
+        <member name="M:Lucene.Net.Index.SegmentInfos.Range(System.Int32,System.Int32)">
+            <summary> Returns a new SegmentInfos containing the SegmentInfo
+            instances in the specified range first (inclusive) to
+            last (exclusive), so total number of segments returned
+            is last-first.
+            </summary>
+        </member>
+        <member name="T:Lucene.Net.Index.SegmentInfos.FindSegmentsFile">
+            <summary> Utility class for executing code that needs to do
+            something with the current segments file.  This is
+            necessary with lock-less commits because from the time
+            you locate the current segments file name, until you
+            actually open it, read its contents, or check modified
+            time, etc., it could have been deleted due to a writer
+            commit finishing.
+            </summary>
+        </member>
+        <member name="M:Lucene.Net.Index.SegmentInfos.FindSegmentsFile.DoBody(System.String)">
+            <summary> Subclass must implement this.  The assumption is an
+            IOException will be thrown if something goes wrong
+            during the processing that could have been caused by
+            a writer committing.
+            </summary>
         </member>
-        <member name="T:Lucene.Net.Analysis.StopFilter">
-            <summary> Removes stop words from a token stream.</summary>
+        <member name="T:Lucene.Net.Index.IndexFileNameFilter">
+            <summary> Filename filter that accepts filenames and extensions only created by Lucene.
+            
+            </summary>
+            <author>  Daniel Naber / Bernhard Messer
+            </author>
+            <version>  $rcs = ' $Id: Exp $ ' ;
+            </version>
         </member>
-        <member name="M:Lucene.Net.Analysis.StopFilter.#ctor(Lucene.Net.Analysis.TokenStream,System.String[])">
-            <summary> Construct a token stream filtering the given input.</summary>
+        <member name="M:Lucene.Net.Index.IndexFileNameFilter.IsCFSFile(System.String)">
+            <summary> Returns true if this is a file that would be contained
+            in a CFS file.  This function should only be called on
+            files that pass the above "accept" (ie, are already
+            known to be a Lucene index file).
+            </summary>
         </member>
-        <member name="M:Lucene.Net.Analysis.StopFilter.#ctor(Lucene.Net.Analysis.TokenStream,System.String[],System.Boolean)">
-            <summary> Constructs a filter which removes words from the input
-            TokenStream that are named in the array of words.
+        <member name="T:Lucene.Net.Index.FieldInfos">
+            <summary>Access to the Fieldable Info file that describes document fields and whether or
+            not they are indexed. Each segment has a separate Fieldable Info file. Objects
+            of this class are thread-safe for multiple readers, but only one thread can
+            be adding documents at a time, with no other reader or writer threads
+            accessing this object.
             </summary>
         </member>
-        <member name="M:Lucene.Net.Analysis.StopFilter.#ctor(Lucene.Net.Analysis.TokenStream,System.Collections.Hashtable,System.Boolean)">
-            <summary> Construct a token stream filtering the given input.</summary>
-            <param name="input">
-            </param>
-            <param name="stopWords">The set of Stop Words, as Strings.  If ignoreCase is true, all strings should be lower cased
+        <member name="M:Lucene.Net.Index.FieldInfos.#ctor(Lucene.Net.Store.Directory,System.String)">
+            <summary> Construct a FieldInfos object using the directory and the name of the file
+            IndexInput
+            </summary>
+            <param name="d">The directory to open the IndexInput from
             </param>
-            <param name="ignoreCase">-Ignore case when stopping.  The stopWords set must be setup to contain only lower case words 
+            <param name="name">The name of the file to open the IndexInput from in the Directory
             </param>
+            <throws>  IOException </throws>
         </member>
-        <member name="M:Lucene.Net.Analysis.StopFilter.#ctor(Lucene.Net.Analysis.TokenStream,System.Collections.Hashtable)">
-            <summary> Constructs a filter which removes words from the input
-            TokenStream that are named in the Set.
-            It is crucial that an efficient Set implementation is used
-            for maximum performance.
-            
-            </summary>
-            <seealso cref="M:Lucene.Net.Analysis.StopFilter.MakeStopSet(System.String[])">
-            </seealso>
+        <member name="M:Lucene.Net.Index.FieldInfos.Clone">
+            <summary> Returns a deep clone of this FieldInfos instance.</summary>
         </member>
-        <member name="M:Lucene.Net.Analysis.StopFilter.MakeStopSet(System.String[])">
-            <summary> Builds a Set from an array of stop words,
-            appropriate for passing into the StopFilter constructor.
-            This permits this stopWords construction to be cached once when
-            an Analyzer is constructed.
+        <member name="M:Lucene.Net.Index.FieldInfos.Add(Lucene.Net.Documents.Document)">
+            <summary>Adds field info for a Document. </summary>
+        </member>
+        <member name="M:Lucene.Net.Index.FieldInfos.AddIndexed(System.Collections.ICollection,System.Boolean,System.Boolean,System.Boolean)">
+            <summary> Add fields that are indexed. Whether they have termvectors has to be specified.
             
             </summary>
-            <seealso cref="!:MakeStopSet(String[], boolean) passing false to ignoreCase">
-            </seealso>
-        </member>
-        <member name="M:Lucene.Net.Analysis.StopFilter.MakeStopSet(System.String[],System.Boolean)">
-            <summary> </summary>
-            <param name="stopWords">
+            <param name="names">The names of the fields
             </param>
-            <param name="ignoreCase">If true, all words are lower cased first.  
+            <param name="storeTermVectors">Whether the fields store term vectors or not
+            </param>
+            <param name="storePositionWithTermVector">true if positions should be stored.
+            </param>
+            <param name="storeOffsetWithTermVector">true if offsets should be stored
             </param>
-            <returns> a Set containing the words
-            </returns>
-        </member>
-        <member name="M:Lucene.Net.Analysis.StopFilter.Next">
-            <summary> Returns the next input Token whose termText() is not a stop word.</summary>
-        </member>
-        <member name="T:Lucene.Net.Analysis.Token">
-            <summary>A Token is an occurence of a term from the text of a field.  It consists of
-            a term's text, the start and end offset of the term in the text of the field,
-            and a type string.
-            The start and end offsets permit applications to re-associate a token with
-            its source text, e.g., to display highlighted query terms in a document
-            browser, or to show matching text fragments in a KWIC (KeyWord In Context)
-            display, etc.
-            The type is an interned string, assigned by a lexical analyzer
-            (a.k.a. tokenizer), naming the lexical or syntactic class that the token
-            belongs to.  For example an end of sentence marker token might be implemented
-            with type "eos".  The default token type is "word".  
-            </summary>
-        </member>
-        <!-- Badly formed XML comment ignored for member "M:Lucene.Net.Analysis.Token.#ctor(System.String,System.Int32,System.Int32)" -->
-        <!-- Badly formed XML comment ignored for member "M:Lucene.Net.Analysis.Token.#ctor(System.String,System.Int32,System.Int32,System.String)" -->
-        <!-- Badly formed XML comment ignored for member "M:Lucene.Net.Analysis.Token.SetPositionIncrement(System.Int32)" -->
-        <member name="M:Lucene.Net.Analysis.Token.GetPositionIncrement">
-            <summary>Returns the position increment of this Token.</summary>
-            <seealso cref="!:setPositionIncrement">
-            </seealso>
-        </member>
-        <member name="M:Lucene.Net.Analysis.Token.TermText">
-            <summary>Returns the Token's term text. </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.Token.StartOffset">
-            <summary>Returns this Token's starting offset, the position of the first character
-            corresponding to this token in the source text.
-            Note that the difference between endOffset() and startOffset() may not be
-            equal to termText.length(), as the term text may have been altered by a
-            stemmer or some other filter. 
-            </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.Token.EndOffset">
-            <summary>Returns this Token's ending offset, one greater than the position of the
-            last character corresponding to this token in the source text. 
-            </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.Token.Type">
-            <summary>Returns this Token's lexical type.  Defaults to "word". </summary>
-        </member>
-        <member name="T:Lucene.Net.Analysis.WhitespaceAnalyzer">
-            <summary>An Analyzer that uses WhitespaceTokenizer. </summary>
-        </member>
-        <member name="T:Lucene.Net.Analysis.WhitespaceTokenizer">
-            <summary>A WhitespaceTokenizer is a tokenizer that divides text at whitespace.
-            Adjacent sequences of non-Whitespace characters form tokens. 
-            </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.WhitespaceTokenizer.#ctor(System.IO.TextReader)">
-            <summary>Construct a new WhitespaceTokenizer. </summary>
-        </member>
-        <member name="M:Lucene.Net.Analysis.WhitespaceTokenizer.IsTokenChar(System.Char)">
-            <summary>Collects only characters which do not satisfy
-            {@link Character#isWhitespace(char)}.
-            </summary>
         </member>
-        <member name="T:Lucene.Net.Analysis.WordlistLoader">
-            <summary> Loader for text files that represent a list of stopwords.
+        <member name="M:Lucene.Net.Index.FieldInfos.Add(System.Collections.ICollection,System.Boolean)">
+            <summary> Assumes the fields are not storing term vectors.
             
             </summary>
-            <author>  Gerhard Schwarz
-            </author>
-            <version>  $Id: WordlistLoader.java 192989 2005-06-22 19:59:03Z dnaber $
-            </version>
-        </member>
-        <member name="M:Lucene.Net.Analysis.WordlistLoader.GetWordSet(System.IO.FileInfo)">
-            <summary> Loads a text file and adds every line as an entry to a HashSet (omitting
-            leading and trailing whitespace). Every line of the file should contain only
-            one word. The words need to be in lowercase if you make use of an
-            Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
+            <param name="names">The names of the fields
+            </param>
+            <param name="isIndexed">Whether the fields are indexed or not
             
-            </summary>
-            <param name="wordfile">File containing the wordlist
             </param>
-            <returns> A HashSet with the file's words
-            </returns>
+            <seealso cref="!:Add(String, boolean)">
+            </seealso>
         </member>
-        <member name="M:Lucene.Net.Analysis.WordlistLoader.GetWordSet(System.IO.TextReader)">
-            <summary> Reads lines from a Reader and adds every line as an entry to a HashSet (omitting
-            leading and trailing whitespace). Every line of the Reader should contain only
-            one word. The words need to be in lowercase if you make use of an
-            Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
+        <member name="M:Lucene.Net.Index.FieldInfos.Add(System.String,System.Boolean)">
+            <summary> Calls 5 parameter add with false for all TermVector parameters.
             
             </summary>
-            <param name="reader">Reader containing the wordlist
+            <param name="name">The name of the Fieldable
             </param>
-            <returns> A HashSet with the reader's words
-            </returns>
+            <param name="isIndexed">true if the field is indexed
+            </param>
+            <seealso cref="!:Add(String, boolean, boolean, boolean, boolean)">
+            </seealso>
         </member>
-        <member name="M:Lucene.Net.Analysis.WordlistLoader.MakeWordTable(System.Collections.Hashtable)">
-            <summary> Builds a wordlist table, using words as both keys and values
-            for backward compatibility.
+        <member name="M:Lucene.Net.Index.FieldInfos.Add(System.String,System.Boolean,System.Boolean)">
+            <summary> Calls 5 parameter add with false for term vector positions and offsets.
             
             </summary>
-            <param name="wordSet">  stopword set
+            <param name="name">The name of the field
+            </param>
+            <param name="isIndexed"> true if the field is indexed
+            </param>
+            <param name="storeTermVector">true if the term vector should be stored
             </param>
         </member>
-        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Documents.DateField" -->
-        <member name="M:Lucene.Net.Documents.DateField.DateToString(System.DateTime)">
-            <summary> Converts a Date to a string suitable for indexing.</summary>
-            <throws>  RuntimeException if the date specified in the </throws>
-            <summary> method argument is before 1970
-            </summary>
-        </member>
-        <member name="M:Lucene.Net.Documents.DateField.TimeToString(System.Int64)">
-            <summary> Converts a millisecond time to a string suitable for indexing.</summary>
-            <throws>  RuntimeException if the time specified in the </throws>
-            <summary> method argument is negative, that is, before 1970
-            </summary>
-        </member>
-        <member name="M:Lucene.Net.Documents.DateField.StringToTime(System.String)">
-            <summary>Converts a string-encoded date into a millisecond time. </summary>
-        </member>
-        <member name="M:Lucene.Net.Documents.DateField.StringToDate(System.String)">
-            <summary>Converts a string-encoded date into a Date object. </summary>
-        </member>
-        <!-- Badly formed XML comment ignored for member "T:Lucene.Net.Documents.DateTools" -->
-        <member name="M:Lucene.Net.Documents.DateTools.DateToString(System.DateTime,Lucene.Net.Documents.DateTools.Resolution)">
-            <summary> Converts a Date to a string suitable for indexing.
+        <member name="M:Lucene.Net.Index.FieldInfos.Add(System.String,System.Boolean,System.Boolean,System.Boolean,System.Boolean)">
+            <summary>If the field is not yet known, adds it. If it is known, checks to make
+            sure that the isIndexed flag is the same as was given previously for this
+            field. If not - marks it as being indexed.  Same goes for the TermVector
+            parameters.
             
             </summary>
-            <param name="date">the date to be converted
+            <param name="name">The name of the field
             </param>
-            <param name="resolution">the desired resolution, see

[... 15438 lines stripped ...]


Mime
View raw message