lucenenet-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From nightowl...@apache.org
Subject [13/62] [abbrv] [partial] lucenenet git commit: Renamed Lucene.Net.Core folder Lucene.Net because the dotnet.exe pack command doesn't allow creating a NuGet package with a different name than its folder. Working around it with the script was much more co
Date Tue, 04 Apr 2017 17:19:19 GMT
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/IndexWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/IndexWriter.cs b/src/Lucene.Net.Core/Index/IndexWriter.cs
deleted file mode 100644
index b6bf4b7..0000000
--- a/src/Lucene.Net.Core/Index/IndexWriter.cs
+++ /dev/null
@@ -1,5954 +0,0 @@
-using Lucene.Net.Support;
-using System;
-using System.Collections.Concurrent;
-using System.Collections.Generic;
-using System.Diagnostics;
-using System.Globalization;
-using System.IO;
-using System.Linq;
-using System.Reflection;
-using System.Text;
-using System.Threading;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using Analyzer = Lucene.Net.Analysis.Analyzer;
-    using IBits = Lucene.Net.Util.IBits;
-    using BytesRef = Lucene.Net.Util.BytesRef;
-    using Codec = Lucene.Net.Codecs.Codec;
-    using CompoundFileDirectory = Lucene.Net.Store.CompoundFileDirectory;
-    using Constants = Lucene.Net.Util.Constants;
-    using Directory = Lucene.Net.Store.Directory;
-    using FieldNumbers = Lucene.Net.Index.FieldInfos.FieldNumbers;
-    using InfoStream = Lucene.Net.Util.InfoStream;
-    using IOContext = Lucene.Net.Store.IOContext;
-    using IOUtils = Lucene.Net.Util.IOUtils;
-    using Lock = Lucene.Net.Store.Lock;
-    using LockObtainFailedException = Lucene.Net.Store.LockObtainFailedException;
-    using Lucene3xCodec = Lucene.Net.Codecs.Lucene3x.Lucene3xCodec;
-    using Lucene3xSegmentInfoFormat = Lucene.Net.Codecs.Lucene3x.Lucene3xSegmentInfoFormat;
-    using MergeInfo = Lucene.Net.Store.MergeInfo;
-    using Query = Lucene.Net.Search.Query;
-    using TrackingDirectoryWrapper = Lucene.Net.Store.TrackingDirectoryWrapper;
-
-    /// <summary>
-    /// An <see cref="IndexWriter"/> creates and maintains an index.
-    ///
-    /// <para>The <see cref="OpenMode"/> option on
-    /// <see cref="IndexWriterConfig.OpenMode"/> determines
-    /// whether a new index is created, or whether an existing index is
-    /// opened. Note that you can open an index with <see cref="OpenMode.CREATE"/>
-    /// even while readers are using the index. The old readers will
-    /// continue to search the "point in time" snapshot they had opened,
-    /// and won't see the newly created index until they re-open. If
-    /// <see cref="OpenMode.CREATE_OR_APPEND"/> is used <see cref="IndexWriter"/> will create a
-    /// new index if there is not already an index at the provided path
-    /// and otherwise open the existing index.</para>
-    ///
-    /// <para>In either case, documents are added with <see cref="AddDocument(IEnumerable{IIndexableField})"/>
-    /// and removed with <see cref="DeleteDocuments(Term)"/> or
-    /// <see cref="DeleteDocuments(Query)"/>. A document can be updated with
-    /// <see cref="UpdateDocument(Term, IEnumerable{IIndexableField})"/> (which just deletes
-    /// and then adds the entire document). When finished adding, deleting
-    /// and updating documents, <see cref="Dispose()"/> should be called.</para>
-    ///
-    /// <a name="flush"></a>
-    /// <para>These changes are buffered in memory and periodically
-    /// flushed to the <see cref="Store.Directory"/> (during the above method
-    /// calls). A flush is triggered when there are enough added documents
-    /// since the last flush. Flushing is triggered either by RAM usage of the
-    /// documents (see <see cref="LiveIndexWriterConfig.RAMBufferSizeMB"/>) or the
-    /// number of added documents (see <see cref="LiveIndexWriterConfig.MaxBufferedDocs"/>).
-    /// The default is to flush when RAM usage hits
-    /// <see cref="IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB"/> MB. For
-    /// best indexing speed you should flush by RAM usage with a
-    /// large RAM buffer. Additionally, if <see cref="IndexWriter"/> reaches the configured number of
-    /// buffered deletes (see <see cref="LiveIndexWriterConfig.MaxBufferedDeleteTerms"/>)
-    /// the deleted terms and queries are flushed and applied to existing segments.
-    /// In contrast to the other flush options <see cref="LiveIndexWriterConfig.RAMBufferSizeMB"/> and
-    /// <see cref="LiveIndexWriterConfig.MaxBufferedDocs"/>, deleted terms
-    /// won't trigger a segment flush. Note that flushing just moves the
-    /// internal buffered state in <see cref="IndexWriter"/> into the index, but
-    /// these changes are not visible to <see cref="IndexReader"/> until either
-    /// <see cref="Commit()"/> or <see cref="Dispose()"/> is called.  A flush may
-    /// also trigger one or more segment merges which by default
-    /// run with a background thread so as not to block the
-    /// addDocument calls (see <a href="#mergePolicy">below</a>
-    /// for changing the <see cref="mergeScheduler"/>).</para>
-    ///
-    /// <para>Opening an <see cref="IndexWriter"/> creates a lock file for the directory in use. Trying to open
-    /// another <see cref="IndexWriter"/> on the same directory will lead to a
-    /// <see cref="LockObtainFailedException"/>. The <see cref="LockObtainFailedException"/>
-    /// is also thrown if an <see cref="IndexReader"/> on the same directory is used to delete documents
-    /// from the index.</para>
-    ///
-    /// <a name="deletionPolicy"></a>
-    /// <para>Expert: <see cref="IndexWriter"/> allows an optional
-    /// <see cref="IndexDeletionPolicy"/> implementation to be
-    /// specified.  You can use this to control when prior commits
-    /// are deleted from the index.  The default policy is
-    /// <see cref="KeepOnlyLastCommitDeletionPolicy"/> which removes all prior
-    /// commits as soon as a new commit is done (this matches
-    /// behavior before 2.2).  Creating your own policy can allow
-    /// you to explicitly keep previous "point in time" commits
-    /// alive in the index for some time, to allow readers to
-    /// refresh to the new commit without having the old commit
-    /// deleted out from under them.  This is necessary on
-    /// filesystems like NFS that do not support "delete on last
-    /// close" semantics, which Lucene's "point in time" search
-    /// normally relies on. </para>
-    ///
-    /// <a name="mergePolicy"></a> <para>Expert:
-    /// <see cref="IndexWriter"/> allows you to separately change
-    /// the <see cref="mergePolicy"/> and the <see cref="mergeScheduler"/>.
-    /// The <see cref="mergePolicy"/> is invoked whenever there are
-    /// changes to the segments in the index.  Its role is to
-    /// select which merges to do, if any, and return a 
-    /// <see cref="MergePolicy.MergeSpecification"/> describing the merges.
-    /// The default is <see cref="LogByteSizeMergePolicy"/>.  Then, the 
-    /// <see cref="MergeScheduler"/> is invoked with the requested merges and
-    /// it decides when and how to run the merges.  The default is
-    /// <see cref="ConcurrentMergeScheduler"/>. </para>
-    ///
-    /// <a name="OOME"></a><para><b>NOTE</b>: if you hit an
-    /// <see cref="OutOfMemoryException"/> then <see cref="IndexWriter"/> will quietly record this
-    /// fact and block all future segment commits.  This is a
-    /// defensive measure in case any internal state (buffered
-    /// documents and deletions) were corrupted.  Any subsequent
-    /// calls to <see cref="Commit()"/> will throw an
-    /// <see cref="InvalidOperationException"/>.  The only course of action is to
-    /// call <see cref="Dispose()"/>, which internally will call
-    /// <see cref="Rollback()"/>, to undo any changes to the index since the
-    /// last commit.  You can also just call <see cref="Rollback()"/>
-    /// directly.</para>
-    ///
-    /// <a name="thread-safety"></a><para><b>NOTE</b>: 
-    /// <see cref="IndexWriter"/> instances are completely thread
-    /// safe, meaning multiple threads can call any of its
-    /// methods, concurrently.  If your application requires
-    /// external synchronization, you should <b>not</b>
-    /// synchronize on the <see cref="IndexWriter"/> instance as
-    /// this may cause deadlock; use your own (non-Lucene) objects
-    /// instead. </para>
-    ///
-    /// <para><b>NOTE</b>: If you call
-    /// <see cref="Thread.Interrupt()"/> on a thread that's within
-    /// <see cref="IndexWriter"/>, <see cref="IndexWriter"/> will try to catch this (eg, if
-    /// it's in a Wait() or <see cref="Thread.Sleep()"/>), and will then throw
-    /// the unchecked exception <see cref="ThreadInterruptedException"/>
-    /// and <b>clear</b> the interrupt status on the thread.</para>
-    /// </summary>
-
-    /*
-     * Clarification: Check Points (and commits)
-     * IndexWriter writes new index files to the directory without writing a new segments_N
-     * file which references these new files. It also means that the state of
-     * the in memory SegmentInfos object is different than the most recent
-     * segments_N file written to the directory.
-     *
-     * Each time the SegmentInfos is changed, and matches the (possibly
-     * modified) directory files, we have a new "check point".
-     * If the modified/new SegmentInfos is written to disk - as a new
-     * (generation of) segments_N file - this check point is also an
-     * IndexCommit.
-     *
-     * A new checkpoint always replaces the previous checkpoint and
-     * becomes the new "front" of the index. this allows the IndexFileDeleter
-     * to delete files that are referenced only by stale checkpoints.
-     * (files that were created since the last commit, but are no longer
-     * referenced by the "front" of the index). For this, IndexFileDeleter
-     * keeps track of the last non commit checkpoint.
-     */
-
-    public class IndexWriter : IDisposable, ITwoPhaseCommit
-    {
-        private const int UNBOUNDED_MAX_MERGE_SEGMENTS = -1;
-
-        /// <summary>
-        /// Name of the write lock in the index.
-        /// </summary>
-        public static readonly string WRITE_LOCK_NAME = "write.lock";
-
-        /// <summary>
-        /// Key for the source of a segment in the <see cref="SegmentInfo.Diagnostics"/>. </summary>
-        public static readonly string SOURCE = "source";
-
-        /// <summary>
-        /// Source of a segment which results from a merge of other segments. </summary>
-        public static readonly string SOURCE_MERGE = "merge";
-
-        /// <summary>
-        /// Source of a segment which results from a flush. </summary>
-        public static readonly string SOURCE_FLUSH = "flush";
-
-        /// <summary>
-        /// Source of a segment which results from a call to <see cref="AddIndexes(IndexReader[])"/>. </summary>
-        public static readonly string SOURCE_ADDINDEXES_READERS = "AddIndexes(params IndexReader[] readers)";
-
-        /// <summary>
-        /// Absolute hard maximum length for a term, in bytes once
-        /// encoded as UTF8.  If a term arrives from the analyzer
-        /// longer than this length, an
-        /// <see cref="ArgumentException"/> is thrown
-        /// and a message is printed to <see cref="infoStream"/>, if set (see
-        /// <see cref="IndexWriterConfig.SetInfoStream(InfoStream)"/>).
-        /// </summary>
-        public static readonly int MAX_TERM_LENGTH = DocumentsWriterPerThread.MAX_TERM_LENGTH_UTF8;
-
-        private volatile bool hitOOM;
-
-        private readonly Directory directory; // where this index resides
-        private readonly Analyzer analyzer; // how to analyze text
-
-        private long changeCount; // increments every time a change is completed
-        private long lastCommitChangeCount; // last changeCount that was committed
-
-        private IList<SegmentCommitInfo> rollbackSegments; // list of segmentInfo we will fallback to if the commit fails
-
-        internal volatile SegmentInfos pendingCommit; // set when a commit is pending (after prepareCommit() & before commit())
-        internal long pendingCommitChangeCount;
-
-        private ICollection<string> filesToCommit;
-
-        internal readonly SegmentInfos segmentInfos; // the segments
-        internal readonly FieldNumbers globalFieldNumberMap;
-
-        private readonly DocumentsWriter docWriter;
-        private readonly ConcurrentQueue<IEvent> eventQueue;
-        internal readonly IndexFileDeleter deleter;
-
-        // used by forceMerge to note those needing merging
-        private readonly IDictionary<SegmentCommitInfo, bool?> segmentsToMerge = new Dictionary<SegmentCommitInfo, bool?>();
-
-        private int mergeMaxNumSegments;
-
-        private Lock writeLock;
-
-        private volatile bool closed;
-        private volatile bool closing;
-
-        // Holds all SegmentInfo instances currently involved in
-        // merges
-        private readonly HashSet<SegmentCommitInfo> mergingSegments = new HashSet<SegmentCommitInfo>();
-
-        private readonly MergePolicy mergePolicy;
-        private readonly IMergeScheduler mergeScheduler;
-        private readonly LinkedList<MergePolicy.OneMerge> pendingMerges = new LinkedList<MergePolicy.OneMerge>();
-        private readonly HashSet<MergePolicy.OneMerge> runningMerges = new HashSet<MergePolicy.OneMerge>();
-        private IList<MergePolicy.OneMerge> mergeExceptions = new List<MergePolicy.OneMerge>();
-        private long mergeGen;
-        private bool stopMerges;
-
-        internal readonly AtomicInt32 flushCount = new AtomicInt32();
-        internal readonly AtomicInt32 flushDeletesCount = new AtomicInt32();
-
-        internal ReaderPool readerPool;
-        internal readonly BufferedUpdatesStream bufferedUpdatesStream;
-
-        // this is a "write once" variable (like the organic dye
-        // on a DVD-R that may or may not be heated by a laser and
-        // then cooled to permanently record the event): it's
-        // false, until getReader() is called for the first time,
-        // at which point it's switched to true and never changes
-        // back to false.  Once this is true, we hold open and
-        // reuse SegmentReader instances internally for applying
-        // deletes, doing merges, and reopening near real-time
-        // readers.
-        private volatile bool poolReaders;
-
-        // The instance that was passed to the constructor. It is saved only in order
-        // to allow users to query an IndexWriter settings.
-        private readonly LiveIndexWriterConfig config;
-
-        internal virtual DirectoryReader GetReader()
-        {
-            return GetReader(true);
-        }
-
-        /// <summary>
-        /// Expert: returns a readonly reader, covering all
-        /// committed as well as un-committed changes to the index.
-        /// this provides "near real-time" searching, in that
-        /// changes made during an <see cref="IndexWriter"/> session can be
-        /// quickly made available for searching without closing
-        /// the writer nor calling <see cref="Commit()"/>.
-        ///
-        /// <para>Note that this is functionally equivalent to calling
-        /// Flush() and then opening a new reader.  But the turnaround time of this
-        /// method should be faster since it avoids the potentially
-        /// costly <see cref="Commit()"/>.</para>
-        ///
-        /// <para>You must close the <see cref="IndexReader"/> returned by
-        /// this method once you are done using it.</para>
-        ///
-        /// <para>It's <i>near</i> real-time because there is no hard
-        /// guarantee on how quickly you can get a new reader after
-        /// making changes with <see cref="IndexWriter"/>.  You'll have to
-        /// experiment in your situation to determine if it's
-        /// fast enough.  As this is a new and experimental
-        /// feature, please report back on your findings so we can
-        /// learn, improve and iterate.</para>
-        ///
-        /// <para>The resulting reader supports
-        /// <see cref="DirectoryReader.DoOpenIfChanged()"/>, but that call will simply forward
-        /// back to this method (though this may change in the
-        /// future).</para>
-        ///
-        /// <para>The very first time this method is called, this
-        /// writer instance will make every effort to pool the
-        /// readers that it opens for doing merges, applying
-        /// deletes, etc.  This means additional resources (RAM,
-        /// file descriptors, CPU time) will be consumed.</para>
-        ///
-        /// <para>For lower latency on reopening a reader, you should
-        /// set <see cref="LiveIndexWriterConfig.MergedSegmentWarmer"/> to
-        /// pre-warm a newly merged segment before it's committed
-        /// to the index.  This is important for minimizing
-        /// index-to-search delay after a large merge.  </para>
-        ///
-        /// <para>If an AddIndexes* call is running in another thread,
-        /// then this reader will only search those segments from
-        /// the foreign index that have been successfully copied
-        /// over, so far.</para>
-        ///
-        /// <para><b>NOTE</b>: Once the writer is disposed, any
-        /// outstanding readers may continue to be used.  However,
-        /// if you attempt to reopen any of those readers, you'll
-        /// hit an <see cref="ObjectDisposedException"/>.</para>
-        ///
-        /// @lucene.experimental
-        /// </summary>
-        /// <returns> <see cref="IndexReader"/> that covers entire index plus all
-        /// changes made so far by this <see cref="IndexWriter"/> instance
-        /// </returns>
-        /// <exception cref="IOException"> If there is a low-level I/O error </exception>
-        public virtual DirectoryReader GetReader(bool applyAllDeletes)
-        {
-            EnsureOpen();
-
-            long tStart = Environment.TickCount;
-
-            if (infoStream.IsEnabled("IW"))
-            {
-                infoStream.Message("IW", "flush at getReader");
-            }
-            // Do this up front before flushing so that the readers
-            // obtained during this flush are pooled, the first time
-            // this method is called:
-            poolReaders = true;
-            DirectoryReader r = null;
-            DoBeforeFlush();
-            bool anySegmentFlushed = false;
-            /*
-             * for releasing a NRT reader we must ensure that
-             * DW doesn't add any segments or deletes until we are
-             * done with creating the NRT DirectoryReader.
-             * We release the two stage full flush after we are done opening the
-             * directory reader!
-             */
-            bool success2 = false;
-            try
-            {
-                lock (fullFlushLock)
-                {
-                    bool success = false;
-                    try
-                    {
-                        anySegmentFlushed = docWriter.FlushAllThreads(this);
-                        if (!anySegmentFlushed)
-                        {
-                            // prevent double increment since docWriter#doFlush increments the flushcount
-                            // if we flushed anything.
-                            flushCount.IncrementAndGet();
-                        }
-                        success = true;
-                        // Prevent segmentInfos from changing while opening the
-                        // reader; in theory we could instead do similar retry logic,
-                        // just like we do when loading segments_N
-                        lock (this)
-                        {
-                            MaybeApplyDeletes(applyAllDeletes);
-                            r = StandardDirectoryReader.Open(this, segmentInfos, applyAllDeletes);
-                            if (infoStream.IsEnabled("IW"))
-                            {
-                                infoStream.Message("IW", "return reader version=" + r.Version + " reader=" + r);
-                            }
-                        }
-                    }
-                    catch (System.OutOfMemoryException oom)
-                    {
-                        HandleOOM(oom, "getReader");
-                        // never reached but javac disagrees:
-                        return null;
-                    }
-                    finally
-                    {
-                        if (!success)
-                        {
-                            if (infoStream.IsEnabled("IW"))
-                            {
-                                infoStream.Message("IW", "hit exception during NRT reader");
-                            }
-                        }
-                        // Done: finish the full flush!
-                        docWriter.FinishFullFlush(success);
-                        ProcessEvents(false, true);
-                        DoAfterFlush();
-                    }
-                }
-                if (anySegmentFlushed)
-                {
-                    MaybeMerge(MergeTrigger.FULL_FLUSH, UNBOUNDED_MAX_MERGE_SEGMENTS);
-                }
-                if (infoStream.IsEnabled("IW"))
-                {
-                    infoStream.Message("IW", "getReader took " + (Environment.TickCount - tStart) + " msec");
-                }
-                success2 = true;
-            }
-            finally
-            {
-                if (!success2)
-                {
-                    IOUtils.CloseWhileHandlingException(r);
-                }
-            }
-            return r;
-        }
-
-        /// <summary>
-        /// Holds shared <see cref="SegmentReader"/> instances. <see cref="IndexWriter"/> uses
-        /// <see cref="SegmentReader"/>s for 1) applying deletes, 2) doing
-        /// merges, 3) handing out a real-time reader.  This pool
-        /// reuses instances of the <see cref="SegmentReader"/>s in all these
-        /// places if it is in "near real-time mode" (<see cref="GetReader()"/>
-        /// has been called on this instance).
-        /// </summary>
-#if FEATURE_SERIALIZABLE
-        [Serializable]
-#endif
-        internal class ReaderPool : IDisposable
-        {
-            private readonly IndexWriter outerInstance;
-
-            public ReaderPool(IndexWriter outerInstance)
-            {
-                this.outerInstance = outerInstance;
-            }
-
-            private readonly IDictionary<SegmentCommitInfo, ReadersAndUpdates> readerMap = new Dictionary<SegmentCommitInfo, ReadersAndUpdates>();
-
-            // used only by asserts
-            public virtual bool InfoIsLive(SegmentCommitInfo info)
-            {
-                lock (this)
-                {
-                    int idx = outerInstance.segmentInfos.IndexOf(info);
-                    Debug.Assert(idx != -1, "info=" + info + " isn't live");
-                    Debug.Assert(outerInstance.segmentInfos.Info(idx) == info, "info=" + info + " doesn't match live info in segmentInfos");
-                    return true;
-                }
-            }
-
-            public virtual void Drop(SegmentCommitInfo info)
-            {
-                lock (this)
-                {
-                    ReadersAndUpdates rld;
-                    readerMap.TryGetValue(info, out rld);
-                    if (rld != null)
-                    {
-                        Debug.Assert(info == rld.Info);
-                        //        System.out.println("[" + Thread.currentThread().getName() + "] ReaderPool.drop: " + info);
-                        readerMap.Remove(info);
-                        rld.DropReaders();
-                    }
-                }
-            }
-
-            public virtual bool AnyPendingDeletes()
-            {
-                lock (this)
-                {
-                    foreach (ReadersAndUpdates rld in readerMap.Values)
-                    {
-                        if (rld.PendingDeleteCount != 0)
-                        {
-                            return true;
-                        }
-                    }
-
-                    return false;
-                }
-            }
-
-            public virtual void Release(ReadersAndUpdates rld)
-            {
-                lock (this)
-                {
-                    Release(rld, true);
-                }
-            }
-
-            public virtual void Release(ReadersAndUpdates rld, bool assertInfoLive)
-            {
-                lock (this)
-                {
-                    // Matches incRef in get:
-                    rld.DecRef();
-
-                    // Pool still holds a ref:
-                    Debug.Assert(rld.RefCount() >= 1);
-
-                    if (!outerInstance.poolReaders && rld.RefCount() == 1)
-                    {
-                        // this is the last ref to this RLD, and we're not
-                        // pooling, so remove it:
-                        //        System.out.println("[" + Thread.currentThread().getName() + "] ReaderPool.release: " + rld.info);
-                        if (rld.WriteLiveDocs(outerInstance.directory))
-                        {
-                            // Make sure we only write del docs for a live segment:
-                            Debug.Assert(assertInfoLive == false || InfoIsLive(rld.Info));
-                            // Must checkpoint because we just
-                            // created new _X_N.del and field updates files;
-                            // don't call IW.checkpoint because that also
-                            // increments SIS.version, which we do not want to
-                            // do here: it was done previously (after we
-                            // invoked BDS.applyDeletes), whereas here all we
-                            // did was move the state to disk:
-                            outerInstance.CheckpointNoSIS();
-                        }
-                        //System.out.println("IW: done writeLiveDocs for info=" + rld.info);
-
-                        //        System.out.println("[" + Thread.currentThread().getName() + "] ReaderPool.release: drop readers " + rld.info);
-                        rld.DropReaders();
-                        readerMap.Remove(rld.Info);
-                    }
-                }
-            }
-
-            public void Dispose()
-            {
-                DropAll(false);
-            }
-
-            /// <summary>
-            /// Remove all our references to readers, and commits
-            /// any pending changes.
-            /// </summary>
-            internal virtual void DropAll(bool doSave)
-            {
-                lock (this)
-                {
-                    Exception priorE = null;
-                    IEnumerator<KeyValuePair<SegmentCommitInfo, ReadersAndUpdates>> it = readerMap.GetEnumerator();
-
-                    // LUCENENET specific - Since an enumerator doesn't allow you to delete 
-                    // immediately, keep track of which elements we have iterated over so
-                    // we can delete them immediately before throwing exceptions or at the
-                    // end of the block.
-                    IList<KeyValuePair<SegmentCommitInfo, ReadersAndUpdates>> toDelete = new List<KeyValuePair<SegmentCommitInfo, ReadersAndUpdates>>();
-
-                    while (it.MoveNext())
-                    {
-                        ReadersAndUpdates rld = it.Current.Value;
-
-                        try
-                        {
-                            if (doSave && rld.WriteLiveDocs(outerInstance.directory)) // Throws IOException
-                            {
-                                // Make sure we only write del docs and field updates for a live segment:
-                                Debug.Assert(InfoIsLive(rld.Info));
-                                // Must checkpoint because we just
-                                // created new _X_N.del and field updates files;
-                                // don't call IW.checkpoint because that also
-                                // increments SIS.version, which we do not want to
-                                // do here: it was done previously (after we
-                                // invoked BDS.applyDeletes), whereas here all we
-                                // did was move the state to disk:
-                                outerInstance.CheckpointNoSIS(); // Throws IOException
-                            }
-                        }
-                        catch (Exception t)
-                        {
-                            if (doSave)
-                            {
-                                // LUCENENET specific: remove all of the
-                                // elements we have iterated over so far
-                                // before throwing an exception.
-                                readerMap.RemoveAll(toDelete);
-                                IOUtils.ReThrow(t);
-                            }
-                            else if (priorE == null)
-                            {
-                                priorE = t;
-                            }
-                        }
-
-                        // Important to remove as-we-go, not with .clear()
-                        // in the end, in case we hit an exception;
-                        // otherwise we could over-decref if close() is
-                        // called again:
-
-                        // LUCENENET specific - we cannot delete immediately,
-                        // so we store the elements that are iterated over and
-                        // delete as soon as we are done iterating (whether
-                        // that is because of an exception or not).
-                        toDelete.Add(it.Current);
-
-                        // NOTE: it is allowed that these decRefs do not
-                        // actually close the SRs; this happens when a
-                        // near real-time reader is kept open after the
-                        // IndexWriter instance is closed:
-                        try
-                        {
-                            rld.DropReaders(); // Throws IOException
-                        }
-                        catch (Exception t)
-                        {
-                            if (doSave)
-                            {
-                                // LUCENENET specific: remove all of the
-                                // elements we have iterated over so far
-                                // before throwing an exception.
-                                readerMap.RemoveAll(toDelete);
-                                IOUtils.ReThrow(t);
-                            }
-                            else if (priorE == null)
-                            {
-                                priorE = t;
-                            }
-                        }
-                    }
-                    // LUCENENET specific: remove all of the
-                    // elements we have iterated over so far
-                    // before possibly throwing an exception.
-                    readerMap.RemoveAll(toDelete);
-
-                    Debug.Assert(readerMap.Count == 0);
-                    IOUtils.ReThrow(priorE);
-                }
-            }
-
-            /// <summary>
-            /// Commit live docs changes for the segment readers for
-            /// the provided infos.
-            /// </summary>
-            /// <exception cref="IOException"> If there is a low-level I/O error </exception>
-            public virtual void Commit(SegmentInfos infos)
-            {
-                lock (this)
-                {
-                    foreach (SegmentCommitInfo info in infos.Segments)
-                    {
-                        ReadersAndUpdates rld;
-                        if (readerMap.TryGetValue(info, out rld))
-                        {
-                            Debug.Assert(rld.Info == info);
-                            if (rld.WriteLiveDocs(outerInstance.directory))
-                            {
-                                // Make sure we only write del docs for a live segment:
-                                Debug.Assert(InfoIsLive(info));
-                                // Must checkpoint because we just
-                                // created new _X_N.del and field updates files;
-                                // don't call IW.checkpoint because that also
-                                // increments SIS.version, which we do not want to
-                                // do here: it was done previously (after we
-                                // invoked BDS.applyDeletes), whereas here all we
-                                // did was move the state to disk:
-                                outerInstance.CheckpointNoSIS();
-                            }
-                        }
-                    }
-                }
-            }
-
-            /// <summary>
-            /// Obtain a <see cref="ReadersAndUpdates"/> instance from the
-            /// readerPool.  If <paramref name="create"/> is <c>true</c>, you must later call
-            /// <see cref="Release(ReadersAndUpdates)"/>.
-            /// </summary>
-            public virtual ReadersAndUpdates Get(SegmentCommitInfo info, bool create)
-            {
-                lock (this)
-                {
-                    Debug.Assert(info.Info.Dir == outerInstance.directory, "info.dir=" + info.Info.Dir + " vs " + outerInstance.directory);
-
-                    ReadersAndUpdates rld;
-                    readerMap.TryGetValue(info, out rld);
-                    if (rld == null)
-                    {
-                        if (!create)
-                        {
-                            return null;
-                        }
-                        rld = new ReadersAndUpdates(outerInstance, info);
-                        // Steal initial reference:
-                        readerMap[info] = rld;
-                    }
-                    else
-                    {
-                        Debug.Assert(rld.Info == info, "Infos are not equal");//, "rld.info=" + rld.Info + " info=" + info + " isLive?=" + InfoIsLive(rld.Info) + " vs " + InfoIsLive(info));
-                    }
-
-                    if (create)
-                    {
-                        // Return ref to caller:
-                        rld.IncRef();
-                    }
-
-                    Debug.Assert(NoDups());
-
-                    return rld;
-                }
-            }
-
-            /// <summary>
-            /// Make sure that every segment appears only once in the
-            /// pool:
-            /// </summary>
-            private bool NoDups()
-            {
-                HashSet<string> seen = new HashSet<string>();
-                foreach (SegmentCommitInfo info in readerMap.Keys)
-                {
-                    Debug.Assert(!seen.Contains(info.Info.Name));
-                    seen.Add(info.Info.Name);
-                }
-                return true;
-            }
-        }
-
-        /// <summary>
-        /// Obtain the number of deleted docs for a pooled reader.
-        /// If the reader isn't being pooled, the segmentInfo's
-        /// delCount is returned.
-        /// </summary>
-        public virtual int NumDeletedDocs(SegmentCommitInfo info)
-        {
-            EnsureOpen(false);
-            int delCount = info.DelCount;
-
-            ReadersAndUpdates rld = readerPool.Get(info, false);
-            if (rld != null)
-            {
-                delCount += rld.PendingDeleteCount;
-            }
-            return delCount;
-        }
-
-        /// <summary>
-        /// Used internally to throw an <see cref="ObjectDisposedException"/> if this
-        /// <see cref="IndexWriter"/> has been disposed or is in the process of diposing.
-        /// </summary>
-        /// <param name="failIfDisposing">
-        ///          if <c>true</c>, also fail when <see cref="IndexWriter"/> is in the process of
-        ///          disposing (<c>closing=true</c>) but not yet done disposing (
-        ///          <c>closed=false</c>) </param>
-        /// <exception cref="ObjectDisposedException">
-        ///           if this IndexWriter is closed or in the process of closing </exception>
-        protected internal void EnsureOpen(bool failIfDisposing)
-        {
-            if (closed || (failIfDisposing && closing))
-            {
-                throw new ObjectDisposedException(this.GetType().GetTypeInfo().FullName, "this IndexWriter is closed");
-            }
-        }
-
-        /// <summary>
-        /// Used internally to throw an
-        /// <see cref="ObjectDisposedException"/> if this <see cref="IndexWriter"/> has been
-        /// disposed (<c>closed=true</c>) or is in the process of
-        /// disposing (<c>closing=true</c>).
-        /// <para/>
-        /// Calls <see cref="EnsureOpen(bool)"/>.
-        /// </summary>
-        /// <exception cref="ObjectDisposedException"> if this <see cref="IndexWriter"/> is disposed </exception>
-        protected internal void EnsureOpen()
-        {
-            EnsureOpen(true);
-        }
-
-        internal readonly Codec codec; // for writing new segments
-
-        /// <summary>
-        /// Constructs a new <see cref="IndexWriter"/> per the settings given in <paramref name="conf"/>.
-        /// If you want to make "live" changes to this writer instance, use
-        /// <see cref="Config"/>.
-        ///
-        /// <para/>
-        /// <b>NOTE:</b> after ths writer is created, the given configuration instance
-        /// cannot be passed to another writer. If you intend to do so, you should
-        /// <see cref="IndexWriterConfig.Clone()"/> it beforehand.
-        /// </summary>
-        /// <param name="d">
-        ///          the index directory. The index is either created or appended
-        ///          according <see cref="IndexWriterConfig.OpenMode"/>. </param>
-        /// <param name="conf">
-        ///          the configuration settings according to which <see cref="IndexWriter"/> should
-        ///          be initialized. </param>
-        /// <exception cref="IOException">
-        ///           if the directory cannot be read/written to, or if it does not
-        ///           exist and <see cref="IndexWriterConfig.OpenMode"/> is
-        ///           <see cref="OpenMode.APPEND"/> or if there is any other low-level
-        ///           IO error </exception>
-        public IndexWriter(Directory d, IndexWriterConfig conf)
-        {
-            readerPool = new ReaderPool(this);
-            conf.SetIndexWriter(this); // prevent reuse by other instances
-            config = new LiveIndexWriterConfig(conf);
-            directory = d;
-            analyzer = config.Analyzer;
-            infoStream = config.InfoStream;
-            mergePolicy = config.MergePolicy;
-            mergePolicy.SetIndexWriter(this);
-            mergeScheduler = config.MergeScheduler;
-            codec = config.Codec;
-
-            bufferedUpdatesStream = new BufferedUpdatesStream(infoStream);
-            poolReaders = config.UseReaderPooling;
-
-            writeLock = directory.MakeLock(WRITE_LOCK_NAME);
-
-            if (!writeLock.Obtain(config.WriteLockTimeout)) // obtain write lock
-            {
-                throw new LockObtainFailedException("Index locked for write: " + writeLock);
-            }
-
-            bool success = false;
-            try
-            {
-                OpenMode? mode = config.OpenMode;
-                bool create;
-                if (mode == OpenMode.CREATE)
-                {
-                    create = true;
-                }
-                else if (mode == OpenMode.APPEND)
-                {
-                    create = false;
-                }
-                else
-                {
-                    // CREATE_OR_APPEND - create only if an index does not exist
-                    create = !DirectoryReader.IndexExists(directory);
-                }
-
-                // If index is too old, reading the segments will throw
-                // IndexFormatTooOldException.
-                segmentInfos = new SegmentInfos();
-
-                bool initialIndexExists = true;
-
-                if (create)
-                {
-                    // Try to read first.  this is to allow create
-                    // against an index that's currently open for
-                    // searching.  In this case we write the next
-                    // segments_N file with no segments:
-                    try
-                    {
-                        segmentInfos.Read(directory);
-                        segmentInfos.Clear();
-                    }
-                    catch (IOException)
-                    {
-                        // Likely this means it's a fresh directory
-                        initialIndexExists = false;
-                    }
-
-                    // Record that we have a change (zero out all
-                    // segments) pending:
-                    Changed();
-                }
-                else
-                {
-                    segmentInfos.Read(directory);
-
-                    IndexCommit commit = config.IndexCommit;
-                    if (commit != null)
-                    {
-                        // Swap out all segments, but, keep metadata in
-                        // SegmentInfos, like version & generation, to
-                        // preserve write-once.  this is important if
-                        // readers are open against the future commit
-                        // points.
-                        if (commit.Directory != directory)
-                        {
-                            throw new ArgumentException(string.Format("IndexCommit's directory doesn't match my directory (mine: {0}, commit's: {1})", directory, commit.Directory));
-                        }
-                        SegmentInfos oldInfos = new SegmentInfos();
-                        oldInfos.Read(directory, commit.SegmentsFileName);
-                        segmentInfos.Replace(oldInfos);
-                        Changed();
-                        if (infoStream.IsEnabled("IW"))
-                        {
-                            infoStream.Message("IW", "init: loaded commit \"" + commit.SegmentsFileName + "\"");
-                        }
-                    }
-                }
-
-                rollbackSegments = segmentInfos.CreateBackupSegmentInfos();
-
-                // start with previous field numbers, but new FieldInfos
-                globalFieldNumberMap = FieldNumberMap;
-                config.FlushPolicy.Init(config);
-                docWriter = new DocumentsWriter(this, config, directory);
-                eventQueue = docWriter.EventQueue;
-
-                // Default deleter (for backwards compatibility) is
-                // KeepOnlyLastCommitDeleter:
-                lock (this)
-                {
-                    deleter = new IndexFileDeleter(directory, config.IndexDeletionPolicy, segmentInfos, infoStream, this, initialIndexExists);
-                }
-
-                if (deleter.startingCommitDeleted)
-                {
-                    // Deletion policy deleted the "head" commit point.
-                    // We have to mark ourself as changed so that if we
-                    // are closed w/o any further changes we write a new
-                    // segments_N file.
-                    Changed();
-                }
-
-                if (infoStream.IsEnabled("IW"))
-                {
-                    infoStream.Message("IW", "init: create=" + create);
-                    MessageState();
-                }
-
-                success = true;
-            }
-            finally
-            {
-                if (!success)
-                {
-                    if (infoStream.IsEnabled("IW"))
-                    {
-                        infoStream.Message("IW", "init: hit exception on init; releasing write lock");
-                    }
-                    IOUtils.CloseWhileHandlingException(writeLock);
-                    writeLock = null;
-                }
-            }
-        }
-
-        /// <summary>
-        /// Loads or returns the already loaded the global field number map for <see cref="segmentInfos"/>.
-        /// If <see cref="segmentInfos"/> has no global field number map the returned instance is empty
-        /// </summary>
-        private FieldNumbers FieldNumberMap
-        {
-            get
-            {
-                FieldNumbers map = new FieldNumbers();
-
-                foreach (SegmentCommitInfo info in segmentInfos.Segments)
-                {
-                    foreach (FieldInfo fi in SegmentReader.ReadFieldInfos(info))
-                    {
-                        map.AddOrGet(fi.Name, fi.Number, fi.DocValuesType);
-                    }
-                }
-
-                return map;
-            }
-        }
-
-        /// <summary>
-        /// Returns a <see cref="LiveIndexWriterConfig"/>, which can be used to query the <see cref="IndexWriter"/>
-        /// current settings, as well as modify "live" ones.
-        /// </summary>
-        public virtual LiveIndexWriterConfig Config
-        {
-            get
-            {
-                EnsureOpen(false);
-                return config;
-            }
-        }
-
-        private void MessageState()
-        {
-            if (infoStream.IsEnabled("IW"))
-            {
-                infoStream.Message("IW", "\ndir=" + directory + "\n" + "index=" + SegString() + "\n" + "version=" + Constants.LUCENE_VERSION + "\n" + config.ToString());
-            }
-        }
-
-        /// <summary>
-        /// Commits all changes to an index, waits for pending merges
-        /// to complete, and closes all associated files.
-        /// <para/>
-        /// This is a "slow graceful shutdown" which may take a long time
-        /// especially if a big merge is pending: If you only want to close
-        /// resources use <see cref="Rollback()"/>. If you only want to commit
-        /// pending changes and close resources see <see cref="Dispose(bool)"/>.
-        /// <para/>
-        /// Note that this may be a costly
-        /// operation, so, try to re-use a single writer instead of
-        /// closing and opening a new one.  See <see cref="Commit()"/> for
-        /// caveats about write caching done by some IO devices.
-        ///
-        /// <para> If an <see cref="Exception"/> is hit during close, eg due to disk
-        /// full or some other reason, then both the on-disk index
-        /// and the internal state of the <see cref="IndexWriter"/> instance will
-        /// be consistent.  However, the close will not be complete
-        /// even though part of it (flushing buffered documents)
-        /// may have succeeded, so the write lock will still be
-        /// held.</para>
-        ///
-        /// <para> If you can correct the underlying cause (eg free up
-        /// some disk space) then you can call <see cref="Dispose()"/> again.
-        /// Failing that, if you want to force the write lock to be
-        /// released (dangerous, because you may then lose buffered
-        /// docs in the <see cref="IndexWriter"/> instance) then you can do
-        /// something like this:</para>
-        ///
-        /// <code>
-        /// try 
-        /// {
-        ///     writer.Dispose();
-        /// } 
-        /// finally 
-        /// {
-        ///     if (IndexWriter.IsLocked(directory)) 
-        ///     {
-        ///         IndexWriter.Unlock(directory);
-        ///     }
-        /// }
-        /// </code>
-        /// 
-        /// after which, you must be certain not to use the writer
-        /// instance anymore.
-        ///
-        /// <para><b>NOTE</b>: if this method hits an <see cref="OutOfMemoryException"/>
-        /// you should immediately dispose the writer, again.  See 
-        /// <see cref="IndexWriter"/> for details.</para>
-        /// </summary>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        public void Dispose()
-        {
-            Dispose(true);
-            GC.SuppressFinalize(this);
-        }
-
-        /// <summary>
-        /// Closes the index with or without waiting for currently
-        /// running merges to finish.  This is only meaningful when
-        /// using a <see cref="MergeScheduler"/> that runs merges in background
-        /// threads.
-        ///
-        /// <para><b>NOTE</b>: if this method hits an <see cref="OutOfMemoryException"/>
-        /// you should immediately dispose the writer, again.  See 
-        /// <see cref="IndexWriter"/> for details.</para>
-        ///
-        /// <para><b>NOTE</b>: it is dangerous to always call
-        /// <c>Dispose(false)</c>, especially when <see cref="IndexWriter"/> is not open
-        /// for very long, because this can result in "merge
-        /// starvation" whereby long merges will never have a
-        /// chance to finish.  This will cause too many segments in
-        /// your index over time.</para>
-        /// </summary>
-        /// <param name="waitForMerges"> if <c>true</c>, this call will block
-        /// until all merges complete; else, it will ask all
-        /// running merges to abort, wait until those merges have
-        /// finished (which should be at most a few seconds), and
-        /// then return. </param>
-        public virtual void Dispose(bool waitForMerges)
-        {
-            // Ensure that only one thread actually gets to do the
-            // closing, and make sure no commit is also in progress:
-            lock (commitLock)
-            {
-                if (ShouldClose())
-                {
-                    // If any methods have hit OutOfMemoryError, then abort
-                    // on close, in case the internal state of IndexWriter
-                    // or DocumentsWriter is corrupt
-                    if (hitOOM)
-                    {
-                        RollbackInternal();
-                    }
-                    else
-                    {
-                        CloseInternal(waitForMerges, true);
-                        Debug.Assert(AssertEventQueueAfterClose());
-                    }
-                }
-            }
-        }
-
-        private bool AssertEventQueueAfterClose()
-        {
-            if (eventQueue.Count == 0)
-            {
-                return true;
-            }
-            foreach (IEvent e in eventQueue)
-            {
-                Debug.Assert(e is DocumentsWriter.MergePendingEvent, e.ToString());
-            }
-            return true;
-        }
-
-        /// <summary>
-        /// Returns <c>true</c> if this thread should attempt to close, or
-        /// false if IndexWriter is now closed; else, waits until
-        /// another thread finishes closing
-        /// </summary>
-        private bool ShouldClose()
-        {
-            lock (this)
-            {
-                while (true)
-                {
-                    if (!closed)
-                    {
-                        if (!closing)
-                        {
-                            closing = true;
-                            return true;
-                        }
-                        else
-                        {
-                            // Another thread is presently trying to close;
-                            // wait until it finishes one way (closes
-                            // successfully) or another (fails to close)
-                            DoWait();
-                        }
-                    }
-                    else
-                    {
-                        return false;
-                    }
-                }
-            }
-        }
-
-        private void CloseInternal(bool waitForMerges, bool doFlush)
-        {
-            bool interrupted = false;
-            try
-            {
-                if (pendingCommit != null)
-                {
-                    throw new InvalidOperationException("cannot close: prepareCommit was already called with no corresponding call to commit");
-                }
-
-                if (infoStream.IsEnabled("IW"))
-                {
-                    infoStream.Message("IW", "now flush at close waitForMerges=" + waitForMerges);
-                }
-
-                docWriter.Dispose();
-
-                try
-                {
-                    // Only allow a new merge to be triggered if we are
-                    // going to wait for merges:
-                    if (doFlush)
-                    {
-                        Flush(waitForMerges, true);
-                    }
-                    else
-                    {
-                        docWriter.Abort(this); // already closed -- never sync on IW
-                    }
-                }
-                finally
-                {
-                    try
-                    {
-                        // clean up merge scheduler in all cases, although flushing may have failed:
-                        //interrupted = Thread.Interrupted();
-                        //LUCENE TO-DO
-                        interrupted = false;
-
-                        if (waitForMerges)
-                        {
-#if !NETSTANDARD
-                            try
-                            {
-#endif    
-                            // Give merge scheduler last chance to run, in case
-                                // any pending merges are waiting:
-                                mergeScheduler.Merge(this, MergeTrigger.CLOSING, false);
-#if !NETSTANDARD
-                            }
-                            catch (ThreadInterruptedException)
-                            {
-                                // ignore any interruption, does not matter
-                                interrupted = true;
-                                if (infoStream.IsEnabled("IW"))
-                                {
-                                    infoStream.Message("IW", "interrupted while waiting for final merges");
-                                }
-                            }
-#endif
-                        }
-
-                        lock (this)
-                        {
-                            for (; ; )
-                            {
-#if !NETSTANDARD
-                                try
-                                {
-#endif
-                                    FinishMerges(waitForMerges && !interrupted);
-                                    break;
-#if !NETSTANDARD
-                                }
-                                catch (ThreadInterruptedException)
-                                {
-                                    // by setting the interrupted status, the
-                                    // next call to finishMerges will pass false,
-                                    // so it will not wait
-                                    interrupted = true;
-                                    if (infoStream.IsEnabled("IW"))
-                                    {
-                                        infoStream.Message("IW", "interrupted while waiting for merges to finish");
-                                    }
-                                }
-#endif
-                            }
-                            stopMerges = true;
-                        }
-                    }
-                    finally
-                    {
-                        // shutdown policy, scheduler and all threads (this call is not interruptible):
-                        IOUtils.CloseWhileHandlingException(mergePolicy, mergeScheduler);
-                    }
-                }
-
-                if (infoStream.IsEnabled("IW"))
-                {
-                    infoStream.Message("IW", "now call final commit()");
-                }
-
-                if (doFlush)
-                {
-                    CommitInternal();
-                }
-                ProcessEvents(false, true);
-                lock (this)
-                {
-                    // commitInternal calls ReaderPool.commit, which
-                    // writes any pending liveDocs from ReaderPool, so
-                    // it's safe to drop all readers now:
-                    readerPool.DropAll(true);
-                    deleter.Dispose();
-                }
-
-                if (infoStream.IsEnabled("IW"))
-                {
-                    infoStream.Message("IW", "at close: " + SegString());
-                }
-
-                if (writeLock != null)
-                {
-                    writeLock.Dispose(); // release write lock
-                    writeLock = null;
-                }
-                lock (this)
-                {
-                    closed = true;
-                }
-                Debug.Assert(docWriter.perThreadPool.NumDeactivatedThreadStates() == docWriter.perThreadPool.MaxThreadStates, "" + docWriter.perThreadPool.NumDeactivatedThreadStates() + " " + docWriter.perThreadPool.MaxThreadStates);
-            }
-            catch (System.OutOfMemoryException oom)
-            {
-                HandleOOM(oom, "closeInternal");
-            }
-            finally
-            {
-                lock (this)
-                {
-                    closing = false;
-                    Monitor.PulseAll(this);
-                    if (!closed)
-                    {
-                        if (infoStream.IsEnabled("IW"))
-                        {
-                            infoStream.Message("IW", "hit exception while closing");
-                        }
-                    }
-                }
-                // finally, restore interrupt status:
-                if (interrupted)
-                {
-#if !NETSTANDARD
-                    Thread.CurrentThread.Interrupt();
-#endif
-                }
-            }
-        }
-
-        /// <summary>
-        /// Gets the <see cref="Store.Directory"/> used by this index. </summary>
-        public virtual Directory Directory
-        {
-            get
-            {
-                return directory;
-            }
-        }
-
-        /// <summary>
-        /// Gets the analyzer used by this index. </summary>
-        public virtual Analyzer Analyzer
-        {
-            get
-            {
-                EnsureOpen();
-                return analyzer;
-            }
-        }
-
-        /// <summary>
-        /// Gets total number of docs in this index, including
-        /// docs not yet flushed (still in the RAM buffer),
-        /// not counting deletions.
-        /// </summary>
-        /// <seealso cref="NumDocs"/>
-        public virtual int MaxDoc
-        {
-            get
-            {
-                lock (this)
-                {
-                    EnsureOpen();
-                    return docWriter.NumDocs + segmentInfos.TotalDocCount;
-                }
-            }
-        }
-
-        /// <summary>
-        /// Gets total number of docs in this index, including
-        /// docs not yet flushed (still in the RAM buffer), and
-        /// including deletions.  <b>NOTE:</b> buffered deletions
-        /// are not counted.  If you really need these to be
-        /// counted you should call <see cref="Commit()"/> first.
-        /// </summary>
-        /// <seealso cref="MaxDoc"/>
-        public virtual int NumDocs // LUCENENET NOTE: This is not a great candidate for a property, but changing because IndexReader has a property with the same name
-        {
-            get
-            {
-                lock (this)
-                {
-                    EnsureOpen();
-                    return docWriter.NumDocs + segmentInfos.Segments.Sum(info => info.Info.DocCount - NumDeletedDocs(info));
-                }
-            }
-        }
-
-        /// <summary>
-        /// Returns <c>true</c> if this index has deletions (including
-        /// buffered deletions).  Note that this will return <c>true</c>
-        /// if there are buffered Term/Query deletions, even if it
-        /// turns out those buffered deletions don't match any
-        /// documents. Also, if a merge kicked off as a result of flushing a
-        /// </summary>
-        public virtual bool HasDeletions()
-        {
-            lock (this)
-            {
-                EnsureOpen();
-                if (bufferedUpdatesStream.Any())
-                {
-                    return true;
-                }
-                if (docWriter.AnyDeletions())
-                {
-                    return true;
-                }
-                if (readerPool.AnyPendingDeletes())
-                {
-                    return true;
-                }
-                foreach (SegmentCommitInfo info in segmentInfos.Segments)
-                {
-                    if (info.HasDeletions)
-                    {
-                        return true;
-                    }
-                }
-                return false;
-            }
-        }
-
-        /// <summary>
-        /// Adds a document to this index.
-        ///
-        /// <para> Note that if an <see cref="Exception"/> is hit (for example disk full)
-        /// then the index will be consistent, but this document
-        /// may not have been added.  Furthermore, it's possible
-        /// the index will have one segment in non-compound format
-        /// even when using compound files (when a merge has
-        /// partially succeeded).</para>
-        ///
-        /// <para>This method periodically flushes pending documents
-        /// to the <see cref="Directory"/> (see <see cref="IndexWriter"/>), and
-        /// also periodically triggers segment merges in the index
-        /// according to the <see cref="MergePolicy"/> in use.</para>
-        ///
-        /// <para>Merges temporarily consume space in the
-        /// directory. The amount of space required is up to 1X the
-        /// size of all segments being merged, when no
-        /// readers/searchers are open against the index, and up to
-        /// 2X the size of all segments being merged when
-        /// readers/searchers are open against the index (see
-        /// <see cref="ForceMerge(int)"/> for details). The sequence of
-        /// primitive merge operations performed is governed by the
-        /// merge policy.</para>
-        ///
-        /// <para>Note that each term in the document can be no longer
-        /// than <see cref="MAX_TERM_LENGTH"/> in bytes, otherwise an
-        /// <see cref="ArgumentException"/> will be thrown.</para>
-        ///
-        /// <para>Note that it's possible to create an invalid Unicode
-        /// string in java if a UTF16 surrogate pair is malformed.
-        /// In this case, the invalid characters are silently
-        /// replaced with the Unicode replacement character
-        /// U+FFFD.</para>
-        ///
-        /// <para><b>NOTE</b>: if this method hits an <see cref="OutOfMemoryException"/>
-        /// you should immediately dispose the writer.  See 
-        /// <see cref="IndexWriter"/> for details.</para>
-        /// </summary>
-        /// <exception cref="CorruptIndexException"> if the index is corrupt </exception>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        public virtual void AddDocument(IEnumerable<IIndexableField> doc)
-        {
-            AddDocument(doc, analyzer); // delegate to the overload, using the writer's default analyzer field
-        }
-
-        /// <summary>
-        /// Adds a document to this index, using the provided <paramref name="analyzer"/> instead of the
-        /// value of <see cref="Analyzer"/>.
-        ///
-        /// <para>See <see cref="AddDocument(IEnumerable{IIndexableField})"/> for details on
-        /// index and <see cref="IndexWriter"/> state after an <see cref="Exception"/>, and
-        /// flushing/merging temporary free space requirements.</para>
-        ///
-        /// <para><b>NOTE</b>: if this method hits an <see cref="OutOfMemoryException"/>
-        /// you should immediately dispose the writer.  See
-        /// <see cref="IndexWriter"/> for details.</para>
-        /// </summary>
-        /// <exception cref="CorruptIndexException"> if the index is corrupt </exception>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        public virtual void AddDocument(IEnumerable<IIndexableField> doc, Analyzer analyzer)
-        {
-            UpdateDocument(null, doc, analyzer); // null delete term: UpdateDocument degenerates to a pure add
-        }
-
-        /// <summary>
-        /// Atomically adds a block of documents with sequentially
-        /// assigned document IDs, such that an external reader
-        /// will see all or none of the documents.
-        ///
-        /// <para><b>WARNING</b>: the index does not currently record
-        /// which documents were added as a block.  Today this is
-        /// fine, because merging will preserve a block. The order of
-        /// documents within a segment will be preserved, even when child
-        /// documents within a block are deleted. Most search features
-        /// (like result grouping and block joining) require you to
-        /// mark documents; when these documents are deleted these
-        /// search features will not work as expected. Obviously adding
-        /// documents to an existing block will require you to reindex
-        /// the entire block.</para>
-        ///
-        /// <para>However it's possible that in the future Lucene may
-        /// more aggressively re-order documents (for example,
-        /// perhaps to obtain better index compression), in which case
-        /// you may need to fully re-index your documents at that time.</para>
-        ///
-        /// <para>See <see cref="AddDocument(IEnumerable{IIndexableField})"/> for details on
-        /// index and <see cref="IndexWriter"/> state after an <see cref="Exception"/>, and
-        /// flushing/merging temporary free space requirements.</para>
-        ///
-        /// <para><b>NOTE</b>: tools that do offline splitting of an index
-        /// (for example, IndexSplitter in Lucene.Net.Misc) or
-        /// re-sorting of documents (for example, IndexSorter in
-        /// contrib) are not aware of these atomically added documents
-        /// and will likely break them up.  Use such tools at your
-        /// own risk!</para>
-        ///
-        /// <para><b>NOTE</b>: if this method hits an <see cref="OutOfMemoryException"/>
-        /// you should immediately dispose the writer.  See
-        /// <see cref="IndexWriter"/> for details.</para>
-        /// 
-        /// @lucene.experimental 
-        /// </summary>
-        /// <exception cref="CorruptIndexException"> if the index is corrupt </exception>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        public virtual void AddDocuments(IEnumerable<IEnumerable<IIndexableField>> docs)
-        {
-            AddDocuments(docs, analyzer); // delegate to the overload, using the writer's default analyzer field
-        }
-
-        /// <summary>
-        /// Atomically adds a block of documents, analyzed using the
-        /// provided <paramref name="analyzer"/>, with sequentially assigned document
-        /// IDs, such that an external reader will see all or none
-        /// of the documents.
-        /// <para/>
-        /// @lucene.experimental
-        /// </summary>
-        /// <exception cref="CorruptIndexException"> if the index is corrupt </exception>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        public virtual void AddDocuments(IEnumerable<IEnumerable<IIndexableField>> docs, Analyzer analyzer)
-        {
-            UpdateDocuments(null, docs, analyzer); // null delete term: adds the block without deleting anything
-        }
-
-        /// <summary>
-        /// Atomically deletes documents matching the provided
-        /// <paramref name="delTerm"/> and adds a block of documents with sequentially
-        /// assigned document IDs, such that an external reader
-        /// will see all or none of the documents.
-        /// <para/>
-        /// @lucene.experimental
-        /// </summary>
-        /// <exception cref="CorruptIndexException"> if the index is corrupt </exception>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        /// <seealso cref="AddDocuments(IEnumerable{IEnumerable{IIndexableField}})"/>
-        public virtual void UpdateDocuments(Term delTerm, IEnumerable<IEnumerable<IIndexableField>> docs)
-        {
-            UpdateDocuments(delTerm, docs, analyzer); // delegate to the overload, using the writer's default analyzer field
-        }
-
-        /// <summary>
-        /// Atomically deletes documents matching the provided
-        /// <paramref name="delTerm"/> and adds a block of documents, analyzed using
-        /// the provided <paramref name="analyzer"/>, with sequentially
-        /// assigned document IDs, such that an external reader
-        /// will see all or none of the documents.
-        /// <para/>
-        /// @lucene.experimental
-        /// </summary>
-        /// <exception cref="CorruptIndexException"> if the index is corrupt </exception>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        /// <seealso cref="AddDocuments(IEnumerable{IEnumerable{IIndexableField}})"/>
-        public virtual void UpdateDocuments(Term delTerm, IEnumerable<IEnumerable<IIndexableField>> docs, Analyzer analyzer)
-        {
-            EnsureOpen();
-            try
-            {
-                bool success = false; // tracks normal completion so the failure path below can log
-                try
-                {
-                    if (docWriter.UpdateDocuments(docs, analyzer, delTerm)) // true indicates pending events to process
-                    {
-                        ProcessEvents(true, false);
-                    }
-                    success = true;
-                }
-                finally
-                {
-                    if (!success) // log only; any exception continues to propagate unchanged
-                    {
-                        if (infoStream.IsEnabled("IW"))
-                        {
-                            infoStream.Message("IW", "hit exception updating document");
-                        }
-                    }
-                }
-            }
-            catch (System.OutOfMemoryException oom)
-            {
-                HandleOOM(oom, "updateDocuments"); // per class docs, the writer must be disposed after OOM
-            }
-        }
-
-        /// <summary>
-        /// Deletes the document(s) containing <paramref name="term"/>.
-        ///
-        /// <para><b>NOTE</b>: if this method hits an <see cref="OutOfMemoryException"/>
-        /// you should immediately dispose the writer.  See 
-        /// <see cref="IndexWriter"/> for details.</para>
-        /// </summary>
-        /// <param name="term"> the term to identify the documents to be deleted </param>
-        /// <exception cref="CorruptIndexException"> if the index is corrupt </exception>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        public virtual void DeleteDocuments(Term term)
-        {
-            EnsureOpen();
-            try
-            {
-                if (docWriter.DeleteTerms(term)) // true indicates pending events to process
-                {
-                    ProcessEvents(true, false);
-                }
-            }
-            catch (System.OutOfMemoryException oom)
-            {
-                HandleOOM(oom, "deleteDocuments(Term)"); // per class docs, the writer must be disposed after OOM
-            }
-        }
-
-        /// <summary>
-        /// Expert: attempts to delete by document ID, as long as
-        /// the provided <paramref name="readerIn"/> is a near-real-time reader (from 
-        /// <see cref="DirectoryReader.Open(IndexWriter, bool)"/>.  If the
-        /// provided <paramref name="readerIn"/> is an NRT reader obtained from this
-        /// writer, and its segment has not been merged away, then
-        /// the delete succeeds and this method returns <c>true</c>; else, it
-        /// returns <c>false</c>; the caller must then separately delete by
-        /// Term or Query.
-        ///
-        /// <b>NOTE</b>: this method can only delete documents
-        /// visible to the currently open NRT reader.  If you need
-        /// to delete documents indexed after opening the NRT
-        /// reader you must use the other DeleteDocument() methods
-        /// (e.g., <see cref="DeleteDocuments(Term)"/>).
-        /// </summary>
-        public virtual bool TryDeleteDocument(IndexReader readerIn, int docID)
-        {
-            lock (this)
-            {
-                AtomicReader reader;
-                if (readerIn is AtomicReader)
-                {
-                    // Reader is already atomic: use the incoming docID:
-                    reader = (AtomicReader)readerIn;
-                }
-                else
-                {
-                    // Composite reader: lookup sub-reader and re-base docID:
-                    IList<AtomicReaderContext> leaves = readerIn.Leaves;
-                    int subIndex = ReaderUtil.SubIndex(docID, leaves);
-                    reader = leaves[subIndex].AtomicReader;
-                    docID -= leaves[subIndex].DocBase; // docID is now segment-local
-                    Debug.Assert(docID >= 0);
-                    Debug.Assert(docID < reader.MaxDoc);
-                }
-
-                if (!(reader is SegmentReader))
-                {
-                    throw new System.ArgumentException("the reader must be a SegmentReader or composite reader containing only SegmentReaders");
-                }
-
-                SegmentCommitInfo info = ((SegmentReader)reader).SegmentInfo;
-
-                // TODO: this is a slow linear search, but, number of
-                // segments should be contained unless something is
-                // seriously wrong w/ the index, so it should be a minor
-                // cost:
-
-                if (segmentInfos.IndexOf(info) != -1) // segment is still live (not merged away)
-                {
-                    ReadersAndUpdates rld = readerPool.Get(info, false); // second arg: presumably "do not create" — consistent with the "no rld" branch below
-                    if (rld != null)
-                    {
-                        lock (bufferedUpdatesStream)
-                        {
-                            rld.InitWritableLiveDocs();
-                            if (rld.Delete(docID))
-                            {
-                                int fullDelCount = rld.Info.DelCount + rld.PendingDeleteCount;
-                                if (fullDelCount == rld.Info.Info.DocCount) // every doc in the segment is now deleted
-                                {
-                                    // If a merge has already registered for this
-                                    // segment, we leave it in the readerPool; the
-                                    // merge will skip merging it and will then drop
-                                    // it once it's done:
-                                    if (!mergingSegments.Contains(rld.Info))
-                                    {
-                                        segmentInfos.Remove(rld.Info);
-                                        readerPool.Drop(rld.Info);
-                                        Checkpoint();
-                                    }
-                                }
-
-                                // Must bump changeCount so if no other changes
-                                // happened, we still commit this change:
-                                Changed();
-                            }
-                            //System.out.println("  yes " + info.info.name + " " + docID);
-                            return true; // NOTE: true even when rld.Delete returned false (docID already deleted)
-                        }
-                    }
-                    else
-                    {
-                        //System.out.println("  no rld " + info.info.name + " " + docID);
-                    }
-                }
-                else
-                {
-                    //System.out.println("  no seg " + info.info.name + " " + docID);
-                }
-                return false; // caller must fall back to delete-by-Term/Query
-            }
-        }
-
-        /// <summary>
-        /// Deletes the document(s) containing any of the
-        /// terms. All given deletes are applied and flushed atomically
-        /// at the same time.
-        ///
-        /// <para><b>NOTE</b>: if this method hits an <see cref="OutOfMemoryException"/>
-        /// you should immediately dispose the writer.  See
-        /// <see cref="IndexWriter"/> for details.</para>
-        /// </summary>
-        /// <param name="terms"> array of terms to identify the documents
-        /// to be deleted </param>
-        /// <exception cref="CorruptIndexException"> if the index is corrupt </exception>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        public virtual void DeleteDocuments(params Term[] terms)
-        {
-            EnsureOpen();
-            try
-            {
-                if (docWriter.DeleteTerms(terms)) // true indicates pending events to process
-                {
-                    ProcessEvents(true, false);
-                }
-            }
-            catch (System.OutOfMemoryException oom)
-            {
-                HandleOOM(oom, "deleteDocuments(Term..)"); // per class docs, the writer must be disposed after OOM
-            }
-        }
-
-        /// <summary>
-        /// Deletes the document(s) matching the provided query.
-        ///
-        /// <para><b>NOTE</b>: if this method hits an <see cref="OutOfMemoryException"/>
-        /// you should immediately dispose the writer.  See
-        /// <see cref="IndexWriter"/> for details.</para>
-        /// </summary>
-        /// <param name="query"> the query to identify the documents to be deleted </param>
-        /// <exception cref="CorruptIndexException"> if the index is corrupt </exception>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        public virtual void DeleteDocuments(Query query)
-        {
-            EnsureOpen();
-            try
-            {
-                if (docWriter.DeleteQueries(query)) // true indicates pending events to process
-                {
-                    ProcessEvents(true, false);
-                }
-            }
-            catch (System.OutOfMemoryException oom)
-            {
-                HandleOOM(oom, "deleteDocuments(Query)"); // per class docs, the writer must be disposed after OOM
-            }
-        }
-
-        /// <summary>
-        /// Deletes the document(s) matching any of the provided queries.
-        /// All given deletes are applied and flushed atomically at the same time.
-        ///
-        /// <para><b>NOTE</b>: if this method hits an <see cref="OutOfMemoryException"/>
-        /// you should immediately dispose the writer.  See
-        /// <see cref="IndexWriter"/> for details.</para>
-        /// </summary>
-        /// <param name="queries"> array of queries to identify the documents
-        /// to be deleted </param>
-        /// <exception cref="CorruptIndexException"> if the index is corrupt </exception>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        public virtual void DeleteDocuments(params Query[] queries)
-        {
-            EnsureOpen();
-            try
-            {
-                if (docWriter.DeleteQueries(queries)) // true indicates pending events to process
-                {
-                    ProcessEvents(true, false);
-                }
-            }
-            catch (System.OutOfMemoryException oom)
-            {
-                HandleOOM(oom, "deleteDocuments(Query..)"); // per class docs, the writer must be disposed after OOM
-            }
-        }
-
-        /// <summary>
-        /// Updates a document by first deleting the document(s)
-        /// containing <paramref name="term"/> and then adding the new
-        /// document.  The delete and then add are atomic as seen
-        /// by a reader on the same index (flush may happen only after
-        /// the add).
-        ///
-        /// <para><b>NOTE</b>: if this method hits an <see cref="OutOfMemoryException"/>
-        /// you should immediately dispose the writer.  See
-        /// <see cref="IndexWriter"/> for details.</para>
-        /// </summary>
-        /// <param name="term"> the term to identify the document(s) to be
-        /// deleted </param>
-        /// <param name="doc"> the document to be added </param>
-        /// <exception cref="CorruptIndexException"> if the index is corrupt </exception>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        public virtual void UpdateDocument(Term term, IEnumerable<IIndexableField> doc)
-        {
-            EnsureOpen(); // also re-checked inside the analyzer-taking overload; the extra check is harmless
-            UpdateDocument(term, doc, analyzer); // delegate using the writer's default analyzer field
-        }
-
-        /// <summary>
-        /// Updates a document by first deleting the document(s)
-        /// containing <paramref name="term"/> and then adding the new
-        /// document.  The delete and then add are atomic as seen
-        /// by a reader on the same index (flush may happen only after
-        /// the add).
-        ///
-        /// <para><b>NOTE</b>: if this method hits an <see cref="OutOfMemoryException"/>
-        /// you should immediately dispose the writer.  See
-        /// <see cref="IndexWriter"/> for details.</para>
-        /// </summary>
-        /// <param name="term"> the term to identify the document(s) to be
-        /// deleted </param>
-        /// <param name="doc"> the document to be added </param>
-        /// <param name="analyzer"> the analyzer to use when analyzing the document </param>
-        /// <exception cref="CorruptIndexException"> if the index is corrupt </exception>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        public virtual void UpdateDocument(Term term, IEnumerable<IIndexableField> doc, Analyzer analyzer)
-        {
-            EnsureOpen();
-            try
-            {
-                bool success = false; // tracks normal completion so the failure path below can log
-                try
-                {
-                    if (docWriter.UpdateDocument(doc, analyzer, term)) // true indicates pending events to process
-                    {
-                        ProcessEvents(true, false);
-                    }
-                    success = true;
-                }
-                finally
-                {
-                    if (!success) // log only; any exception continues to propagate unchanged
-                    {
-                        if (infoStream.IsEnabled("IW"))
-                        {
-                            infoStream.Message("IW", "hit exception updating document");
-                        }
-                    }
-                }
-            }
-            catch (System.OutOfMemoryException oom)
-            {
-                HandleOOM(oom, "updateDocument"); // per class docs, the writer must be disposed after OOM
-            }
-        }
-
-        /// <summary>
-        /// Updates a document's <see cref="NumericDocValues"/> for <paramref name="field"/> to the
-        /// given <paramref name="value"/>. This method can be used to 'unset' a document's
-        /// value by passing <c>null</c> as the new <paramref name="value"/>. Also, you can only update
-        /// fields that already exist in the index, not add new fields through this
-        /// method.
-        ///
-        /// <para>
-        /// <b>NOTE</b>: if this method hits an <see cref="OutOfMemoryException"/> you should immediately
-        /// dispose the writer. See <see cref="IndexWriter"/> for details.
-        /// </para>
-        /// </summary>
-        /// <param name="term">
-        ///          the term to identify the document(s) to be updated </param>
-        /// <param name="field">
-        ///          field name of the <see cref="NumericDocValues"/> field </param>
-        /// <param name="value">
-        ///          new value for the field </param>
-        /// <exception cref="CorruptIndexException">
-        ///           if the index is corrupt </exception>
-        /// <exception cref="IOException">
-        ///           if there is a low-level IO error </exception>
-        public virtual void UpdateNumericDocValue(Term term, string field, long? value)
-        {
-            EnsureOpen();
-            if (!globalFieldNumberMap.Contains(field, DocValuesType.NUMERIC)) // reject fields not already indexed as numeric DV
-            {
-                throw new System.ArgumentException("can only update existing numeric-docvalues fields!");
-            }
-            try
-            {
-                if (docWriter.UpdateNumericDocValue(term, field, value)) // true indicates pending events to process
-                {
-                    ProcessEvents(true, false);
-                }
-            }
-            catch (System.OutOfMemoryException oom)
-            {
-                HandleOOM(oom, "updateNumericDocValue"); // per class docs, the writer must be disposed after OOM
-            }
-        }
-
-        /// <summary>
-        /// Updates a document's <see cref="BinaryDocValues"/> for <paramref name="field"/> to the
-        /// given <paramref name="value"/>. This method can be used to 'unset' a document's
-        /// value by passing <c>null</c> as the new <paramref name="value"/>. Also, you can only update
-        /// fields that already exist in the index, not add new fields through this
-        /// method.
-        ///
-        /// <para/>
-        /// <b>NOTE:</b> this method currently replaces the existing value of all
-        /// affected documents with the new value.
-        ///
-        /// <para>
-        /// <b>NOTE:</b> if this method hits an <see cref="OutOfMemoryException"/> you should immediately
-        /// dispose the writer. See <see cref="IndexWriter"/> for details.
-        /// </para>
-        /// </summary>
-        /// <param name="term">
-        ///          the term to identify the document(s) to be updated </param>
-        /// <param name="field">
-        ///          field name of the <see cref="BinaryDocValues"/> field </param>
-        /// <param name="value">
-        ///          new value for the field </param>
-        /// <exception cref="CorruptIndexException">
-        ///           if the index is corrupt </exception>
-        /// <exception cref="IOException">
-        ///           if there is a low-level IO error </exception>
-        public virtual void UpdateBinaryDocValue(Term term, string field, BytesRef value)
-        {
-            EnsureOpen();
-            if (!globalFieldNumberMap.Contains(field, DocValuesType.BINARY)) // reject fields not already indexed as binary DV
-            {
-                throw new System.ArgumentException("can only update existing binary-docvalues fields!");
-            }
-            try
-            {
-                if (docWriter.UpdateBinaryDocValue(term, field, value)) // true indicates pending events to process
-                {
-                    ProcessEvents(true, false);
-                }
-            }
-            catch (System.OutOfMemoryException oom)
-            {
-                HandleOOM(oom, "updateBinaryDocValue"); // per class docs, the writer must be disposed after OOM
-            }
-        }
-
-        // For test purposes: the number of segments currently in the in-memory SegmentInfos.
-        internal int SegmentCount
-        {
-            get
-            {
-                lock (this)
-                {
-                    return segmentInfos.Count;
-                }
-            }
-        }
-
-        // For test purposes: the number of documents currently held by the DocumentsWriter.
-        internal int NumBufferedDocuments
-        {
-            get
-            {
-                lock (this)
-                {
-                    return docWriter.NumDocs;
-                }
-            }
-        }
-
-        // For test purposes: all file names referenced by the current in-memory SegmentInfos.
-        internal ICollection<string> IndexFileNames
-        {
-            get
-            {
-                lock (this)
-                {
-                    return segmentInfos.Files(directory, true);
-                }
-            }
-        }
-
-        // For test purposes: the doc count of segment i, or -1 if i is out of range.
-        internal int GetDocCount(int i)
-        {
-            lock (this)
-            {
-                if (i >= 0 && i < segmentInfos.Count)
-                {
-                    return segmentInfos.Info(i).Info.DocCount;
-                }
-                else
-                {
-                    return -1; // out-of-range index: sentinel rather than an exception
-                }
-            }
-        }
-
-        // For test purposes: current value of the flushCount counter.
-        internal int FlushCount
-        {
-            get
-            {
-                return flushCount.Get();
-            }
-        }
-
-        // For test purposes: current value of the flushDeletesCount counter.
-        internal int FlushDeletesCount
-        {
-            get
-            {
-                return flushDeletesCount.Get();
-            }
-        }
-
-        internal string NewSegmentName()
-        {
-            // Cannot synchronize on IndexWriter because that causes
-            // deadlock
-            lock (segmentInfos)


<TRUNCATED>

Mime
View raw message