lucenenet-commits mailing list archives

From nightowl...@apache.org
Subject [22/62] [abbrv] [partial] lucenenet git commit: Renamed Lucene.Net.Core folder Lucene.Net because the dotnet.exe pack command doesn't allow creating a NuGet package with a different name than its folder. Working around it with the script was much more co
Date Tue, 04 Apr 2017 17:19:28 GMT
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/CoalescedUpdates.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/CoalescedUpdates.cs b/src/Lucene.Net.Core/Index/CoalescedUpdates.cs
deleted file mode 100644
index 1892213..0000000
--- a/src/Lucene.Net.Core/Index/CoalescedUpdates.cs
+++ /dev/null
@@ -1,179 +0,0 @@
-using Lucene.Net.Util;
-using System;
-using System.Collections;
-using System.Collections.Generic;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using BinaryDocValuesUpdate = Lucene.Net.Index.DocValuesUpdate.BinaryDocValuesUpdate;
-    using BytesRef = Lucene.Net.Util.BytesRef;
-    using NumericDocValuesUpdate = Lucene.Net.Index.DocValuesUpdate.NumericDocValuesUpdate;
-    using Query = Lucene.Net.Search.Query;
-    using QueryAndLimit = Lucene.Net.Index.BufferedUpdatesStream.QueryAndLimit;
-
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    internal class CoalescedUpdates
-    {
-        internal readonly IDictionary<Query, int> queries = new Dictionary<Query, int>();
-        internal readonly IList<IEnumerable<Term>> iterables = new List<IEnumerable<Term>>();
-        internal readonly IList<NumericDocValuesUpdate> numericDVUpdates = new List<NumericDocValuesUpdate>();
-        internal readonly IList<BinaryDocValuesUpdate> binaryDVUpdates = new List<BinaryDocValuesUpdate>();
-
-        public override string ToString()
-        {
-            // note: we could add/collect more debugging information
-            return "CoalescedUpdates(termSets=" + iterables.Count + ",queries=" + queries.Count + ",numericDVUpdates=" + numericDVUpdates.Count + ",binaryDVUpdates=" + binaryDVUpdates.Count + ")";
-        }
-
-        internal virtual void Update(FrozenBufferedUpdates @in)
-        {
-            iterables.Add(@in.GetTermsEnumerable());
-
-            for (int queryIdx = 0; queryIdx < @in.queries.Length; queryIdx++)
-            {
-                Query query = @in.queries[queryIdx];
-                queries[query] = BufferedUpdates.MAX_INT32;
-            }
-
-            foreach (NumericDocValuesUpdate nu in @in.numericDVUpdates)
-            {
-                NumericDocValuesUpdate clone = new NumericDocValuesUpdate(nu.term, nu.field, (long?)nu.value);
-                clone.docIDUpto = int.MaxValue;
-                numericDVUpdates.Add(clone);
-            }
-
-            foreach (BinaryDocValuesUpdate bu in @in.binaryDVUpdates)
-            {
-                BinaryDocValuesUpdate clone = new BinaryDocValuesUpdate(bu.term, bu.field, (BytesRef)bu.value);
-                clone.docIDUpto = int.MaxValue;
-                binaryDVUpdates.Add(clone);
-            }
-        }
-
-        public virtual IEnumerable<Term> TermsIterable()
-        {
-            return new IterableAnonymousInnerClassHelper(this);
-        }
-
-#if FEATURE_SERIALIZABLE
-        [Serializable]
-#endif
-        private class IterableAnonymousInnerClassHelper : IEnumerable<Term>
-        {
-            private readonly CoalescedUpdates outerInstance;
-
-            public IterableAnonymousInnerClassHelper(CoalescedUpdates outerInstance)
-            {
-                this.outerInstance = outerInstance;
-            }
-
-            public virtual IEnumerator<Term> GetEnumerator()
-            {
-                IEnumerator<Term>[] subs = new IEnumerator<Term>[outerInstance.iterables.Count];
-                for (int i = 0; i < outerInstance.iterables.Count; i++)
-                {
-                    subs[i] = outerInstance.iterables[i].GetEnumerator();
-                }
-                return new MergedIterator<Term>(subs);
-            }
-
-            IEnumerator IEnumerable.GetEnumerator()
-            {
-                return GetEnumerator();
-            }
-        }
-
-        public virtual IEnumerable<QueryAndLimit> QueriesIterable()
-        {
-            return new IterableAnonymousInnerClassHelper2(this);
-        }
-
-#if FEATURE_SERIALIZABLE
-        [Serializable]
-#endif
-        private class IterableAnonymousInnerClassHelper2 : IEnumerable<QueryAndLimit>
-        {
-            private readonly CoalescedUpdates outerInstance;
-
-            public IterableAnonymousInnerClassHelper2(CoalescedUpdates outerInstance)
-            {
-                this.outerInstance = outerInstance;
-            }
-
-            public virtual IEnumerator<QueryAndLimit> GetEnumerator()
-            {
-                return new IteratorAnonymousInnerClassHelper(this);
-            }
-
-            IEnumerator IEnumerable.GetEnumerator()
-            {
-                return GetEnumerator();
-            }
-
-#if FEATURE_SERIALIZABLE
-            [Serializable]
-#endif
-            private class IteratorAnonymousInnerClassHelper : IEnumerator<QueryAndLimit>
-            {
-                private readonly IterableAnonymousInnerClassHelper2 outerInstance;
-                private readonly IEnumerator<KeyValuePair<Query, int>> iter;
-                private QueryAndLimit current;
-
-                public IteratorAnonymousInnerClassHelper(IterableAnonymousInnerClassHelper2 outerInstance)
-                {
-                    this.outerInstance = outerInstance;
-                    iter = this.outerInstance.outerInstance.queries.GetEnumerator();
-                }
-
-                public void Dispose()
-                {
-                }
-
-                public bool MoveNext()
-                {
-                    if (!iter.MoveNext())
-                    {
-                        return false;
-                    }
-                    KeyValuePair<Query, int> ent = iter.Current;
-                    current = new QueryAndLimit(ent.Key, ent.Value);
-                    return true;
-                }
-
-                public void Reset()
-                {
-                    throw new NotSupportedException();
-                }
-
-                public QueryAndLimit Current
-                {
-                    get { return current; }
-                }
-
-                object IEnumerator.Current
-                {
-                    get { return Current; }
-                }
-            }
-        }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/CompositeReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/CompositeReader.cs b/src/Lucene.Net.Core/Index/CompositeReader.cs
deleted file mode 100644
index bd5ea20..0000000
--- a/src/Lucene.Net.Core/Index/CompositeReader.cs
+++ /dev/null
@@ -1,127 +0,0 @@
-using System;
-using System.Collections.Generic;
-using System.Diagnostics;
-using System.Reflection;
-using System.Text;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    // javadocs
-
-    /// <summary>
-    /// Instances of this reader type can only
-    /// be used to get stored fields from the underlying <see cref="AtomicReader"/>s,
-    /// but it is not possible to directly retrieve postings. To do that, get
-    /// the <see cref="AtomicReaderContext"/> for all sub-readers via <see cref="AtomicReaderContext.Leaves"/>.
-    /// Alternatively, you can mimic an <see cref="AtomicReader"/> (with a serious slowdown),
-    /// by wrapping composite readers with <see cref="SlowCompositeReaderWrapper"/>.
-    ///
-    /// <para/><see cref="IndexReader"/> instances for indexes on disk are usually constructed
-    /// with a call to one of the static <c>DirectoryReader.Open()</c> methods,
-    /// e.g. <see cref="DirectoryReader.Open(Store.Directory)"/>. Because <see cref="DirectoryReader"/> is
-    /// itself a <see cref="CompositeReader"/>, it is not possible to directly get postings from it.
-    /// <para/> Concrete subclasses of <see cref="IndexReader"/> are usually constructed with a call to
-    /// one of the static <c>Open()</c> methods, e.g. <see cref="DirectoryReader.Open(Store.Directory)"/>.
-    ///
-    /// <para/> For efficiency, in this API documents are often referred to via
-    /// <i>document numbers</i>, non-negative integers which each name a unique
-    /// document in the index.  These document numbers are ephemeral -- they may change
-    /// as documents are added to and deleted from an index.  Clients should thus not
-    /// rely on a given document having the same number between sessions.
-    ///
-    /// <para/>
-    /// <b>NOTE</b>: 
-    /// <see cref="IndexReader"/> instances are completely thread
-    /// safe, meaning multiple threads can call any of its methods,
-    /// concurrently.  If your application requires external
-    /// synchronization, you should <b>not</b> synchronize on the
-    /// <see cref="IndexReader"/> instance; use your own
-    /// (non-Lucene) objects instead.
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public abstract class CompositeReader : IndexReader
-    {
-        private volatile CompositeReaderContext readerContext = null; // lazy init
-
-        /// <summary>
-        /// Sole constructor. (For invocation by subclass
-        /// constructors, typically implicit.)
-        /// </summary>
-        protected internal CompositeReader()
-            : base()
-        {
-        }
-
-        public override string ToString()
-        {
-            StringBuilder buffer = new StringBuilder();
-            // walk up through class hierarchy to get a non-empty simple name (anonymous classes have no name):
-            for (Type clazz = this.GetType(); clazz != null; clazz = clazz.GetTypeInfo().BaseType)
-            {
-                if (clazz.Name != null)
-                {
-                    buffer.Append(clazz.Name);
-                    break;
-                }
-            }
-            buffer.Append('(');
-            var subReaders = GetSequentialSubReaders();
-            Debug.Assert(subReaders != null);
-            if (subReaders.Count > 0)
-            {
-                buffer.Append(subReaders[0]);
-                for (int i = 1, c = subReaders.Count; i < c; ++i)
-                {
-                    buffer.Append(" ").Append(subReaders[i]);
-                }
-            }
-            buffer.Append(')');
-            return buffer.ToString();
-        }
-
-        /// <summary>
-        /// Expert: returns the sequential sub readers that this
-        /// reader is logically composed of. This method may not
-        /// return <c>null</c>.
-        ///
-        /// <para/><b>NOTE:</b> In contrast to previous Lucene versions this method
-        /// is no longer public, code that wants to get all <see cref="AtomicReader"/>s
-        /// this composite is composed of should use <see cref="IndexReader.Leaves"/>. </summary>
-        /// <seealso cref="IndexReader.Leaves"/>
-        protected internal abstract IList<IndexReader> GetSequentialSubReaders();
-
-        public override sealed IndexReaderContext Context
-        {
-            get
-            {
-                EnsureOpen();
-                // lazy init without thread safety for perf reasons: Building the readerContext twice does not hurt!
-                if (readerContext == null)
-                {
-                    Debug.Assert(GetSequentialSubReaders() != null);
-                    readerContext = CompositeReaderContext.Create(this);
-                }
-                return readerContext;
-            }
-        }
-    }
-}
\ No newline at end of file
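
For context, the CompositeReader summary above contrasts stored-field access with postings
access. Below is a minimal sketch of that usage pattern against the Lucene.Net 4.8 API; it is
not part of this commit, and the index path and the "title" field are placeholder assumptions.

using System;
using Lucene.Net.Index;
using Lucene.Net.Store;

public static class CompositeReaderUsage
{
    public static void Main()
    {
        // Open a DirectoryReader (a CompositeReader) over an existing index.
        using (Directory dir = FSDirectory.Open("path/to/index"))
        using (DirectoryReader reader = DirectoryReader.Open(dir))
        {
            // Stored fields can be read straight from the composite reader...
            if (reader.MaxDoc > 0)
            {
                var doc = reader.Document(0);
                Console.WriteLine(doc.Get("title"));
            }

            // ...but postings must be obtained from the atomic leaves.
            foreach (AtomicReaderContext leaf in reader.Leaves)
            {
                AtomicReader atomic = leaf.AtomicReader;
                Console.WriteLine("leaf ord=" + leaf.Ord + " docBase=" + leaf.DocBase + " maxDoc=" + atomic.MaxDoc);
            }
        }
    }
}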

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/CompositeReaderContext.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/CompositeReaderContext.cs b/src/Lucene.Net.Core/Index/CompositeReaderContext.cs
deleted file mode 100644
index 76c17e2..0000000
--- a/src/Lucene.Net.Core/Index/CompositeReaderContext.cs
+++ /dev/null
@@ -1,146 +0,0 @@
-using Lucene.Net.Support;
-using System;
-using System.Collections.Generic;
-using System.Diagnostics;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    /// <summary>
-    /// <see cref="IndexReaderContext"/> for <see cref="CompositeReader"/> instances.
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public sealed class CompositeReaderContext : IndexReaderContext
-    {
-        private readonly IList<IndexReaderContext> children;
-        private readonly IList<AtomicReaderContext> leaves;
-        private readonly CompositeReader reader;
-
-        internal static CompositeReaderContext Create(CompositeReader reader)
-        {
-            return (new Builder(reader)).Build();
-        }
-
-        /// <summary>
-        /// Creates a <see cref="CompositeReaderContext"/> for intermediate readers that aren't
-        /// top-level readers in the current context
-        /// </summary>
-        internal CompositeReaderContext(CompositeReaderContext parent, CompositeReader reader, int ordInParent, int docbaseInParent, IList<IndexReaderContext> children)
-            : this(parent, reader, ordInParent, docbaseInParent, children, null)
-        {
-        }
-
-        /// <summary>
-        /// Creates a <see cref="CompositeReaderContext"/> for top-level readers with parent set to <c>null</c>
-        /// </summary>
-        internal CompositeReaderContext(CompositeReader reader, IList<IndexReaderContext> children, IList<AtomicReaderContext> leaves)
-            : this(null, reader, 0, 0, children, leaves)
-        {
-        }
-
-        private CompositeReaderContext(CompositeReaderContext parent, CompositeReader reader, int ordInParent, int docbaseInParent, IList<IndexReaderContext> children, IList<AtomicReaderContext> leaves)
-            : base(parent, ordInParent, docbaseInParent)
-        {
-            this.children = Collections.UnmodifiableList(children);
-            this.leaves = leaves;
-            this.reader = reader;
-        }
-
-        public override IList<AtomicReaderContext> Leaves
-        {
-            get
-            {
-                if (!IsTopLevel)
-                {
-                    throw new System.NotSupportedException("this is not a top-level context.");
-                }
-                Debug.Assert(leaves != null);
-                return leaves;
-            }
-        }
-
-        public override IList<IndexReaderContext> Children
-        {
-            get { return children; }
-        }
-
-        public override IndexReader Reader
-        {
-            get { return reader; }
-        }
-
-#if FEATURE_SERIALIZABLE
-        [Serializable]
-#endif
-        public sealed class Builder
-        {
-            private readonly CompositeReader reader;
-            private readonly IList<AtomicReaderContext> leaves = new List<AtomicReaderContext>();
-            private int leafDocBase = 0;
-
-            public Builder(CompositeReader reader)
-            {
-                this.reader = reader;
-            }
-
-            public CompositeReaderContext Build()
-            {
-                return (CompositeReaderContext)Build(null, reader, 0, 0);
-            }
-
-            internal IndexReaderContext Build(CompositeReaderContext parent, IndexReader reader, int ord, int docBase)
-            {
-                var ar = reader as AtomicReader;
-                if (ar != null)
-                {
-                    var atomic = new AtomicReaderContext(parent, ar, ord, docBase, leaves.Count, leafDocBase);
-                    leaves.Add(atomic);
-                    leafDocBase += reader.MaxDoc;
-                    return atomic;
-                }
-                else
-                {
-                    CompositeReader cr = (CompositeReader)reader;
-                    var sequentialSubReaders = cr.GetSequentialSubReaders();
-                    var children = Arrays.AsList(new IndexReaderContext[sequentialSubReaders.Count]);
-                    CompositeReaderContext newParent;
-                    if (parent == null)
-                    {
-                        newParent = new CompositeReaderContext(cr, children, leaves);
-                    }
-                    else
-                    {
-                        newParent = new CompositeReaderContext(parent, cr, ord, docBase, children);
-                    }
-                    int newDocBase = 0;
-                    for (int i = 0, c = sequentialSubReaders.Count; i < c; i++)
-                    {
-                        IndexReader r = sequentialSubReaders[i];
-                        children[i] = Build(newParent, r, i, newDocBase);
-                        newDocBase += r.MaxDoc;
-                    }
-                    Debug.Assert(newDocBase == cr.MaxDoc);
-                    return newParent;
-                }
-            }
-        }
-    }
-}
\ No newline at end of file
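
The Builder above walks the reader tree and assigns each atomic leaf an ordinal and a cumulative
docBase. The hypothetical helper below (not part of this commit) sketches what those values are
used for: mapping a top-level document number back to its leaf and a leaf-local doc id.

using System;
using System.Collections.Generic;
using Lucene.Net.Index;

public static class LeafResolver
{
    // Scan the leaves from the end; the first leaf whose DocBase is <= globalDoc
    // contains the document, mirroring the docBase bookkeeping done by
    // CompositeReaderContext.Builder while it builds the context tree.
    public static AtomicReaderContext LeafFor(IndexReader reader, int globalDoc, out int localDoc)
    {
        IList<AtomicReaderContext> leaves = reader.Leaves;
        for (int i = leaves.Count - 1; i >= 0; i--)
        {
            if (globalDoc >= leaves[i].DocBase)
            {
                localDoc = globalDoc - leaves[i].DocBase;
                return leaves[i];
            }
        }
        throw new ArgumentOutOfRangeException(nameof(globalDoc), "document number is negative");
    }
}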

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/ConcurrentMergeScheduler.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/ConcurrentMergeScheduler.cs b/src/Lucene.Net.Core/Index/ConcurrentMergeScheduler.cs
deleted file mode 100644
index cd7daf1..0000000
--- a/src/Lucene.Net.Core/Index/ConcurrentMergeScheduler.cs
+++ /dev/null
@@ -1,771 +0,0 @@
-using Lucene.Net.Support;
-using System;
-using System.Collections.Generic;
-using System.Text;
-using System.Threading;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using CollectionUtil = Lucene.Net.Util.CollectionUtil;
-    using Directory = Lucene.Net.Store.Directory;
-
-    /// <summary>
-    /// A <see cref="MergeScheduler"/> that runs each merge using a
-    /// separate thread.
-    ///
-    /// <para>Specify the max number of threads that may run at
-    /// once, and the maximum number of simultaneous merges
-    /// with <see cref="SetMaxMergesAndThreads"/>.</para>
-    ///
-    /// <para>If the number of merges exceeds the max number of threads
-    /// then the largest merges are paused until one of the smaller
-    /// merges completes.</para>
-    ///
-    /// <para>If more than <see cref="MaxMergeCount"/> merges are
-    /// requested then this class will forcefully throttle the
-    /// incoming threads by pausing until one or more merges
-    /// complete.</para>
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public class ConcurrentMergeScheduler : MergeScheduler, IConcurrentMergeScheduler
-    {
-        private int mergeThreadPriority = -1;
-
-        /// <summary>
-        /// List of currently active <see cref="MergeThread"/>s. </summary>
-        protected internal IList<MergeThread> m_mergeThreads = new List<MergeThread>();
-
-        /// <summary>
-        /// Default <see cref="MaxThreadCount"/>.
-        /// We default to 1: tests on spinning-magnet drives showed slower
-        /// indexing performance if more than one merge thread runs at
-        /// once (though on an SSD it was faster)
-        /// </summary>
-        public const int DEFAULT_MAX_THREAD_COUNT = 1;
-
-        /// <summary>
-        /// Default <see cref="MaxMergeCount"/>. </summary>
-        public const int DEFAULT_MAX_MERGE_COUNT = 2;
-
-        // Max number of merge threads allowed to be running at
-        // once.  When there are more merges than this, we
-        // forcefully pause the larger ones, letting the smaller
-        // ones run, up until maxMergeCount merges at which point
-        // we forcefully pause incoming threads (that presumably
-        // are the ones causing so much merging).
-        private int maxThreadCount = DEFAULT_MAX_THREAD_COUNT;
-
-        // Max number of merges we accept before forcefully
-        // throttling the incoming threads
-        private int maxMergeCount = DEFAULT_MAX_MERGE_COUNT;
-
-        /// <summary>
-        /// <see cref="Directory"/> that holds the index. </summary>
-        protected internal Directory m_dir;
-
-        /// <summary>
-        /// <see cref="IndexWriter"/> that owns this instance. </summary>
-        protected internal IndexWriter m_writer;
-
-        /// <summary>
-        /// How many <see cref="MergeThread"/>s have kicked off (this is used
-        /// to name them).
-        /// </summary>
-        protected internal int m_mergeThreadCount;
-
-        /// <summary>
-        /// Sole constructor, with all settings set to default
-        /// values.
-        /// </summary>
-        public ConcurrentMergeScheduler()
-        {
-        }
-
-        /// <summary>
-        /// Sets the maximum number of merge threads and simultaneous merges allowed.
-        /// </summary>
-        /// <param name="maxMergeCount"> the max # simultaneous merges that are allowed.
-        ///       If a merge is necessary yet we already have this many
-        ///       threads running, the incoming thread (that is calling
-        ///       add/updateDocument) will block until a merge thread
-        ///       has completed.  Note that we will only run the
-        ///       smallest <paramref name="maxThreadCount"/> merges at a time. </param>
-        /// <param name="maxThreadCount"> The max # simultaneous merge threads that should
-        ///       be running at once.  This must be &lt;= <paramref name="maxMergeCount"/> </param>
-        public virtual void SetMaxMergesAndThreads(int maxMergeCount, int maxThreadCount)
-        {
-            if (maxThreadCount < 1)
-            {
-                throw new System.ArgumentException("maxThreadCount should be at least 1");
-            }
-            if (maxMergeCount < 1)
-            {
-                throw new System.ArgumentException("maxMergeCount should be at least 1");
-            }
-            if (maxThreadCount > maxMergeCount)
-            {
-                throw new System.ArgumentException("maxThreadCount should be <= maxMergeCount (= " + maxMergeCount + ")");
-            }
-            this.maxThreadCount = maxThreadCount;
-            this.maxMergeCount = maxMergeCount;
-        }
-
-        /// <summary>
-        /// Returns <see cref="maxThreadCount"/>.
-        /// </summary>
-        /// <seealso cref="SetMaxMergesAndThreads(int, int)"/>
-        public virtual int MaxThreadCount
-        {
-            get
-            {
-                return maxThreadCount;
-            }
-        }
-
-        /// <summary>
-        /// See <see cref="SetMaxMergesAndThreads(int, int)"/>. </summary>
-        public virtual int MaxMergeCount
-        {
-            get
-            {
-                return maxMergeCount;
-            }
-        }
-
-        /// <summary>
-        /// Return the priority that merge threads run at.  By
-        /// default the priority is 1 plus the priority of (ie,
-        /// slightly higher priority than) the first thread that
-        /// calls merge.
-        /// </summary>
-        public virtual int MergeThreadPriority
-        {
-            get
-            {
-                lock (this)
-                {
-                    InitMergeThreadPriority();
-                    return mergeThreadPriority;
-                }
-            }
-        }
-
-        /// <summary>
-        /// Set the base priority that merge threads run at.
-        /// Note that CMS may increase priority of some merge
-        /// threads beyond this base priority.  It's best not to
-        /// set this any higher than
-        /// <see cref="ThreadPriority.Highest"/>(4)-maxThreadCount, so that CMS has
-        /// room to set relative priority among threads.
-        /// </summary>
-        public virtual void SetMergeThreadPriority(int priority)
-        {
-            lock (this)
-            {
-                if (priority > (int)ThreadPriority.Highest || priority < (int)ThreadPriority.Lowest)
-                {
-                    throw new System.ArgumentException("priority must be in range " + (int)ThreadPriority.Lowest + " .. " + (int)ThreadPriority.Highest + " inclusive");
-                }
-                mergeThreadPriority = priority;
-                UpdateMergeThreads();
-            }
-        }
-
-        /// <summary>
-        /// Sorts <see cref="MergeThread"/>s; larger merges come first. </summary>
-        protected internal static readonly IComparer<MergeThread> compareByMergeDocCount = new ComparerAnonymousInnerClassHelper();
-
-        private class ComparerAnonymousInnerClassHelper : IComparer<MergeThread>
-        {
-            public ComparerAnonymousInnerClassHelper()
-            {
-            }
-
-            public virtual int Compare(MergeThread t1, MergeThread t2)
-            {
-                MergePolicy.OneMerge m1 = t1.CurrentMerge;
-                MergePolicy.OneMerge m2 = t2.CurrentMerge;
-
-                int c1 = m1 == null ? int.MaxValue : m1.TotalDocCount;
-                int c2 = m2 == null ? int.MaxValue : m2.TotalDocCount;
-
-                return c2 - c1;
-            }
-        }
-
-        /// <summary>
-        /// Called whenever the running merges have changed, to pause &amp; unpause
-        /// threads. This method sorts the merge threads by their merge size in
-        /// descending order and then pauses/unpauses threads from first to last --
-        /// that way, smaller merges are guaranteed to run before larger ones.
-        /// </summary>
-        protected virtual void UpdateMergeThreads()
-        {
-            lock (this)
-            {
-                // Only look at threads that are alive & not in the
-                // process of stopping (ie have an active merge):
-                IList<MergeThread> activeMerges = new List<MergeThread>();
-
-                int threadIdx = 0;
-                while (threadIdx < m_mergeThreads.Count)
-                {
-                    MergeThread mergeThread = m_mergeThreads[threadIdx];
-                    if (!mergeThread.IsAlive)
-                    {
-                        // Prune any dead threads
-                        m_mergeThreads.RemoveAt(threadIdx);
-                        continue;
-                    }
-                    if (mergeThread.CurrentMerge != null)
-                    {
-                        activeMerges.Add(mergeThread);
-                    }
-                    threadIdx++;
-                }
-
-                // Sort the merge threads in descending order.
-                CollectionUtil.TimSort(activeMerges, compareByMergeDocCount);
-
-                int pri = mergeThreadPriority;
-                int activeMergeCount = activeMerges.Count;
-                for (threadIdx = 0; threadIdx < activeMergeCount; threadIdx++)
-                {
-                    MergeThread mergeThread = activeMerges[threadIdx];
-                    MergePolicy.OneMerge merge = mergeThread.CurrentMerge;
-                    if (merge == null)
-                    {
-                        continue;
-                    }
-
-                    // pause the thread if maxThreadCount is smaller than the number of merge threads.
-                    bool doPause = threadIdx < activeMergeCount - maxThreadCount;
-
-                    if (IsVerbose)
-                    {
-                        if (doPause != merge.IsPaused)
-                        {
-                            if (doPause)
-                            {
-                                Message("pause thread " + mergeThread.Name);
-                            }
-                            else
-                            {
-                                Message("unpause thread " + mergeThread.Name);
-                            }
-                        }
-                    }
-                    if (doPause != merge.IsPaused)
-                    {
-                        merge.SetPause(doPause);
-                    }
-
-                    if (!doPause)
-                    {
-                        if (IsVerbose)
-                        {
-                            Message("set priority of merge thread " + mergeThread.Name + " to " + pri);
-                        }
-                        mergeThread.SetThreadPriority((ThreadPriority)pri);
-                        pri = Math.Min((int)ThreadPriority.Highest, 1 + pri);
-                    }
-                }
-            }
-        }
-
-        /// <summary>
-        /// Returns <c>true</c> if verbose messaging is enabled. This method is usually used in
-        /// conjunction with <see cref="Message(String)"/>, like this:
-        ///
-        /// <code>
-        /// if (IsVerbose) 
-        /// {
-        ///     Message(&quot;your message&quot;);
-        /// }
-        /// </code>
-        /// </summary>
-        protected virtual bool IsVerbose
-        {
-            get { return m_writer != null && m_writer.infoStream.IsEnabled("CMS"); }
-        }
-
-        /// <summary>
-        /// Outputs the given message - this method assumes <see cref="IsVerbose"/> was
-        /// called and returned <c>true</c>.
-        /// </summary>
-        protected internal virtual void Message(string message)
-        {
-            m_writer.infoStream.Message("CMS", message);
-        }
-
-        private void InitMergeThreadPriority()
-        {
-            lock (this)
-            {
-                if (mergeThreadPriority == -1)
-                {
-                    // Default to slightly higher priority than our
-                    // calling thread
-                    mergeThreadPriority = 1 + (int)ThreadClass.Current().Priority;
-                    if (mergeThreadPriority > (int)ThreadPriority.Highest)
-                    {
-                        mergeThreadPriority = (int)ThreadPriority.Highest;
-                    }
-                }
-            }
-        }
-
-        protected override void Dispose(bool disposing)
-        {
-            Sync();
-        }
-
-        /// <summary>
-        /// Wait for any running merge threads to finish. This call is not interruptible as used by <see cref="Dispose(bool)"/>. </summary>
-        public virtual void Sync()
-        {
-            bool interrupted = false;
-            try
-            {
-                while (true)
-                {
-                    MergeThread toSync = null;
-                    lock (this)
-                    {
-                        foreach (MergeThread t in m_mergeThreads)
-                        {
-                            if (t.IsAlive)
-                            {
-                                toSync = t;
-                                break;
-                            }
-                        }
-                    }
-                    if (toSync != null)
-                    {
-                        try
-                        {
-                            toSync.Join();
-                        }
-#pragma warning disable 168
-                        catch (ThreadInterruptedException ie)
-#pragma warning restore 168
-                        {
-                            // ignore this Exception, we will retry until all threads are dead
-                            interrupted = true;
-                        }
-                    }
-                    else
-                    {
-                        break;
-                    }
-                }
-            }
-            finally
-            {
-                // finally, restore interrupt status:
-                if (interrupted)
-                {
-                    Thread.CurrentThread.Interrupt();
-                }
-            }
-        }
-
-        /// <summary>
-        /// Returns the number of merge threads that are alive. Note that this number
-        /// is &lt;= the size of <see cref="m_mergeThreads"/>.
-        /// </summary>
-        protected virtual int MergeThreadCount
-        {
-            get
-            {
-                lock (this)
-                {
-                    int count = 0;
-                    foreach (MergeThread mt in m_mergeThreads)
-                    {
-                        if (mt.IsAlive && mt.CurrentMerge != null)
-                        {
-                            count++;
-                        }
-                    }
-                    return count;
-                }
-            }
-        }
-
-        public override void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound)
-        {
-            lock (this)
-            {
-                //Debug.Assert(!Thread.holdsLock(writer));
-
-                this.m_writer = writer;
-
-                InitMergeThreadPriority();
-
-                m_dir = writer.Directory;
-
-                // First, quickly run through the newly proposed merges
-                // and add any orthogonal merges (ie a merge not
-                // involving segments already pending to be merged) to
-                // the queue.  If we are way behind on merging, many of
-                // these newly proposed merges will likely already be
-                // registered.
-
-                if (IsVerbose)
-                {
-                    Message("now merge");
-                    Message("  index: " + writer.SegString());
-                }
-
-                // Iterate, pulling from the IndexWriter's queue of
-                // pending merges, until it's empty:
-                while (true)
-                {
-                    long startStallTime = 0;
-                    while (writer.HasPendingMerges() && MergeThreadCount >= maxMergeCount)
-                    {
-                        // this means merging has fallen too far behind: we
-                        // have already created maxMergeCount threads, and
-                        // now there's at least one more merge pending.
-                        // Note that only maxThreadCount of
-                        // those created merge threads will actually be
-                        // running; the rest will be paused (see
-                        // updateMergeThreads).  We stall this producer
-                        // thread to prevent creation of new segments,
-                        // until merging has caught up:
-                        startStallTime = Environment.TickCount;
-                        if (IsVerbose)
-                        {
-                            Message("    too many merges; stalling...");
-                        }
-                        try
-                        {
-                            Monitor.Wait(this);
-                        }
-                        catch (ThreadInterruptedException ie)
-                        {
-                            throw new ThreadInterruptedException(ie.ToString(), ie);
-                        }
-                    }
-
-                    if (IsVerbose)
-                    {
-                        if (startStallTime != 0)
-                        {
-                            Message("  stalled for " + (Environment.TickCount - startStallTime) + " msec");
-                        }
-                    }
-
-                    MergePolicy.OneMerge merge = writer.NextMerge();
-                    if (merge == null)
-                    {
-                        if (IsVerbose)
-                        {
-                            Message("  no more merges pending; now return");
-                        }
-                        return;
-                    }
-
-                    bool success = false;
-                    try
-                    {
-                        if (IsVerbose)
-                        {
-                            Message("  consider merge " + writer.SegString(merge.Segments));
-                        }
-
-                        // OK to spawn a new merge thread to handle this
-                        // merge:
-                        MergeThread merger = GetMergeThread(writer, merge);
-                        m_mergeThreads.Add(merger);
-                        if (IsVerbose)
-                        {
-                            Message("    launch new thread [" + merger.Name + "]");
-                        }
-
-                        merger.Start();
-
-                        // Must call this after starting the thread else
-                        // the new thread is removed from mergeThreads
-                        // (since it's not alive yet):
-                        UpdateMergeThreads();
-
-                        success = true;
-                    }
-                    finally
-                    {
-                        if (!success)
-                        {
-                            writer.MergeFinish(merge);
-                        }
-                    }
-                }
-            }
-        }
-
-        /// <summary>
-        /// Does the actual merge, by calling <see cref="IndexWriter.Merge(MergePolicy.OneMerge)"/> </summary>
-        protected virtual void DoMerge(MergePolicy.OneMerge merge)
-        {
-            m_writer.Merge(merge);
-        }
-
-        /// <summary>
-        /// Create and return a new <see cref="MergeThread"/> </summary>
-        protected virtual MergeThread GetMergeThread(IndexWriter writer, MergePolicy.OneMerge merge)
-        {
-            lock (this)
-            {
-                MergeThread thread = new MergeThread(this, writer, merge);
-                thread.SetThreadPriority((ThreadPriority)mergeThreadPriority);
-                thread.IsBackground = true;
-                thread.Name = "Lucene Merge Thread #" + m_mergeThreadCount++;
-                return thread;
-            }
-        }
-
-        /// <summary>
-        /// Runs a merge thread, which may run one or more merges
-        /// in sequence.
-        /// </summary>
-        protected internal class MergeThread : ThreadClass//System.Threading.Thread
-        {
-            private readonly ConcurrentMergeScheduler outerInstance;
-
-            internal IndexWriter tWriter;
-            internal MergePolicy.OneMerge startMerge;
-            internal MergePolicy.OneMerge runningMerge;
-            private volatile bool done;
-
-            /// <summary>
-            /// Sole constructor. </summary>
-            public MergeThread(ConcurrentMergeScheduler outerInstance, IndexWriter writer, MergePolicy.OneMerge startMerge)
-            {
-                this.outerInstance = outerInstance;
-                this.tWriter = writer;
-                this.startMerge = startMerge;
-            }
-
-            /// <summary>
-            /// Record the currently running merge. </summary>
-            public virtual MergePolicy.OneMerge RunningMerge
-            {
-                set
-                {
-                    lock (this)
-                    {
-                        runningMerge = value;
-                    }
-                }
-                get
-                {
-                    lock (this)
-                    {
-                        return runningMerge;
-                    }
-                }
-            }
-
-            /// <summary>
-            /// Return the current merge, or <c>null</c> if this 
-            /// <see cref="MergeThread"/> is done.
-            /// </summary>
-            public virtual MergePolicy.OneMerge CurrentMerge
-            {
-                get
-                {
-                    lock (this)
-                    {
-                        if (done)
-                        {
-                            return null;
-                        }
-                        else if (runningMerge != null)
-                        {
-                            return runningMerge;
-                        }
-                        else
-                        {
-                            return startMerge;
-                        }
-                    }
-                }
-            }
-
-            /// <summary>
-            /// Set the priority of this thread. </summary>
-            public virtual void SetThreadPriority(ThreadPriority priority)
-            {
-                try
-                {
-                    Priority = priority;
-                }
-#pragma warning disable 168
-                catch (System.NullReferenceException npe)
-                {
-                    // Strangely, Sun's JDK 1.5 on Linux sometimes
-                    // throws NPE out of here...
-                }
-                catch (System.Security.SecurityException se)
-#pragma warning restore 168
-                {
-                    // Ignore this because we will still run fine with
-                    // normal thread priority
-                }
-            }
-
-            public override void Run()
-            {
-                // First time through the while loop we do the merge
-                // that we were started with:
-                MergePolicy.OneMerge merge = this.startMerge;
-
-                try
-                {
-                    if (outerInstance.IsVerbose)
-                    {
-                        outerInstance.Message("  merge thread: start");
-                    }
-
-                    while (true)
-                    {
-                        RunningMerge = merge;
-                        outerInstance.DoMerge(merge);
-
-                        // Subsequent times through the loop we do any new
-                        // merge that writer says is necessary:
-                        merge = tWriter.NextMerge();
-
-                        // Notify here in case any threads were stalled;
-                        // they will notice that the pending merge has
-                        // been pulled and possibly resume:
-                        lock (outerInstance)
-                        {
-                            Monitor.PulseAll(outerInstance);
-                        }
-
-                        if (merge != null)
-                        {
-                            outerInstance.UpdateMergeThreads();
-                            if (outerInstance.IsVerbose)
-                            {
-                                outerInstance.Message("  merge thread: do another merge " + tWriter.SegString(merge.Segments));
-                            }
-                        }
-                        else
-                        {
-                            break;
-                        }
-                    }
-
-                    if (outerInstance.IsVerbose)
-                    {
-                        outerInstance.Message("  merge thread: done");
-                    }
-                }
-                catch (Exception exc)
-                {
-                    // Ignore the exception if it was due to abort:
-                    if (!(exc is MergePolicy.MergeAbortedException))
-                    {
-                        //System.out.println(Thread.currentThread().getName() + ": CMS: exc");
-                        //exc.printStackTrace(System.out);
-                        if (!outerInstance.suppressExceptions)
-                        {
-                            // suppressExceptions is normally only set during
-                            // testing.
-                            outerInstance.HandleMergeException(exc);
-                        }
-                    }
-                }
-                finally
-                {
-                    done = true;
-                    lock (outerInstance)
-                    {
-                        outerInstance.UpdateMergeThreads();
-                        Monitor.PulseAll(outerInstance);
-                    }
-                }
-            }
-        }
-
-        /// <summary>
-        /// Called when an exception is hit in a background merge
-        /// thread
-        /// </summary>
-        protected virtual void HandleMergeException(Exception exc)
-        {
-            try
-            {
-                // When an exception is hit during merge, IndexWriter
-                // removes any partial files and then allows another
-                // merge to run.  If whatever caused the error is not
-                // transient then the exception will keep happening,
-                // so, we sleep here to avoid saturating CPU in such
-                // cases:
-                Thread.Sleep(250);
-            }
-            catch (ThreadInterruptedException ie)
-            {
-                throw new ThreadInterruptedException("Thread Interrupted Exception", ie);
-            }
-            throw new MergePolicy.MergeException(exc, m_dir);
-        }
-
-        private bool suppressExceptions;
-
-        /// <summary>
-        /// Used for testing </summary>
-        public virtual void SetSuppressExceptions()
-        {
-            suppressExceptions = true;
-        }
-
-        /// <summary>
-        /// Used for testing </summary>
-        public virtual void ClearSuppressExceptions()
-        {
-            suppressExceptions = false;
-        }
-
-        public override string ToString()
-        {
-            StringBuilder sb = new StringBuilder(this.GetType().Name + ": ");
-            sb.Append("maxThreadCount=").Append(maxThreadCount).Append(", ");
-            sb.Append("maxMergeCount=").Append(maxMergeCount).Append(", ");
-            sb.Append("mergeThreadPriority=").Append(mergeThreadPriority);
-            return sb.ToString();
-        }
-
-        public override IMergeScheduler Clone()
-        {
-            ConcurrentMergeScheduler clone = (ConcurrentMergeScheduler)base.Clone();
-            clone.m_writer = null;
-            clone.m_dir = null;
-            clone.m_mergeThreads = new List<MergeThread>();
-            return clone;
-        }
-    }
-}
\ No newline at end of file
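
SetMaxMergesAndThreads above is the main tuning knob of this scheduler. The sketch below (not
part of this commit) shows one way to wire it into an IndexWriter; StandardAnalyzer and the
property-style IndexWriterConfig.MergeScheduler assignment are assumptions taken from the wider
4.8 API surface, not from this file.

using Lucene.Net.Analysis.Standard;
using Lucene.Net.Index;
using Lucene.Net.Store;
using Lucene.Net.Util;

public static class MergeSchedulerSetup
{
    // Queue up to 4 merges but run at most 2 merge threads at once;
    // maxThreadCount must be <= maxMergeCount, as validated by SetMaxMergesAndThreads.
    public static IndexWriter CreateWriter(Directory dir)
    {
        var analyzer = new StandardAnalyzer(LuceneVersion.LUCENE_48);
        var config = new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer);

        var cms = new ConcurrentMergeScheduler();
        cms.SetMaxMergesAndThreads(maxMergeCount: 4, maxThreadCount: 2);
        config.MergeScheduler = cms; // assumed property setter on IndexWriterConfig

        return new IndexWriter(dir, config);
    }
}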

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/CorruptIndexException.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/CorruptIndexException.cs b/src/Lucene.Net.Core/Index/CorruptIndexException.cs
deleted file mode 100644
index d4c1f58..0000000
--- a/src/Lucene.Net.Core/Index/CorruptIndexException.cs
+++ /dev/null
@@ -1,62 +0,0 @@
-using System;
-using System.IO;
-#if FEATURE_SERIALIZABLE
-using System.Runtime.Serialization;
-#endif
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    /// <summary>
-    /// This exception is thrown when Lucene detects
-    /// an inconsistency in the index.
-    /// </summary>
-    // LUCENENET: All exception classes should be marked serializable
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public class CorruptIndexException : IOException // LUCENENET specific - made public instead of internal because there are public subclasses
-    {
-        /// <summary>
-        /// Constructor. </summary>
-        public CorruptIndexException(string message)
-            : base(message)
-        {
-        }
-
-        /// <summary>
-        /// Constructor. </summary>
-        public CorruptIndexException(string message, Exception ex) 
-            : base(message, ex)
-        {
-        }
-
-#if FEATURE_SERIALIZABLE
-        /// <summary>
-        /// Initializes a new instance of this class with serialized data.
-        /// </summary>
-        /// <param name="info">The <see cref="SerializationInfo"/> that holds the serialized object data about the exception being thrown.</param>
-        /// <param name="context">The <see cref="StreamingContext"/> that contains contextual information about the source or destination.</param>
-        public CorruptIndexException(SerializationInfo info, StreamingContext context)
-            : base(info, context)
-        {
-        }
-#endif
-    }
-}
\ No newline at end of file
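
Because CorruptIndexException derives from IOException, callers that want to treat index
corruption differently from ordinary I/O failures need to catch it first. A small sketch of that
pattern (not part of this commit) when opening a reader:

using System;
using System.IO;
using Lucene.Net.Index;
using Lucene.Net.Store;

public static class SafeIndexOpen
{
    // Returns an open reader, or null when the index is structurally corrupt.
    public static DirectoryReader TryOpen(Directory dir)
    {
        try
        {
            return DirectoryReader.Open(dir);
        }
        catch (CorruptIndexException e)
        {
            // The index itself is inconsistent; callers may decide to rebuild it.
            Console.Error.WriteLine("Index is corrupt: " + e.Message);
            return null;
        }
        catch (IOException)
        {
            // Ordinary I/O trouble (missing files, permissions, ...) is not corruption;
            // let it propagate unchanged.
            throw;
        }
    }
}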

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/DirectoryReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DirectoryReader.cs b/src/Lucene.Net.Core/Index/DirectoryReader.cs
deleted file mode 100644
index d3cf4de..0000000
--- a/src/Lucene.Net.Core/Index/DirectoryReader.cs
+++ /dev/null
@@ -1,493 +0,0 @@
-using System;
-using System.Collections.Generic;
-using System.Diagnostics;
-using System.IO;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    // javadocs
-    using Directory = Lucene.Net.Store.Directory;
-
-    /// <summary>
-    /// <see cref="DirectoryReader"/> is an implementation of <see cref="CompositeReader"/>
-    /// that can read indexes in a <see cref="Store.Directory"/>.
-    ///
-    /// <para/><see cref="DirectoryReader"/> instances are usually constructed with a call to
-    /// one of the static <c>Open()</c> methods, e.g. <see cref="Open(Directory)"/>.
-    ///
-    /// <para/> For efficiency, in this API documents are often referred to via
-    /// <i>document numbers</i>, non-negative integers which each name a unique
-    /// document in the index.  These document numbers are ephemeral -- they may change
-    /// as documents are added to and deleted from an index.  Clients should thus not
-    /// rely on a given document having the same number between sessions.
-    ///
-    /// <para/>
-    /// <b>NOTE</b>:
-    /// <see cref="IndexReader"/> instances are completely thread
-    /// safe, meaning multiple threads can call any of its methods,
-    /// concurrently.  If your application requires external
-    /// synchronization, you should <b>not</b> synchronize on the
-    /// <see cref="IndexReader"/> instance; use your own
-    /// (non-Lucene) objects instead.
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public abstract class DirectoryReader : BaseCompositeReader<AtomicReader>
-    {
-        /// <summary>
-        /// Default termInfosIndexDivisor. </summary>
-        public static readonly int DEFAULT_TERMS_INDEX_DIVISOR = 1;
-
-        /// <summary>
-        /// The index directory. </summary>
-        protected readonly Directory m_directory;
-
-        /// <summary>
-    /// Returns an <see cref="IndexReader"/> reading the index in the given
-        /// <see cref="Store.Directory"/> </summary>
-        /// <param name="directory"> the index directory </param>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        new public static DirectoryReader Open(Directory directory)
-        {
-            return StandardDirectoryReader.Open(directory, null, DEFAULT_TERMS_INDEX_DIVISOR);
-        }
-
-        /// <summary>
-        /// Expert: Returns an <see cref="IndexReader"/> reading the index in the given
-        /// <see cref="Store.Directory"/> with the given termInfosIndexDivisor. </summary>
-        /// <param name="directory"> the index directory </param>
-        /// <param name="termInfosIndexDivisor"> Subsamples which indexed
-        /// terms are loaded into RAM. this has the same effect as setting
-        /// <see cref="LiveIndexWriterConfig.TermIndexInterval"/> (on <see cref="IndexWriterConfig"/>) except that setting
-        /// must be done at indexing time while this setting can be
-        /// set per reader.  When set to N, then one in every
-        /// N*termIndexInterval terms in the index is loaded into
-        /// memory.  By setting this to a value &gt; 1 you can reduce
-        /// memory usage, at the expense of higher latency when
-        /// loading a TermInfo.  The default value is 1.  Set this
-        /// to -1 to skip loading the terms index entirely.
-        /// <b>NOTE:</b> divisor settings &gt; 1 do not apply to all <see cref="Codecs.PostingsFormat"/>
-        /// implementations, including the default one in this release. It only makes
-        /// sense for terms indexes that can efficiently re-sample terms at load time. </param>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        new public static DirectoryReader Open(Directory directory, int termInfosIndexDivisor)
-        {
-            return StandardDirectoryReader.Open(directory, null, termInfosIndexDivisor);
-        }
-
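A short sketch of the memory/latency trade-off behind the divisor overload; it reuses the dir variable from the sketch above, and the value 4 is an arbitrary assumption:

    // Load only one in every 4 * TermIndexInterval indexed terms into RAM:
    // lower memory use, higher term-lookup latency.  Pass -1 to skip the
    // terms index entirely.  As noted above, the default postings format
    // ignores divisors > 1.
    using (DirectoryReader reader = DirectoryReader.Open(dir, 4))
    {
        // search as usual; only the terms-index behavior differs
    }
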
-        /// <summary>
-        /// Open a near-real-time <see cref="IndexReader"/> from the <see cref="IndexWriter"/>.
-        /// <para/>
-        /// @lucene.experimental 
-        /// </summary>
-        /// <param name="writer"> The <see cref="IndexWriter"/> to open from </param>
-        /// <param name="applyAllDeletes"> If <c>true</c>, all buffered deletes will
-        /// be applied (made visible) in the returned reader.  If
-        /// <c>false</c>, the deletes are not applied but remain buffered
-        /// (in IndexWriter) so that they will be applied in the
-        /// future.  Applying deletes can be costly, so if your app
-        /// can tolerate deleted documents being returned you might
-        /// gain some performance by passing <c>false</c>. </param>
-        /// <returns> The new <see cref="IndexReader"/> </returns>
-        /// <exception cref="CorruptIndexException"> if the index is corrupt </exception>
-        /// <exception cref="IOException"> if there is a low-level IO error
-        /// </exception>
-        /// <seealso cref="OpenIfChanged(DirectoryReader, IndexWriter, bool)"/>
-        new public static DirectoryReader Open(IndexWriter writer, bool applyAllDeletes)
-        {
-            return writer.GetReader(applyAllDeletes);
-        }
-
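A hedged sketch of the near-real-time pattern this overload enables; StandardAnalyzer comes from the separate Lucene.Net.Analysis.Common package, and the field name and text are made up (additional namespaces assumed: Lucene.Net.Analysis.Standard, Lucene.Net.Documents, Lucene.Net.Util):

    var analyzer = new StandardAnalyzer(LuceneVersion.LUCENE_48);
    using (var writer = new IndexWriter(dir, new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer)))
    {
        var doc = new Document();
        doc.Add(new TextField("title", "hello world", Field.Store.YES));
        writer.AddDocument(doc);                    // buffered, not committed

        // applyAllDeletes = true resolves buffered deletes now (costlier but exact);
        // false trades that for lower reopen latency.
        using (DirectoryReader nrtReader = DirectoryReader.Open(writer, true))
        {
            // nrtReader already sees the uncommitted document
        }
    }
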
-        /// <summary>
-        /// Expert: returns an <see cref="IndexReader"/> reading the index in the given
-        /// <see cref="Index.IndexCommit"/>. </summary>
-        /// <param name="commit"> the commit point to open </param>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        new public static DirectoryReader Open(IndexCommit commit)
-        {
-            return StandardDirectoryReader.Open(commit.Directory, commit, DEFAULT_TERMS_INDEX_DIVISOR);
-        }
-
-        /// <summary>
-        /// Expert: returns an <see cref="IndexReader"/> reading the index in the given
-        /// <see cref="Index.IndexCommit"/> and <paramref name="termInfosIndexDivisor"/>. </summary>
-        /// <param name="commit"> the commit point to open </param>
-        /// <param name="termInfosIndexDivisor"> Subsamples which indexed
-        /// terms are loaded into RAM. This has the same effect as setting
-        /// <see cref="LiveIndexWriterConfig.TermIndexInterval"/> (on <see cref="IndexWriterConfig"/>) except that setting
-        /// must be done at indexing time while this setting can be
-        /// set per reader.  When set to N, then one in every
-        /// N*termIndexInterval terms in the index is loaded into
-        /// memory.  By setting this to a value &gt; 1 you can reduce
-        /// memory usage, at the expense of higher latency when
-        /// loading a TermInfo.  The default value is 1.  Set this
-        /// to -1 to skip loading the terms index entirely.
-        /// <b>NOTE:</b> divisor settings &gt; 1 do not apply to all <see cref="Codecs.PostingsFormat"/>
-        /// implementations, including the default one in this release. It only makes
-        /// sense for terms indexes that can efficiently re-sample terms at load time. </param>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        new public static DirectoryReader Open(IndexCommit commit, int termInfosIndexDivisor)
-        {
-            return StandardDirectoryReader.Open(commit.Directory, commit, termInfosIndexDivisor);
-        }
-
-        /// <summary>
-        /// If the index has changed since the provided reader was
-        /// opened, open and return a new reader; else, return
-        /// <c>null</c>.  The new reader, if not <c>null</c>, will be the same
-        /// type of reader as the previous one, i.e., a near-real-time (NRT) reader
-        /// will open a new NRT reader, a <see cref="MultiReader"/> will open a
-        /// new <see cref="MultiReader"/>,  etc.
-        ///
-        /// <para/>This method is typically far less costly than opening a
-        /// fully new <see cref="DirectoryReader"/> as it shares
-        /// resources (for example sub-readers) with the provided
-        /// <see cref="DirectoryReader"/>, when possible.
-        ///
-        /// <para/>The provided reader is not disposed (you are responsible
-        /// for doing so); if a new reader is returned you also
-        /// must eventually dispose it.  Be sure to never dispose a
-        /// reader while other threads are still using it; see
-        /// <see cref="Search.SearcherManager"/> to simplify managing this.
-        /// </summary>
-        /// <exception cref="CorruptIndexException"> if the index is corrupt </exception>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        /// <returns> <c>null</c> if there are no changes; else, a new
-        /// <see cref="DirectoryReader"/> instance which you must eventually dispose </returns>
-        public static DirectoryReader OpenIfChanged(DirectoryReader oldReader)
-        {
-            DirectoryReader newReader = oldReader.DoOpenIfChanged();
-            Debug.Assert(newReader != oldReader);
-            return newReader;
-        }
-
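The refresh loop this method supports, as a small sketch; reader is assumed to be a DirectoryReader opened earlier:

    // Returns null when nothing changed; otherwise switch readers and
    // dispose the old one once no searches still use it (or let
    // Search.SearcherManager handle this bookkeeping for you).
    DirectoryReader changed = DirectoryReader.OpenIfChanged(reader);
    if (changed != null)
    {
        reader.Dispose();
        reader = changed;
    }
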
-        /// <summary>
-        /// If the <see cref="Index.IndexCommit"/> differs from what the
-        /// provided reader is searching, open and return a new
-        /// reader; else, return <c>null</c>.
-        /// </summary>
-        /// <seealso cref="OpenIfChanged(DirectoryReader)"/>
-        public static DirectoryReader OpenIfChanged(DirectoryReader oldReader, IndexCommit commit)
-        {
-            DirectoryReader newReader = oldReader.DoOpenIfChanged(commit);
-            Debug.Assert(newReader != oldReader);
-            return newReader;
-        }
-
-        /// <summary>
-        /// Expert: If there are changes (committed or not) in the
-        /// <see cref="IndexWriter"/> versus what the provided reader is
-        /// searching, then open and return a new
-        /// <see cref="IndexReader"/> searching both committed and uncommitted
-        /// changes from the writer; else, return <c>null</c> (though, the
-        /// current implementation never returns <c>null</c>).
-        ///
-        /// <para/>This provides "near real-time" searching, in that
-        /// changes made during an <see cref="IndexWriter"/> session can be
-        /// quickly made available for searching without closing
-        /// the writer nor calling <see cref="IndexWriter.Commit()"/>.
-        ///
-        /// <para>It's <i>near</i> real-time because there is no hard
-        /// guarantee on how quickly you can get a new reader after
-        /// making changes with <see cref="IndexWriter"/>.  You'll have to
-        /// experiment in your situation to determine if it's
-        /// fast enough.  As this is a new and experimental
-        /// feature, please report back on your findings so we can
-        /// learn, improve and iterate.</para>
-        ///
-        /// <para>The very first time this method is called, this
-        /// writer instance will make every effort to pool the
-        /// readers that it opens for doing merges, applying
-        /// deletes, etc.  This means additional resources (RAM,
-        /// file descriptors, CPU time) will be consumed.</para>
-        ///
-        /// <para>For lower latency on reopening a reader, you should
-        /// call <see cref="LiveIndexWriterConfig.MergedSegmentWarmer"/> (on <see cref="IndexWriterConfig"/>) to
-        /// pre-warm a newly merged segment before it's committed
-        /// to the index.  This is important for minimizing
-        /// index-to-search delay after a large merge.  </para>
-        ///
-        /// <para>If an AddIndexes* call is running in another thread,
-        /// then this reader will only search those segments from
-        /// the foreign index that have been successfully copied
-        /// over, so far.</para>
-        ///
-        /// <para><b>NOTE</b>: Once the writer is disposed, any
-        /// outstanding readers may continue to be used.  However,
-        /// if you attempt to reopen any of those readers, you'll
-        /// hit an <see cref="System.ObjectDisposedException"/>.</para>
-        /// 
-        /// @lucene.experimental
-        /// </summary>
-        /// <returns> <see cref="DirectoryReader"/> that covers entire index plus all
-        /// changes made so far by this <see cref="IndexWriter"/> instance, or
-        /// <c>null</c> if there are no new changes
-        /// </returns>
-        /// <param name="writer"> The <see cref="IndexWriter"/> to open from
-        /// </param>
-        /// <param name="applyAllDeletes"> If <c>true</c>, all buffered deletes will
-        /// be applied (made visible) in the returned reader.  If
-        /// <c>false</c>, the deletes are not applied but remain buffered
-        /// (in <see cref="IndexWriter"/>) so that they will be applied in the
-        /// future.  Applying deletes can be costly, so if your app
-        /// can tolerate deleted documents being returned you might
-        /// gain some performance by passing <c>false</c>.
-        /// </param>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        public static DirectoryReader OpenIfChanged(DirectoryReader oldReader, IndexWriter writer, bool applyAllDeletes)
-        {
-            DirectoryReader newReader = oldReader.DoOpenIfChanged(writer, applyAllDeletes);
-            Debug.Assert(newReader != oldReader);
-            return newReader;
-        }
-
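A sketch of refreshing a near-real-time reader against the same writer, continuing the NRT sketch above; anotherDoc stands in for any further document and is an assumption, not part of the original source:

    writer.AddDocument(anotherDoc);        // still uncommitted

    DirectoryReader refreshed = DirectoryReader.OpenIfChanged(nrtReader, writer, true);
    if (refreshed != null)                 // null would mean nothing new to see
    {
        nrtReader.Dispose();
        nrtReader = refreshed;             // now covers the new uncommitted document
    }
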
-        /// <summary>
-        /// Returns all commit points that exist in the <see cref="Store.Directory"/>.
-        /// Normally, because the default is 
-        /// <see cref="KeepOnlyLastCommitDeletionPolicy"/>, there would be only
-        /// one commit point.  But if you're using a custom
-        /// <see cref="IndexDeletionPolicy"/> then there could be many commits.
-        /// Once you have a given commit, you can open a reader on
-        /// it by calling <see cref="DirectoryReader.Open(IndexCommit)"/>.
-        /// There must be at least one commit in
-        /// the <see cref="Store.Directory"/>, else this method throws 
-        /// <see cref="IndexNotFoundException"/>.  Note that if a commit is in
-        /// progress while this method is running, that commit
-        /// may or may not be returned.
-        /// </summary>
-        /// <returns> a sorted list of <see cref="Index.IndexCommit"/>s, from oldest
-        /// to latest. </returns>
-        public static IList<IndexCommit> ListCommits(Directory dir)
-        {
-            string[] files = dir.ListAll();
-
-            List<IndexCommit> commits = new List<IndexCommit>();
-
-            SegmentInfos latest = new SegmentInfos();
-            latest.Read(dir);
-            long currentGen = latest.Generation;
-
-            commits.Add(new StandardDirectoryReader.ReaderCommit(latest, dir));
-
-            for (int i = 0; i < files.Length; i++)
-            {
-                string fileName = files[i];
-
-                if (fileName.StartsWith(IndexFileNames.SEGMENTS, StringComparison.Ordinal) && !fileName.Equals(IndexFileNames.SEGMENTS_GEN, StringComparison.Ordinal) && SegmentInfos.GenerationFromSegmentsFileName(fileName) < currentGen)
-                {
-                    SegmentInfos sis = new SegmentInfos();
-                    try
-                    {
-                        // IOException allowed to throw there, in case
-                        // segments_N is corrupt
-                        sis.Read(dir, fileName);
-                    }
-                    catch (FileNotFoundException)
-                    {
-                        // LUCENE-948: on NFS (and maybe others), if
-                        // you have writers switching back and forth
-                        // between machines, it's very likely that the
-                        // dir listing will be stale and will claim a
-                        // file segments_X exists when in fact it
-                        // doesn't.  So, we catch this and handle it
-                        // as if the file does not exist
-                        sis = null;
-                    }
-                    // LUCENENET specific - .NET (thankfully) only has one FileNotFoundException, so we don't need this
-                    //catch (NoSuchFileException)
-                    //{
-                    //    sis = null;
-                    //}
-                    // LUCENENET specific - since NoSuchDirectoryException subclasses FileNotFoundException
-                    // in Lucene, we need to catch it here to be on the safe side.
-                    catch (System.IO.DirectoryNotFoundException)
-                    {
-                        // LUCENE-948: on NFS (and maybe others), if
-                        // you have writers switching back and forth
-                        // between machines, it's very likely that the
-                        // dir listing will be stale and will claim a
-                        // file segments_X exists when in fact it
-                        // doesn't.  So, we catch this and handle it
-                        // as if the file does not exist
-                        sis = null;
-                    }
-
-                    if (sis != null)
-                    {
-                        commits.Add(new StandardDirectoryReader.ReaderCommit(sis, dir));
-                    }
-                }
-            }
-
-            // Ensure that the commit points are sorted in ascending order.
-            commits.Sort();
-
-            return commits;
-        }
-
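A sketch of walking the commit points and opening a reader on the oldest one; this only yields more than one commit under a deletion policy that keeps old commits, and dir is the hypothetical directory from the earlier sketches (System.Collections.Generic assumed for IList&lt;T&gt;):

    IList<IndexCommit> commits = DirectoryReader.ListCommits(dir);
    foreach (IndexCommit c in commits)
    {
        Console.WriteLine("commit generation " + c.Generation);
    }

    // The list is sorted from oldest to latest, so commits[0] is the oldest point.
    using (DirectoryReader historical = DirectoryReader.Open(commits[0]))
    {
        // search the index exactly as it was at that commit
    }
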
-        /// <summary>
-        /// Returns <c>true</c> if an index likely exists at
-        /// the specified directory.  Note that if a corrupt index
-        /// exists, or if an index is in the process of committing,
-        /// this method may still return <c>true</c>. </summary>
-        /// <param name="directory"> the directory to check for an index </param>
-        /// <returns> <c>true</c> if an index exists; <c>false</c> otherwise </returns>
-        public static bool IndexExists(Directory directory)
-        {
-            // LUCENE-2812, LUCENE-2727, LUCENE-4738: this logic will
-            // return true in cases that should arguably be false,
-            // such as only IW.prepareCommit has been called, or a
-            // corrupt first commit, but it's too deadly to make
-            // this logic "smarter" and risk accidentally returning
-            // false due to various cases like file descriptor
-            // exhaustion, access denied, etc., because in that
-            // case IndexWriter may delete the entire index.  It's
-            // safer to err towards "index exists" than try to be
-            // smart about detecting not-yet-fully-committed or
-            // corrupt indices.  This means that IndexWriter will
-            // throw an exception on such indices and the app must
-            // resolve the situation manually:
-            string[] files;
-            try
-            {
-                files = directory.ListAll();
-            }
-#pragma warning disable 168
-            catch (DirectoryNotFoundException nsde)
-#pragma warning restore 168
-            {
-                // Directory does not exist --> no index exists
-                return false;
-            }
-
-            // Defensive: maybe a Directory impl returns null
-            // instead of throwing NoSuchDirectoryException:
-            if (files != null)
-            {
-                string prefix = IndexFileNames.SEGMENTS + "_";
-                foreach (string file in files)
-                {
-                    if (file.StartsWith(prefix, StringComparison.Ordinal) || file.Equals(IndexFileNames.SEGMENTS_GEN, StringComparison.Ordinal))
-                    {
-                        return true;
-                    }
-                }
-            }
-            return false;
-        }
-
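A guard sketch using this check before opening, matching the conservative semantics described above (dir as in the earlier sketches):

    if (DirectoryReader.IndexExists(dir))
    {
        using (DirectoryReader r = DirectoryReader.Open(dir))
        {
            Console.WriteLine(r.NumDocs + " live documents");
        }
    }
    else
    {
        // No segments file found: either nothing was ever committed here,
        // or the path is wrong.
        Console.WriteLine("no index at this location");
    }
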
-        /// <summary>
-        /// Expert: Constructs a <see cref="DirectoryReader"/> on the given <paramref name="segmentReaders"/>. </summary>
-        /// <param name="segmentReaders"> the wrapped atomic index segment readers. This array is
-        /// returned by <see cref="CompositeReader.GetSequentialSubReaders"/> and used to resolve the correct
-        /// subreader for docID-based methods. <b>Please note:</b> this array is <b>not</b>
-        /// cloned and not protected for modification outside of this reader.
-        /// Subclasses of <see cref="DirectoryReader"/> should take care to not allow
-        /// modification of this internal array, e.g. <see cref="DoOpenIfChanged()"/>. </param>
-        protected DirectoryReader(Directory directory, AtomicReader[] segmentReaders)
-            : base(segmentReaders)
-        {
-            this.m_directory = directory;
-        }
-
-        /// <summary>
-        /// Returns the directory this index resides in. </summary>
-        public Directory Directory
-        {
-            get
-            {
-                // Don't ensureOpen here -- in certain cases, when a
-                // cloned/reopened reader needs to commit, it may call
-                // this method on the closed original reader
-                return m_directory;
-            }
-        }
-
-        /// <summary>
-        /// Implement this method to support <see cref="OpenIfChanged(DirectoryReader)"/>.
-        /// If this reader does not support reopen, return <c>null</c>, so
-        /// client code is happy. This should be consistent with <see cref="IsCurrent()"/>
-        /// (should always return <c>true</c>) if reopen is not supported. </summary>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        /// <returns> <c>null</c> if there are no changes; else, a new
-        /// <see cref="DirectoryReader"/> instance. </returns>
-        protected internal abstract DirectoryReader DoOpenIfChanged();
-
-        /// <summary>
-        /// Implement this method to support <see cref="OpenIfChanged(DirectoryReader, IndexCommit)"/>.
-        /// If this reader does not support reopen from a specific <see cref="Index.IndexCommit"/>,
-        /// throw <see cref="NotSupportedException"/>. </summary>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        /// <returns> <c>null</c> if there are no changes; else, a new
-        /// <see cref="DirectoryReader"/> instance. </returns>
-        protected internal abstract DirectoryReader DoOpenIfChanged(IndexCommit commit);
-
-        /// <summary>
-        /// Implement this method to support <see cref="OpenIfChanged(DirectoryReader, IndexWriter, bool)"/>.
-        /// If this reader does not support reopen from <see cref="IndexWriter"/>,
-        /// throw <see cref="NotSupportedException"/>. </summary>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        /// <returns> <c>null</c> if there are no changes; else, a new
-        /// <see cref="DirectoryReader"/> instance. </returns>
-        protected internal abstract DirectoryReader DoOpenIfChanged(IndexWriter writer, bool applyAllDeletes);
-
-        /// <summary>
-        /// Version number when this <see cref="IndexReader"/> was opened.
-        ///
-        /// <para>This method
-        /// returns the version recorded in the commit that the
-        /// reader opened.  This version is advanced every time
-        /// a change is made with <see cref="IndexWriter"/>.</para>
-        /// </summary>
-        public abstract long Version { get; }
-
-        /// <summary>
-        /// Check whether any new changes have occurred to the
-        /// index since this reader was opened.
-        ///
-        /// <para>If this reader was created by calling <see cref="Open"/>,
-        /// then this method checks if any further commits
-        /// (see <see cref="IndexWriter.Commit()"/>) have occurred in the
-        /// directory.</para>
-        ///
-        /// <para>If instead this reader is a near real-time reader
-        /// (i.e., obtained by a call to 
-        /// <see cref="DirectoryReader.Open(IndexWriter, bool)"/>, or by calling <see cref="OpenIfChanged"/>
-        /// on a near real-time reader), then this method checks if
-        /// either a new commit has occurred, or any new
-        /// uncommitted changes have taken place via the writer.
-        /// Note that even if the writer has only performed
-        /// merging, this method will still return <c>false</c>.</para>
-        ///
-        /// <para>In any event, if this returns <c>false</c>, you should call
-        /// <see cref="OpenIfChanged"/> to get a new reader that sees the
-        /// changes.</para>
-        /// </summary>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        public abstract bool IsCurrent();
-
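A sketch combining Version and IsCurrent() in the polling style these members support (reader as in the earlier sketches):

    long versionBefore = reader.Version;
    if (!reader.IsCurrent())               // the index changed since this reader was opened
    {
        DirectoryReader newer = DirectoryReader.OpenIfChanged(reader);
        if (newer != null)
        {
            reader.Dispose();
            reader = newer;
            Console.WriteLine("version " + versionBefore + " -> " + reader.Version);
        }
    }
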
-        /// <summary>
-        /// Expert: return the <see cref="Index.IndexCommit"/> that this reader has opened.
-        /// <para/>
-        /// @lucene.experimental
-        /// </summary>
-        public abstract IndexCommit IndexCommit { get; }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/DocConsumer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DocConsumer.cs b/src/Lucene.Net.Core/Index/DocConsumer.cs
deleted file mode 100644
index 6801018..0000000
--- a/src/Lucene.Net.Core/Index/DocConsumer.cs
+++ /dev/null
@@ -1,34 +0,0 @@
-using System;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    internal abstract class DocConsumer
-    {
-        public abstract void ProcessDocument(FieldInfos.Builder fieldInfos);
-
-        internal abstract void FinishDocument();
-
-        public abstract void Flush(SegmentWriteState state);
-
-        public abstract void Abort();
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/DocFieldConsumer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DocFieldConsumer.cs b/src/Lucene.Net.Core/Index/DocFieldConsumer.cs
deleted file mode 100644
index 283704b..0000000
--- a/src/Lucene.Net.Core/Index/DocFieldConsumer.cs
+++ /dev/null
@@ -1,43 +0,0 @@
-using System;
-using System.Collections.Generic;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    internal abstract class DocFieldConsumer
-    {
-        /// <summary>
-        /// Called when <see cref="DocumentsWriterPerThread"/> decides to create a new
-        /// segment
-        /// </summary>
-        internal abstract void Flush(IDictionary<string, DocFieldConsumerPerField> fieldsToFlush, SegmentWriteState state);
-
-        /// <summary>
-        /// Called when an aborting exception is hit </summary>
-        internal abstract void Abort();
-
-        public abstract void StartDocument();
-
-        public abstract DocFieldConsumerPerField AddField(FieldInfo fi);
-
-        public abstract void FinishDocument();
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/DocFieldConsumerPerField.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DocFieldConsumerPerField.cs b/src/Lucene.Net.Core/Index/DocFieldConsumerPerField.cs
deleted file mode 100644
index 9cd1bf0..0000000
--- a/src/Lucene.Net.Core/Index/DocFieldConsumerPerField.cs
+++ /dev/null
@@ -1,34 +0,0 @@
-using System;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    internal abstract class DocFieldConsumerPerField
-    {
-        /// <summary>
-        /// Processes all occurrences of a single field </summary>
-        public abstract void ProcessFields(IIndexableField[] fields, int count);
-
-        internal abstract void Abort();
-
-        internal abstract FieldInfo FieldInfo { get; }
-    }
-}
\ No newline at end of file

