lucenenet-commits mailing list archives

From nightowl...@apache.org
Subject [11/62] [abbrv] [partial] lucenenet git commit: Renamed Lucene.Net.Core folder Lucene.Net because the dotnet.exe pack command doesn't allow creating a NuGet package with a different name than its folder. Working around it with the script was much more co
Date Tue, 04 Apr 2017 17:19:17 GMT
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/LogDocMergePolicy.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/LogDocMergePolicy.cs b/src/Lucene.Net.Core/Index/LogDocMergePolicy.cs
deleted file mode 100644
index 397e1f4..0000000
--- a/src/Lucene.Net.Core/Index/LogDocMergePolicy.cs
+++ /dev/null
@@ -1,77 +0,0 @@
-using System;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    /// <summary>
-    /// This is a <seealso cref="LogMergePolicy"/> that measures the size of a
-    ///  segment as the number of documents (not taking deletions
-    ///  into account).
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public class LogDocMergePolicy : LogMergePolicy
-    {
-        /// <summary>
-        /// Default minimum segment size. </summary>
-        /// <seealso cref="MinMergeDocs"/>
-        public static readonly int DEFAULT_MIN_MERGE_DOCS = 1000;
-
-        /// <summary>
-        /// Sole constructor, setting all settings to their
-        ///  defaults.
-        /// </summary>
-        public LogDocMergePolicy()
-        {
-            m_minMergeSize = DEFAULT_MIN_MERGE_DOCS;
-
-            // maxMergeSize(ForForcedMerge) are never used by LogDocMergePolicy; set
-            // it to Long.MAX_VALUE to disable it
-            m_maxMergeSize = long.MaxValue;
-            m_maxMergeSizeForForcedMerge = long.MaxValue;
-        }
-
-        protected override long Size(SegmentCommitInfo info)
-        {
-            return SizeDocs(info);
-        }
-
-        /// <summary>
-        /// Sets the minimum size for the lowest level segments.
-        /// Any segments below this size are considered to be on
-        /// the same level (even if they vary drastically in size)
-        /// and will be merged whenever there are mergeFactor of
-        /// them.  this effectively truncates the "long tail" of
-        /// small segments that would otherwise be created into a
-        /// single level.  If you set this too large, it could
-        /// greatly increase the merging cost during indexing (if
-        /// you flush many small segments).
-        /// </summary>
-        public virtual int MinMergeDocs
-        {
-            set
-            {
-                m_minMergeSize = value;
-            }
-            get
-            {
-                return (int)m_minMergeSize;
-            }
-        }
-    }
-}
\ No newline at end of file
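
For context while reviewing this move, here is a minimal usage sketch of the policy above. It is an illustration only, assuming the Lucene.Net 4.8 API shape at the time of this commit (IndexWriterConfig, LuceneVersion.LUCENE_48); the analyzer choice and the "index" path are arbitrary.

    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Index;
    using Lucene.Net.Store;
    using Lucene.Net.Util;

    // Merge by document count rather than byte size.
    var analyzer = new StandardAnalyzer(LuceneVersion.LUCENE_48);
    var config = new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer)
    {
        MergePolicy = new LogDocMergePolicy
        {
            MinMergeDocs = 1000, // segments below this count share the lowest level
            MergeFactor = 10     // merge 10 same-level segments at a time
        }
    };
    using (var writer = new IndexWriter(FSDirectory.Open("index"), config))
    {
        // writer.AddDocument(...) calls here; merges follow the policy above.
    }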

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/LogMergePolicy.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/LogMergePolicy.cs b/src/Lucene.Net.Core/Index/LogMergePolicy.cs
deleted file mode 100644
index 4c2b3ab..0000000
--- a/src/Lucene.Net.Core/Index/LogMergePolicy.cs
+++ /dev/null
@@ -1,771 +0,0 @@
-using Lucene.Net.Support;
-using System;
-using System.Collections.Generic;
-using System.Diagnostics;
-using System.Globalization;
-using System.Text;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    /// <summary>
-    /// <para>This class implements a <seealso cref="MergePolicy"/> that tries
-    /// to merge segments into levels of exponentially
-    /// increasing size, where each level has fewer segments than
-    /// the value of the merge factor. Whenever extra segments
-    /// (beyond the merge factor upper bound) are encountered,
-    /// all segments within the level are merged. You can get or
-    /// set the merge factor using the <see cref="MergeFactor"/>
-    /// property.</para>
-    ///
-    /// <para>This class is abstract and requires a subclass to
-    /// define the <see cref="Size(SegmentCommitInfo)"/> method which specifies how a
-    /// segment's size is determined.  <seealso cref="LogDocMergePolicy"/>
-    /// is one subclass that measures size by document count in
-    /// the segment.  <seealso cref="LogByteSizeMergePolicy"/> is another
-    /// subclass that measures size as the total byte size of the
-    /// file(s) for the segment.</para>
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public abstract class LogMergePolicy : MergePolicy
-    {
-        /// <summary>
-        /// Defines the allowed range of log(size) for each
-        ///  level.  A level is computed by taking the max segment
-        ///  log size, minus LEVEL_LOG_SPAN, and finding all
-        ///  segments falling within that range.
-        /// </summary>
-        public static readonly double LEVEL_LOG_SPAN = 0.75;
-
-        /// <summary>
-        /// Default merge factor, which is how many segments are
-        ///  merged at a time
-        /// </summary>
-        public static readonly int DEFAULT_MERGE_FACTOR = 10;
-
-        /// <summary>
-        /// Default maximum segment size.  A segment of this size
-        ///  or larger will never be merged. </summary>
-        ///  <seealso cref="MaxMergeDocs"/>
-        public static readonly int DEFAULT_MAX_MERGE_DOCS = int.MaxValue;
-
-        /// <summary>
-        /// Default noCFSRatio.  If a merge's size is >= 10% of
-        ///  the index, then we disable compound file for it. </summary>
-        ///  <seealso cref="MergePolicy.NoCFSRatio"/>
-        public new static readonly double DEFAULT_NO_CFS_RATIO = 0.1;
-
-        /// <summary>
-        /// How many segments to merge at a time. </summary>
-        protected int m_mergeFactor = DEFAULT_MERGE_FACTOR;
-
-        /// <summary>
-        /// Any segments whose size is smaller than this value
-        ///  will be rounded up to this value.  This ensures that
-        ///  tiny segments are aggressively merged.
-        /// </summary>
-        protected long m_minMergeSize;
-
-        /// <summary>
-        /// If the size of a segment exceeds this value then it
-        ///  will never be merged.
-        /// </summary>
-        protected long m_maxMergeSize;
-
-        // Although the core MPs set it explicitly, we must default in case someone
-        // out there wrote his own LMP ...
-        /// <summary>
-        /// If the size of a segment exceeds this value then it
-        /// will never be merged during <see cref="IndexWriter.ForceMerge(int)"/>.
-        /// </summary>
-        protected long m_maxMergeSizeForForcedMerge = long.MaxValue;
-
-        /// <summary>
-        /// If a segment has more than this many documents then it
-        ///  will never be merged.
-        /// </summary>
-        protected int m_maxMergeDocs = DEFAULT_MAX_MERGE_DOCS;
-
-        /// <summary>
-        /// If true, we pro-rate a segment's size by the
-        ///  percentage of non-deleted documents.
-        /// </summary>
-        protected bool m_calibrateSizeByDeletes = true;
-
-        /// <summary>
-        /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
-        /// </summary>
-        public LogMergePolicy()
-            : base(DEFAULT_NO_CFS_RATIO, MergePolicy.DEFAULT_MAX_CFS_SEGMENT_SIZE)
-        {
-        }
-
-        /// <summary>
-        /// Returns <c>true</c> if verbose <c>LMP</c> logging is enabled for this <see cref="LogMergePolicy"/> via the <see cref="IndexWriter"/> info stream.
-        /// </summary>
-        protected virtual bool IsVerbose
-        {
-            get
-            {
-                IndexWriter w = m_writer.Get();
-                return w != null && w.infoStream.IsEnabled("LMP");
-            }
-        }
-
-        /// <summary>
-        /// Print a debug message to <see cref="IndexWriter"/>'s
-        ///  <c>infoStream</c>.
-        /// </summary>
-        protected virtual void Message(string message)
-        {
-            if (IsVerbose)
-            {
-                m_writer.Get().infoStream.Message("LMP", message);
-            }
-        }
-
-        /// <summary>
-        /// Gets or Sets the number of segments that are merged at
-        /// once and also controls the total number of segments
-        /// allowed to accumulate in the index.
-        /// <para/>
-        /// This determines how often segment indices are merged by
-        /// AddDocument().  With smaller values, less RAM is used
-        /// while indexing, and searches are
-        /// faster, but indexing speed is slower.  With larger
-        /// values, more RAM is used during indexing, and while
-        /// searches are slower, indexing is
-        /// faster.  Thus larger values (&gt; 10) are best for batch
-        /// index creation, and smaller values (&lt; 10) for indices
-        /// that are interactively maintained.
-        /// </summary>
-        public virtual int MergeFactor
-        {
-            get
-            {
-                return m_mergeFactor;
-            }
-            set
-            {
-                if (value < 2)
-                {
-                    throw new System.ArgumentException("mergeFactor cannot be less than 2");
-                }
-                this.m_mergeFactor = value;
-            }
-        }
-
-        /// <summary>
-        /// Gets or Sets whether the segment size should be calibrated by
-        ///  the number of deletes when choosing segments for merge.
-        /// </summary>
-        public virtual bool CalibrateSizeByDeletes
-        {
-            set
-            {
-                this.m_calibrateSizeByDeletes = value;
-            }
-            get
-            {
-                return m_calibrateSizeByDeletes;
-            }
-        }
-
-        protected override void Dispose(bool disposing)
-        {
-        }
-
-        /// <summary>
-        /// Return the number of documents in the provided
-        ///  <see cref="SegmentCommitInfo"/>, pro-rated by percentage of
-        ///  non-deleted documents if
-        ///  <see cref="CalibrateSizeByDeletes"/> is set.
-        /// </summary>
-        protected virtual long SizeDocs(SegmentCommitInfo info)
-        {
-            if (m_calibrateSizeByDeletes)
-            {
-                int delCount = m_writer.Get().NumDeletedDocs(info);
-                Debug.Assert(delCount <= info.Info.DocCount);
-                return (info.Info.DocCount - (long)delCount);
-            }
-            else
-            {
-                return info.Info.DocCount;
-            }
-        }
-
-        /// <summary>
-        /// Return the byte size of the provided
-        ///  <see cref="SegmentCommitInfo"/>, pro-rated by percentage of
-        ///  non-deleted documents if
-        ///  <see cref="CalibrateSizeByDeletes"/> is set.
-        /// </summary>
-        protected virtual long SizeBytes(SegmentCommitInfo info)
-        {
-            if (m_calibrateSizeByDeletes)
-            {
-                return base.Size(info);
-            }
-            return info.GetSizeInBytes();
-        }
-
-        /// <summary>
-        /// Returns true if the number of segments eligible for
-        ///  merging is less than or equal to the specified
-        ///  <paramref name="maxNumSegments"/>.
-        /// </summary>
-        protected virtual bool IsMerged(SegmentInfos infos, int maxNumSegments, IDictionary<SegmentCommitInfo, bool?> segmentsToMerge)
-        {
-            int numSegments = infos.Count;
-            int numToMerge = 0;
-            SegmentCommitInfo mergeInfo = null;
-            bool segmentIsOriginal = false;
-            for (int i = 0; i < numSegments && numToMerge <= maxNumSegments; i++)
-            {
-                SegmentCommitInfo info = infos.Info(i);
-                bool? isOriginal;
-                segmentsToMerge.TryGetValue(info, out isOriginal);
-                if (isOriginal != null)
-                {
-                    segmentIsOriginal = isOriginal.Value;
-                    numToMerge++;
-                    mergeInfo = info;
-                }
-            }
-
-            return numToMerge <= maxNumSegments && (numToMerge != 1 || !segmentIsOriginal || IsMerged(infos, mergeInfo));
-        }
-
-        /// <summary>
-        /// Returns the merges necessary to merge the index, taking the max merge
-        /// size or max merge docs into consideration. This method attempts to respect
-        /// the <paramref name="maxNumSegments"/> parameter; however, due to size
-        /// constraints, more than that number of segments may remain in the
-        /// index. Also, this method does not guarantee that exactly
-        /// <paramref name="maxNumSegments"/> segments will remain, but &lt;= that number.
-        /// </summary>
-        private MergeSpecification FindForcedMergesSizeLimit(SegmentInfos infos, int maxNumSegments, int last)
-        {
-            MergeSpecification spec = new MergeSpecification();
-            IList<SegmentCommitInfo> segments = infos.AsList();
-
-            int start = last - 1;
-            while (start >= 0)
-            {
-                SegmentCommitInfo info = infos.Info(start);
-                if (Size(info) > m_maxMergeSizeForForcedMerge || SizeDocs(info) > m_maxMergeDocs)
-                {
-                    if (IsVerbose)
-                    {
-                        Message("findForcedMergesSizeLimit: skip segment=" + info + ": size is > maxMergeSize (" + m_maxMergeSizeForForcedMerge + ") or sizeDocs is > maxMergeDocs (" + m_maxMergeDocs + ")");
-                    }
-                    // need to skip that segment + add a merge for the 'right' segments,
-                    // unless there is only 1 which is merged.
-                    if (last - start - 1 > 1 || (start != last - 1 && !IsMerged(infos, infos.Info(start + 1))))
-                    {
-                        // there is more than 1 segment to the right of
-                        // this one, or a mergeable single segment.
-                        spec.Add(new OneMerge(segments.SubList(start + 1, last)));
-                    }
-                    last = start;
-                }
-                else if (last - start == m_mergeFactor)
-                {
-                    // mergeFactor eligible segments were found, add them as a merge.
-                    spec.Add(new OneMerge(segments.SubList(start, last)));
-                    last = start;
-                }
-                --start;
-            }
-
-            // Add any left-over segments, unless there is just 1
-            // already fully merged
-            if (last > 0 && (++start + 1 < last || !IsMerged(infos, infos.Info(start))))
-            {
-                spec.Add(new OneMerge(segments.SubList(start, last)));
-            }
-
-            return spec.Merges.Count == 0 ? null : spec;
-        }
-
-        /// <summary>
-        /// Returns the merges necessary to forceMerge the index. This method constrains
-        /// the returned merges only by the <paramref name="maxNumSegments"/> parameter, and
-        /// guarantees that exactly that number of segments will remain in the index.
-        /// </summary>
-        private MergeSpecification FindForcedMergesMaxNumSegments(SegmentInfos infos, int maxNumSegments, int last)
-        {
-            var spec = new MergeSpecification();
-            var segments = infos.AsList();
-
-            // First, enroll all "full" merges (size
-            // mergeFactor) to potentially be run concurrently:
-            while (last - maxNumSegments + 1 >= m_mergeFactor)
-            {
-                spec.Add(new OneMerge(segments.SubList(last - m_mergeFactor, last)));
-                last -= m_mergeFactor;
-            }
-
-            // Only if there are no full merges pending do we
-            // add a final partial (< mergeFactor segments) merge:
-            if (0 == spec.Merges.Count)
-            {
-                if (maxNumSegments == 1)
-                {
-                    // Since we must merge down to 1 segment, the
-                    // choice is simple:
-                    if (last > 1 || !IsMerged(infos, infos.Info(0)))
-                    {
-                        spec.Add(new OneMerge(segments.SubList(0, last)));
-                    }
-                }
-                else if (last > maxNumSegments)
-                {
-                    // Take care to pick a partial merge that is
-                    // least cost, but does not make the index too
-                    // lopsided.  If we always just picked the
-                    // partial tail then we could produce a highly
-                    // lopsided index over time:
-
-                    // We must merge this many segments to leave
-                    // maxNumSegments in the index (from when
-                    // forceMerge was first kicked off):
-                    int finalMergeSize = last - maxNumSegments + 1;
-
-                    // Consider all possible starting points:
-                    long bestSize = 0;
-                    int bestStart = 0;
-
-                    for (int i = 0; i < last - finalMergeSize + 1; i++)
-                    {
-                        long sumSize = 0;
-                        for (int j = 0; j < finalMergeSize; j++)
-                        {
-                            sumSize += Size(infos.Info(j + i));
-                        }
-                        if (i == 0 || (sumSize < 2 * Size(infos.Info(i - 1)) && sumSize < bestSize))
-                        {
-                            bestStart = i;
-                            bestSize = sumSize;
-                        }
-                    }
-
-                    spec.Add(new OneMerge(segments.SubList(bestStart, bestStart + finalMergeSize)));
-                }
-            }
-            return spec.Merges.Count == 0 ? null : spec;
-        }
-
-        /// <summary>
-        /// Returns the merges necessary to merge the index down
-        ///  to a specified number of segments.
-        ///  This respects the <see cref="m_maxMergeSizeForForcedMerge"/> setting.
-        ///  By default, and assuming <c>maxNumSegments=1</c>, only
-        ///  one segment will be left in the index, where that segment
-        ///  has no deletions pending nor separate norms, and it is in
-        ///  compound file format if the current useCompoundFile
-        ///  setting is true.  This method returns multiple merges
-        ///  (mergeFactor at a time) so the <see cref="MergeScheduler"/>
-        ///  in use may make use of concurrency.
-        /// </summary>
-        public override MergeSpecification FindForcedMerges(SegmentInfos infos, int maxNumSegments, IDictionary<SegmentCommitInfo, bool?> segmentsToMerge)
-        {
-            Debug.Assert(maxNumSegments > 0);
-            if (IsVerbose)
-            {
-                Message("findForcedMerges: maxNumSegs=" + maxNumSegments + " segsToMerge=" + Arrays.ToString(segmentsToMerge));
-            }
-
-            // If the segments are already merged (e.g. there's only 1 segment), or
-            // there are <= maxNumSegments eligible segments, there is nothing to do:
-            if (IsMerged(infos, maxNumSegments, segmentsToMerge))
-            {
-                if (IsVerbose)
-                {
-                    Message("already merged; skip");
-                }
-                return null;
-            }
-
-            // Find the newest (rightmost) segment that needs to
-            // be merged (other segments may have been flushed
-            // since merging started):
-            int last = infos.Count;
-            while (last > 0)
-            {
-                SegmentCommitInfo info = infos.Info(--last);
-                if (segmentsToMerge.ContainsKey(info))
-                {
-                    last++;
-                    break;
-                }
-            }
-
-            if (last == 0)
-            {
-                if (IsVerbose)
-                {
-                    Message("last == 0; skip");
-                }
-                return null;
-            }
-
-            // There is only one segment already, and it is merged
-            if (maxNumSegments == 1 && last == 1 && IsMerged(infos, infos.Info(0)))
-            {
-                if (IsVerbose)
-                {
-                    Message("already 1 seg; skip");
-                }
-                return null;
-            }
-
-            // Check if there are any segments above the threshold
-            bool anyTooLarge = false;
-            for (int i = 0; i < last; i++)
-            {
-                SegmentCommitInfo info = infos.Info(i);
-                if (Size(info) > m_maxMergeSizeForForcedMerge || SizeDocs(info) > m_maxMergeDocs)
-                {
-                    anyTooLarge = true;
-                    break;
-                }
-            }
-
-            if (anyTooLarge)
-            {
-                return FindForcedMergesSizeLimit(infos, maxNumSegments, last);
-            }
-            else
-            {
-                return FindForcedMergesMaxNumSegments(infos, maxNumSegments, last);
-            }
-        }
-
-        /// <summary>
-        /// Finds merges necessary to force-merge all deletes from the
-        /// index.  We simply merge adjacent segments that have
-        /// deletes, up to mergeFactor at a time.
-        /// </summary>
-        public override MergeSpecification FindForcedDeletesMerges(SegmentInfos segmentInfos)
-        {
-            var segments = segmentInfos.AsList();
-            int numSegments = segments.Count;
-
-            if (IsVerbose)
-            {
-                Message("findForcedDeleteMerges: " + numSegments + " segments");
-            }
-
-            var spec = new MergeSpecification();
-            int firstSegmentWithDeletions = -1;
-            IndexWriter w = m_writer.Get();
-            Debug.Assert(w != null);
-            for (int i = 0; i < numSegments; i++)
-            {
-                SegmentCommitInfo info = segmentInfos.Info(i);
-                int delCount = w.NumDeletedDocs(info);
-                if (delCount > 0)
-                {
-                    if (IsVerbose)
-                    {
-                        Message("  segment " + info.Info.Name + " has deletions");
-                    }
-                    if (firstSegmentWithDeletions == -1)
-                    {
-                        firstSegmentWithDeletions = i;
-                    }
-                    else if (i - firstSegmentWithDeletions == m_mergeFactor)
-                    {
-                        // We've seen mergeFactor segments in a row with
-                        // deletions, so force a merge now:
-                        if (IsVerbose)
-                        {
-                            Message("  add merge " + firstSegmentWithDeletions + " to " + (i - 1) + " inclusive");
-                        }
-                        spec.Add(new OneMerge(segments.SubList(firstSegmentWithDeletions, i)));
-                        firstSegmentWithDeletions = i;
-                    }
-                }
-                else if (firstSegmentWithDeletions != -1)
-                {
-                    // End of a sequence of segments with deletions, so,
-                    // merge those past segments even if it's fewer than
-                    // mergeFactor segments
-                    if (IsVerbose)
-                    {
-                        Message("  add merge " + firstSegmentWithDeletions + " to " + (i - 1) + " inclusive");
-                    }
-                    spec.Add(new OneMerge(segments.SubList(firstSegmentWithDeletions, i)));
-                    firstSegmentWithDeletions = -1;
-                }
-            }
-
-            if (firstSegmentWithDeletions != -1)
-            {
-                if (IsVerbose)
-                {
-                    Message("  add merge " + firstSegmentWithDeletions + " to " + (numSegments - 1) + " inclusive");
-                }
-                spec.Add(new OneMerge(segments.SubList(firstSegmentWithDeletions, numSegments)));
-            }
-
-            return spec;
-        }
-
-        private class SegmentInfoAndLevel : IComparable<SegmentInfoAndLevel>
-        {
-            internal readonly SegmentCommitInfo info;
-            internal readonly float level;
-            private int index;
-
-            public SegmentInfoAndLevel(SegmentCommitInfo info, float level, int index)
-            {
-                this.info = info;
-                this.level = level;
-                this.index = index;
-            }
-
-            // Sorts largest to smallest
-            public virtual int CompareTo(SegmentInfoAndLevel other)
-            {
-                return other.level.CompareTo(level);
-            }
-        }
-
-        /// <summary>
-        /// Checks if any merges are now necessary and returns a
-        ///  <seealso cref="MergePolicy.MergeSpecification"/> if so.  A merge
-        ///  is necessary when there are more than
-        ///  <see cref="MergeFactor"/> segments at a given level.  When
-        ///  multiple levels have too many segments, this method
-        ///  will return multiple merges, allowing the
-        ///  <see cref="MergeScheduler"/> to use concurrency.
-        /// </summary>
-        public override MergeSpecification FindMerges(MergeTrigger mergeTrigger, SegmentInfos infos)
-        {
-            int numSegments = infos.Count;
-            if (IsVerbose)
-            {
-                Message("findMerges: " + numSegments + " segments");
-            }
-
-            // Compute levels, which is just log (base mergeFactor)
-            // of the size of each segment
-            IList<SegmentInfoAndLevel> levels = new List<SegmentInfoAndLevel>();
-            var norm = (float)Math.Log(m_mergeFactor);
-
-            ICollection<SegmentCommitInfo> mergingSegments = m_writer.Get().MergingSegments;
-
-            for (int i = 0; i < numSegments; i++)
-            {
-                SegmentCommitInfo info = infos.Info(i);
-                long size = Size(info);
-
-                // Floor tiny segments
-                if (size < 1)
-                {
-                    size = 1;
-                }
-
-                SegmentInfoAndLevel infoLevel = new SegmentInfoAndLevel(info, (float)Math.Log(size) / norm, i);
-                levels.Add(infoLevel);
-
-                if (IsVerbose)
-                {
-                    long segBytes = SizeBytes(info);
-                    string extra = mergingSegments.Contains(info) ? " [merging]" : "";
-                    if (size >= m_maxMergeSize)
-                    {
-                        extra += " [skip: too large]";
-                    }
-                    Message("seg=" + m_writer.Get().SegString(info) + " level=" + infoLevel.level + " size=" + String.Format(CultureInfo.InvariantCulture, "{0:0.00} MB", segBytes / 1024 / 1024.0) + extra);
-                }
-            }
-
-            float levelFloor;
-            if (m_minMergeSize <= 0)
-            {
-                levelFloor = (float)0.0;
-            }
-            else
-            {
-                levelFloor = (float)(Math.Log(m_minMergeSize) / norm);
-            }
-
-            // Now, we quantize the log values into levels.  The
-            // first level is any segment whose log size is within
-            // LEVEL_LOG_SPAN of the max size, or that has such a
-            // segment "to the right".  Then, we find the max of all
-            // other segments and use that to define the next level
-            // segment, etc.
-
-            MergeSpecification spec = null;
-
-            int numMergeableSegments = levels.Count;
-
-            int start = 0;
-            while (start < numMergeableSegments)
-            {
-                // Find max level of all segments not already
-                // quantized.
-                float maxLevel = levels[start].level;
-                for (int i = 1 + start; i < numMergeableSegments; i++)
-                {
-                    float level = levels[i].level;
-                    if (level > maxLevel)
-                    {
-                        maxLevel = level;
-                    }
-                }
-
-                // Now search backwards for the rightmost segment that
-                // falls into this level:
-                float levelBottom;
-                if (maxLevel <= levelFloor)
-                {
-                    // All remaining segments fall into the min level
-                    levelBottom = -1.0F;
-                }
-                else
-                {
-                    levelBottom = (float)(maxLevel - LEVEL_LOG_SPAN);
-
-                    // Force a boundary at the level floor
-                    if (levelBottom < levelFloor && maxLevel >= levelFloor)
-                    {
-                        levelBottom = levelFloor;
-                    }
-                }
-
-                int upto = numMergeableSegments - 1;
-                while (upto >= start)
-                {
-                    if (levels[upto].level >= levelBottom)
-                    {
-                        break;
-                    }
-                    upto--;
-                }
-                if (IsVerbose)
-                {
-                    Message("  level " + levelBottom.ToString("0.0") + " to " + maxLevel.ToString("0.0") + ": " + (1 + upto - start) + " segments");
-                }
-
-                // Finally, record all merges that are viable at this level:
-                int end = start + m_mergeFactor;
-                while (end <= 1 + upto)
-                {
-                    bool anyTooLarge = false;
-                    bool anyMerging = false;
-                    for (int i = start; i < end; i++)
-                    {
-                        SegmentCommitInfo info = levels[i].info;
-                        anyTooLarge |= (Size(info) >= m_maxMergeSize || SizeDocs(info) >= m_maxMergeDocs);
-                        if (mergingSegments.Contains(info))
-                        {
-                            anyMerging = true;
-                            break;
-                        }
-                    }
-
-                    if (anyMerging)
-                    {
-                        // skip
-                    }
-                    else if (!anyTooLarge)
-                    {
-                        if (spec == null)
-                        {
-                            spec = new MergeSpecification();
-                        }
-                        IList<SegmentCommitInfo> mergeInfos = new List<SegmentCommitInfo>();
-                        for (int i = start; i < end; i++)
-                        {
-                            mergeInfos.Add(levels[i].info);
-                            Debug.Assert(infos.Contains(levels[i].info));
-                        }
-                        if (IsVerbose)
-                        {
-                            Message("  add merge=" + m_writer.Get().SegString(mergeInfos) + " start=" + start + " end=" + end);
-                        }
-                        spec.Add(new OneMerge(mergeInfos));
-                    }
-                    else if (IsVerbose)
-                    {
-                        Message("    " + start + " to " + end + ": contains segment over maxMergeSize or maxMergeDocs; skipping");
-                    }
-
-                    start = end;
-                    end = start + m_mergeFactor;
-                }
-
-                start = 1 + upto;
-            }
-
-            return spec;
-        }
-
-        /// <summary>
-        /// <para>Determines the largest segment (measured by
-        /// document count) that may be merged with other segments.
-        /// Small values (e.g., less than 10,000) are best for
-        /// interactive indexing, as this limits the length of
-        /// pauses while indexing to a few seconds.  Larger values
-        /// are best for batched indexing and speedier
-        /// searches.</para>
-        ///
-        /// <para>The default value is <see cref="int.MaxValue"/>.</para>
-        ///
-        /// <para>The default merge policy
-        /// (<see cref="LogByteSizeMergePolicy"/>) also allows you to set this
-        /// limit by net size (in MB) of the segment, using
-        /// <see cref="LogByteSizeMergePolicy.MaxMergeMB"/>.</para>
-        /// </summary>
-        public virtual int MaxMergeDocs
-        {
-            set
-            {
-                this.m_maxMergeDocs = value;
-            }
-            get
-            {
-                return m_maxMergeDocs;
-            }
-        }
-
-        public override string ToString()
-        {
-            StringBuilder sb = new StringBuilder("[" + this.GetType().Name + ": ");
-            sb.Append("minMergeSize=").Append(m_minMergeSize).Append(", ");
-            sb.Append("mergeFactor=").Append(m_mergeFactor).Append(", ");
-            sb.Append("maxMergeSize=").Append(m_maxMergeSize).Append(", ");
-            sb.Append("maxMergeSizeForForcedMerge=").Append(m_maxMergeSizeForForcedMerge).Append(", ");
-            sb.Append("calibrateSizeByDeletes=").Append(m_calibrateSizeByDeletes).Append(", ");
-            sb.Append("maxMergeDocs=").Append(m_maxMergeDocs).Append(", ");
-            sb.Append("maxCFSSegmentSizeMB=").Append(MaxCFSSegmentSizeMB).Append(", ");
-            sb.Append("noCFSRatio=").Append(m_noCFSRatio);
-            sb.Append("]");
-            return sb.ToString();
-        }
-    }
-}
\ No newline at end of file
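
To make the level computation in FindMerges above concrete, here is a standalone sketch of the same arithmetic (plain C#, not the Lucene.NET API; the segment sizes are invented):

    using System;

    // A segment's level is log(size) in base mergeFactor; all segments whose
    // level falls within LEVEL_LOG_SPAN (0.75) of the maximum level are
    // treated as one level and become merge candidates together.
    const double LEVEL_LOG_SPAN = 0.75;
    const int mergeFactor = 10;
    double norm = Math.Log(mergeFactor);
    long[] sizes = { 100000, 90000, 1200, 1000, 950, 10 };

    double maxLevel = 0;
    foreach (long size in sizes)
    {
        double level = Math.Log(Math.Max(1, size)) / norm; // tiny segments floored at 1
        maxLevel = Math.Max(maxLevel, level);
        Console.WriteLine($"size={size,7} level={level:0.00}");
    }
    Console.WriteLine($"one level spans [{maxLevel - LEVEL_LOG_SPAN:0.00}, {maxLevel:0.00}]");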

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/MergePolicy.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/MergePolicy.cs b/src/Lucene.Net.Core/Index/MergePolicy.cs
deleted file mode 100644
index 72e2d68..0000000
--- a/src/Lucene.Net.Core/Index/MergePolicy.cs
+++ /dev/null
@@ -1,807 +0,0 @@
-using Lucene.Net.Util;
-using System;
-using System.Collections.Generic;
-using System.Diagnostics;
-#if FEATURE_SERIALIZABLE
-using System.Runtime.Serialization;
-#endif
-using System.Text;
-using System.Threading;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using Directory = Lucene.Net.Store.Directory;
-    using FixedBitSet = Lucene.Net.Util.FixedBitSet;
-    using MergeInfo = Lucene.Net.Store.MergeInfo;
-
-    //using AlreadySetException = Lucene.Net.Util.SetOnce.AlreadySetException;
-
-    /// <summary>
-    /// <para>Expert: a MergePolicy determines the sequence of
-    /// primitive merge operations.</para>
-    ///
-    /// <para>Whenever the segments in an index have been altered by
-    /// <seealso cref="IndexWriter"/>, either the addition of a newly
-    /// flushed segment, addition of many segments from
-    /// AddIndexes* calls, or a previous merge that may now need
-    /// to cascade, <seealso cref="IndexWriter"/> invokes
-    /// <see cref="FindMerges(MergeTrigger, SegmentInfos)"/> to give the MergePolicy a chance to pick
-    /// merges that are now required.  This method returns a
-    /// <seealso cref="MergeSpecification"/> instance describing the set of
-    /// merges that should be done, or null if no merges are
-    /// necessary.  When IndexWriter.ForceMerge is called, it calls
-    /// <see cref="FindForcedMerges(SegmentInfos, int, IDictionary{SegmentCommitInfo, bool?})"/> and the MergePolicy should
-    /// then return the necessary merges.</para>
-    ///
-    /// <para>Note that the policy can return more than one merge at
-    /// a time.  In this case, if the writer is using
-    /// <see cref="SerialMergeScheduler"/>, the merges will be run
-    /// sequentially but if it is using
-    /// <see cref="ConcurrentMergeScheduler"/> they will be run concurrently.</para>
-    ///
-    /// <para>The default MergePolicy is
-    /// <see cref="TieredMergePolicy"/>.</para>
-    ///
-    /// @lucene.experimental
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public abstract class MergePolicy : IDisposable
-    {
-        /// <summary>
-        /// A map of doc IDs. </summary>
-        public abstract class DocMap
-        {
-            /// <summary>
-            /// Sole constructor, typically invoked from sub-classes constructors. </summary>
-            protected DocMap()
-            {
-            }
-
-            /// <summary>
-            /// Return the new doc ID according to its old value. </summary>
-            public abstract int Map(int old);
-
-            /// <summary>
-            /// Useful from an assert. </summary>
-            internal virtual bool IsConsistent(int maxDoc)
-            {
-                FixedBitSet targets = new FixedBitSet(maxDoc);
-                for (int i = 0; i < maxDoc; ++i)
-                {
-                    int target = Map(i);
-                    if (target < 0 || target >= maxDoc)
-                    {
-                        Debug.Assert(false, "out of range: " + target + " not in [0-" + maxDoc + "[");
-                        return false;
-                    }
-                    else if (targets.Get(target))
-                    {
-                        Debug.Assert(false, target + " is already taken (" + i + ")");
-                        return false;
-                    }
-                }
-                return true;
-            }
-        }
-
-        /// <summary>
-        /// OneMerge provides the information necessary to perform
-        ///  an individual primitive merge operation, resulting in
-        ///  a single new segment.  The merge spec includes the
-        ///  subset of segments to be merged as well as whether the
-        ///  new segment should use the compound file format.
-        /// </summary>
-#if FEATURE_SERIALIZABLE
-        [Serializable]
-#endif
-        public class OneMerge
-        {
-            internal SegmentCommitInfo info; // used by IndexWriter
-            internal bool registerDone; // used by IndexWriter
-            internal long mergeGen; // used by IndexWriter
-            internal bool isExternal; // used by IndexWriter
-
-            public int MaxNumSegments // used by IndexWriter
-            {
-                get { return maxNumSegments; }
-                set { maxNumSegments = value; }
-            }
-            private int maxNumSegments = -1;
-
-            /// <summary>
-            /// Estimated size in bytes of the merged segment. </summary>
-            public long EstimatedMergeBytes { get; internal set; } // used by IndexWriter // LUCENENET NOTE: original was volatile, but long cannot be volatile in .NET
-
-
-            // Sum of sizeInBytes of all SegmentInfos; set by IW.mergeInit
-            internal long totalMergeBytes; // LUCENENET NOTE: original was volatile, but long cannot be volatile in .NET
-
-            internal IList<SegmentReader> readers; // used by IndexWriter
-
-            /// <summary>
-            /// Segments to be merged. </summary>
-            public IList<SegmentCommitInfo> Segments { get; private set; }
-
-            /// <summary>
-            /// Number of documents in the merged segment. </summary>
-            public int TotalDocCount { get; private set; }
-
-            internal bool aborted;
-            internal Exception error;
-            internal bool paused;
-
-            /// <summary>
-            /// Sole constructor. </summary>
-            /// <param name="segments"> List of <seealso cref="SegmentCommitInfo"/>s
-            ///        to be merged.  </param>
-            public OneMerge(IList<SegmentCommitInfo> segments)
-            {
-                if (0 == segments.Count)
-                {
-                    throw new Exception("segments must include at least one segment");
-                }
-                // clone the list, as the incoming list may be based off the original SegmentInfos and may be modified
-                this.Segments = new List<SegmentCommitInfo>(segments);
-                int count = 0;
-                foreach (SegmentCommitInfo info in segments)
-                {
-                    count += info.Info.DocCount;
-                }
-                TotalDocCount = count;
-            }
-
-            /// <summary>
-            /// Expert: Get the list of readers to merge. Note that this list does not
-            ///  necessarily match the list of segments to merge and should only be used
-            ///  to feed SegmentMerger to initialize a merge. When a <seealso cref="OneMerge"/>
-            ///  reorders doc IDs, it must override <see cref="GetDocMap(MergeState)"/> too so that
-            ///  deletes that happened during the merge can be applied to the newly
-            ///  merged segment.
-            /// </summary>
-            public virtual IList<AtomicReader> GetMergeReaders()
-            {
-                if (this.readers == null)
-                {
-                    throw new InvalidOperationException("IndexWriter has not initialized readers from the segment infos yet");
-                }
-                IList<AtomicReader> readers = new List<AtomicReader>(this.readers.Count);
-                foreach (AtomicReader reader in this.readers)
-                {
-                    if (reader.NumDocs > 0)
-                    {
-                        readers.Add(reader);
-                    }
-                }
-                return Collections.UnmodifiableList(readers);
-            }
-
-            /// <summary>
-            /// Expert: Sets the <seealso cref="SegmentCommitInfo"/> of this <seealso cref="OneMerge"/>.
-            /// Allows sub-classes to e.g. set diagnostics properties.
-            /// </summary>
-            public virtual SegmentCommitInfo Info
-            {
-                set
-                {
-                    this.info = value;
-                }
-                get
-                {
-                    return info;
-                }
-            }
-
-            /// <summary>
-            /// Expert: If <see cref="GetMergeReaders()"/> reorders document IDs, this method
-            ///  must be overridden to return a mapping from the <i>natural</i> doc ID
-            ///  (the doc ID that would result from a natural merge) to the actual doc
-            ///  ID. This mapping is used to apply deletions that happened during the
-            ///  merge to the new segment.
-            /// </summary>
-            public virtual DocMap GetDocMap(MergeState mergeState)
-            {
-                return new DocMapAnonymousInnerClassHelper(this);
-            }
-
-            private class DocMapAnonymousInnerClassHelper : DocMap
-            {
-                private readonly OneMerge outerInstance;
-
-                public DocMapAnonymousInnerClassHelper(OneMerge outerInstance)
-                {
-                    this.outerInstance = outerInstance;
-                }
-
-                public override int Map(int docID)
-                {
-                    return docID;
-                }
-            }
-
-            /// <summary>
-            /// Record that an exception occurred while executing
-            ///  this merge
-            /// </summary>
-            internal virtual Exception Exception
-            {
-                set
-                {
-                    lock (this)
-                    {
-                        this.error = value;
-                    }
-                }
-                get
-                {
-                    lock (this)
-                    {
-                        return error;
-                    }
-                }
-            }
-
-            /// <summary>
-            /// Mark this merge as aborted.  If this is called
-            ///  before the merge is committed then the merge will
-            ///  not be committed.
-            /// </summary>
-            internal virtual void Abort()
-            {
-                lock (this)
-                {
-                    aborted = true;
-                    Monitor.PulseAll(this);
-                }
-            }
-
-            /// <summary>
-            /// Returns true if this merge was aborted. </summary>
-            internal virtual bool IsAborted
-            {
-                get
-                {
-                    lock (this)
-                    {
-                        return aborted;
-                    }
-                }
-            }
-
-            /// <summary>
-            /// Called periodically by <seealso cref="IndexWriter"/> while
-            ///  merging to see if the merge is aborted.
-            /// </summary>
-            public virtual void CheckAborted(Directory dir)
-            {
-                lock (this)
-                {
-                    if (aborted)
-                    {
-                        throw new MergeAbortedException("merge is aborted: " + SegString(dir));
-                    }
-
-                    while (paused)
-                    {
-#if !NETSTANDARD
-                        try
-                        {
-#endif
-                            // In theory we could wait() indefinitely, but we
-                            // do 1000 msec, defensively
-                            Monitor.Wait(this, TimeSpan.FromMilliseconds(1000));
-#if !NETSTANDARD
-                        }
-                        catch (ThreadInterruptedException ie)
-                        {
-                            throw new Exception(ie.ToString(), ie);
-                        }
-#endif
-                        if (aborted)
-                        {
-                            throw new MergeAbortedException("merge is aborted: " + SegString(dir));
-                        }
-                    }
-                }
-            }
-
-            /// <summary>
-            /// Set or clear whether this merge is paused (for example,
-            ///  <seealso cref="ConcurrentMergeScheduler"/> will pause merges
-            ///  if too many are running).
-            /// </summary>
-            internal virtual void SetPause(bool paused)
-            {
-                lock (this)
-                {
-                    this.paused = paused;
-                    if (!paused)
-                    {
-                        // Wakeup merge thread, if it's waiting
-                        Monitor.PulseAll(this);
-                    }
-                }
-            }
-
-            /// <summary>
-            /// Returns true if this merge is paused.
-            /// </summary>
-            /// <seealso cref="SetPause(bool)"/>
-            internal virtual bool IsPaused
-            {
-                get
-                {
-                    lock (this)
-                    {
-                        return paused;
-                    }
-                }
-            }
-
-            /// <summary>
-            /// Returns a readable description of the current merge
-            ///  state.
-            /// </summary>
-            public virtual string SegString(Directory dir)
-            {
-                StringBuilder b = new StringBuilder();
-                int numSegments = Segments.Count;
-                for (int i = 0; i < numSegments; i++)
-                {
-                    if (i > 0)
-                    {
-                        b.Append(' ');
-                    }
-                    b.Append(Segments[i].ToString(dir, 0));
-                }
-                if (info != null)
-                {
-                    b.Append(" into ").Append(info.Info.Name);
-                }
-                if (maxNumSegments != -1)
-                {
-                    b.Append(" [maxNumSegments=" + maxNumSegments + "]");
-                }
-                if (aborted)
-                {
-                    b.Append(" [ABORTED]");
-                }
-                return b.ToString();
-            }
-
-            /// <summary>
-            /// Returns the total size in bytes of this merge. Note that this does not
-            /// indicate the size of the merged segment, but the
-            /// input total size. This is only set once the merge is
-            /// initialized by IndexWriter.
-            /// </summary>
-            public virtual long TotalBytesSize
-            {
-                get { return totalMergeBytes; }
-            }
-
-            /// <summary>
-            /// Returns the total number of documents that are included with this merge.
-            /// Note that this does not indicate the number of documents after the merge.
-            ///
-            /// </summary>
-            public virtual int TotalNumDocs
-            {
-                get
-                {
-                    int total = 0;
-                    foreach (SegmentCommitInfo info in Segments)
-                    {
-                        total += info.Info.DocCount;
-                    }
-                    return total;
-                }
-            }
-
-            /// <summary>
-            /// Return <seealso cref="MergeInfo"/> describing this merge. </summary>
-            public virtual MergeInfo MergeInfo
-            {
-                get
-                {
-                    return new MergeInfo(TotalDocCount, EstimatedMergeBytes, isExternal, maxNumSegments);
-                }
-            }
-        }
-
-        /// <summary>
-        /// A MergeSpecification instance provides the information
-        /// necessary to perform multiple merges.  It simply
-        /// contains a list of <seealso cref="OneMerge"/> instances.
-        /// </summary>
-#if FEATURE_SERIALIZABLE
-        [Serializable]
-#endif
-        public class MergeSpecification
-        {
-            /// <summary>
-            /// The subset of segments to be included in the primitive merge.
-            /// </summary>
-
-            public IList<OneMerge> Merges { get; private set; }
-
-            /// <summary>
-            /// Sole constructor.  Use <see cref="Add(OneMerge)"/>
-            ///  to add merges.
-            /// </summary>
-            public MergeSpecification()
-            {
-                Merges = new List<OneMerge>();
-            }
-
-            /// <summary>
-            /// Adds the provided <seealso cref="OneMerge"/> to this
-            ///  specification.
-            /// </summary>
-            public virtual void Add(OneMerge merge)
-            {
-                Merges.Add(merge);
-            }
-
-            /// <summary>
-            /// Returns a description of the merges in this
-            ///  specification.
-            /// </summary>
-            public virtual string SegString(Directory dir)
-            {
-                StringBuilder b = new StringBuilder();
-                b.Append("MergeSpec:\n");
-                int count = Merges.Count;
-                for (int i = 0; i < count; i++)
-                {
-                    b.Append("  ").Append(1 + i).Append(": ").Append(Merges[i].SegString(dir));
-                }
-                return b.ToString();
-            }
-        }
-
-        /// <summary>
-        /// Exception thrown if there are any problems while
-        ///  executing a merge.
-        /// </summary>
-        // LUCENENET: All exception classes should be marked serializable
-#if FEATURE_SERIALIZABLE
-        [Serializable]
-#endif
-        public class MergeException : Exception
-        {
-            private Directory dir;
-
-            /// <summary>
-            /// Create a <see cref="MergeException"/>. </summary>
-            public MergeException(string message, Directory dir)
-                : base(message)
-            {
-                this.dir = dir;
-            }
-
-            /// <summary>
-            /// Create a <see cref="MergeException"/>. </summary>
-            public MergeException(Exception exc, Directory dir)
-                : base(exc.ToString(), exc)
-            {
-                this.dir = dir;
-            }
-
-            // For testing purposes
-            internal MergeException(string message)
-                : base(message)
-            {
-            }
-
-#if FEATURE_SERIALIZABLE
-            /// <summary>
-            /// Initializes a new instance of this class with serialized data.
-            /// </summary>
-            /// <param name="info">The <see cref="SerializationInfo"/> that holds the serialized object data about the exception being thrown.</param>
-            /// <param name="context">The <see cref="StreamingContext"/> that contains contextual information about the source or destination.</param>
-            public MergeException(SerializationInfo info, StreamingContext context)
-                : base(info, context)
-            {
-            }
-#endif
-
-            /// <summary>
-            /// Returns the <seealso cref="Directory"/> of the index that hit
-            ///  the exception.
-            /// </summary>
-            public virtual Directory Directory
-            {
-                get
-                {
-                    return dir;
-                }
-            }
-        }
-
-        /// <summary>
-        /// Thrown when a merge was explicitly aborted because
-        ///  <see cref="IndexWriter.Dispose(bool)"/> was called with
-        ///  <c>false</c>.  Normally this exception is
-        ///  privately caught and suppressed by <seealso cref="IndexWriter"/>.
-        /// </summary>
-        // LUCENENET: All exception classes should be marked serializable
-#if FEATURE_SERIALIZABLE
-        [Serializable]
-#endif
-        public class MergeAbortedException : System.IO.IOException
-        {
-            /// <summary>
-            /// Create a <seealso cref="MergeAbortedException"/>. </summary>
-            public MergeAbortedException()
-                : base("merge is aborted")
-            {
-            }
-
-            /// <summary>
-            /// Create a <seealso cref="MergeAbortedException"/> with a
-            ///  specified message.
-            /// </summary>
-            public MergeAbortedException(string message)
-                : base(message)
-            {
-            }
-
-#if FEATURE_SERIALIZABLE
-            /// <summary>
-            /// Initializes a new instance of this class with serialized data.
-            /// </summary>
-            /// <param name="info">The <see cref="SerializationInfo"/> that holds the serialized object data about the exception being thrown.</param>
-            /// <param name="context">The <see cref="StreamingContext"/> that contains contextual information about the source or destination.</param>
-            public MergeAbortedException(SerializationInfo info, StreamingContext context)
-                : base(info, context)
-            {
-            }
-#endif
-        }
-
-        /// <summary>
-        /// Default ratio for compound file system usage. Set to <c>1.0</c>: always use
-        /// the compound file system.
-        /// </summary>
-        protected static readonly double DEFAULT_NO_CFS_RATIO = 1.0;
-
-        /// <summary>
-        /// Default max segment size in order to use the compound file system. Set to <see cref="long.MaxValue"/>.
-        /// </summary>
-        protected static readonly long DEFAULT_MAX_CFS_SEGMENT_SIZE = long.MaxValue;
-
-        /// <summary>
-        /// <seealso cref="IndexWriter"/> that contains this instance. </summary>
-        protected SetOnce<IndexWriter> m_writer;
-
-        /// <summary>
-        /// If the size of the merged segment exceeds this ratio of
-        ///  the total index size, then it will remain in
-        ///  non-compound format.
-        /// </summary>
-        protected double m_noCFSRatio = DEFAULT_NO_CFS_RATIO;
-
-        /// <summary>
-        /// If the size of the merged segment exceeds
-        ///  this value then it will not use compound file format.
-        /// </summary>
-        protected long m_maxCFSSegmentSize = DEFAULT_MAX_CFS_SEGMENT_SIZE;
-
-        public virtual object Clone()
-        {
-            MergePolicy clone = (MergePolicy)base.MemberwiseClone();
-
-            clone.m_writer = new SetOnce<IndexWriter>();
-            return clone;
-        }
-
-        /// <summary>
-        /// Creates a new merge policy instance. Note that if you intend to use it
-        /// without passing it to <see cref="IndexWriter"/>, you should call
-        /// <see cref="SetIndexWriter(IndexWriter)"/>.
-        /// </summary>
-        public MergePolicy()
-            : this(DEFAULT_NO_CFS_RATIO, DEFAULT_MAX_CFS_SEGMENT_SIZE)
-        {
-        }
-
-        /// <summary>
-        /// Creates a new merge policy instance with the supplied defaults for <c>noCFSRatio</c>
-        /// and <c>maxCFSSegmentSize</c>. This constructor should be used by subclasses that need
-        /// different defaults than <see cref="MergePolicy"/>.
-        /// </summary>
-        protected MergePolicy(double defaultNoCFSRatio, long defaultMaxCFSSegmentSize)
-        {
-            m_writer = new SetOnce<IndexWriter>();
-            this.m_noCFSRatio = defaultNoCFSRatio;
-            this.m_maxCFSSegmentSize = defaultMaxCFSSegmentSize;
-        }
-
-        /// <summary>
-        /// Sets the <seealso cref="IndexWriter"/> to use by this merge policy. this method is
-        /// allowed to be called only once, and is usually set by IndexWriter. If it is
-        /// called more than once, <seealso cref="AlreadySetException"/> is thrown.
-        /// </summary>
-        /// <seealso cref= SetOnce </seealso>
-        public virtual void SetIndexWriter(IndexWriter writer)
-        {
-            this.m_writer.Set(writer);
-        }
-
-        /// <summary>
-        /// Determines what set of merge operations is now necessary on the index.
-        /// <see cref="IndexWriter"/> calls this whenever there is a change to the segments.
-        /// This call is always synchronized on the <see cref="IndexWriter"/> instance, so
-        /// only one thread at a time will call this method. </summary>
-        /// <param name="mergeTrigger"> the event that triggered the merge </param>
-        /// <param name="segmentInfos">
-        ///          the total set of segments in the index </param>
-        public abstract MergeSpecification FindMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos);
-
-        /// <summary>
-        /// Determines what set of merge operations is necessary in
-        /// order to merge to &lt;= the specified segment count. <see cref="IndexWriter"/> calls this when its
-        /// <see cref="IndexWriter.ForceMerge"/> method is called. This call is always
-        /// synchronized on the <see cref="IndexWriter"/> instance, so only one thread at a
-        /// time will call this method.
-        /// </summary>
-        /// <param name="segmentInfos">
-        ///          the total set of segments in the index </param>
-        /// <param name="maxSegmentCount">
-        ///          requested maximum number of segments in the index (currently this
-        ///          is always 1) </param>
-        /// <param name="segmentsToMerge">
-        ///          contains the specific SegmentInfo instances that must be merged
-        ///          away. This may be a subset of all
-        ///          SegmentInfos.  If the value is <c>true</c> for a
-        ///          given SegmentInfo, that means this segment was
-        ///          an original segment present in the
-        ///          to-be-merged index; else, it was a segment
-        ///          produced by a cascaded merge. </param>
-        public abstract MergeSpecification FindForcedMerges(SegmentInfos segmentInfos, int maxSegmentCount, IDictionary<SegmentCommitInfo, bool?> segmentsToMerge);
-
-        /// <summary>
-        /// Determines what set of merge operations is necessary in order to expunge all
-        /// deletes from the index.
-        /// </summary>
-        /// <param name="segmentInfos">
-        ///          the total set of segments in the index </param>
-        public abstract MergeSpecification FindForcedDeletesMerges(SegmentInfos segmentInfos);
-
-        /// <summary>
-        /// Release all resources for the policy.
-        /// </summary>
-        // LUCENENET specific - implementing proper dispose pattern
-        public void Dispose()
-        {
-            Dispose(true);
-            GC.SuppressFinalize(this);
-        }
-
-        /// <summary>
-        /// Release all resources for the policy.
-        /// </summary>
-        protected abstract void Dispose(bool disposing);
-
-        /// <summary>
-        /// Returns <c>true</c> if a new segment (regardless of its origin) should use the
-        /// compound file format. The default implementation returns <c>true</c>
-        /// iff the size of the given mergedInfo is less than or equal to
-        /// <see cref="MaxCFSSegmentSizeMB"/> and the size is less than or equal to the
-        /// total index size * <see cref="NoCFSRatio"/>; otherwise <c>false</c>.
-        /// </summary>
-        public virtual bool UseCompoundFile(SegmentInfos infos, SegmentCommitInfo mergedInfo)
-        {
-            if (NoCFSRatio == 0.0)
-            {
-                return false;
-            }
-            long mergedInfoSize = Size(mergedInfo);
-            if (mergedInfoSize > m_maxCFSSegmentSize)
-            {
-                return false;
-            }
-            if (NoCFSRatio >= 1.0)
-            {
-                return true;
-            }
-            long totalSize = 0;
-            foreach (SegmentCommitInfo info in infos.Segments)
-            {
-                totalSize += Size(info);
-            }
-            return mergedInfoSize <= NoCFSRatio * totalSize;
-        }
-
-        /// <summary>
-        /// Returns the byte size of the provided
-        ///  <see cref="SegmentCommitInfo"/>, pro-rated by the percentage of
-        ///  non-deleted documents.
-        /// </summary>
-        protected virtual long Size(SegmentCommitInfo info)
-        {
-            long byteSize = info.GetSizeInBytes();
-            int delCount = m_writer.Get().NumDeletedDocs(info);
-            double delRatio = (info.Info.DocCount <= 0 ? 0.0f : ((float)delCount / (float)info.Info.DocCount));
-            Debug.Assert(delRatio <= 1.0);
-            return (info.Info.DocCount <= 0 ? byteSize : (long)(byteSize * (1.0 - delRatio)));
-        }
-
-        /// <summary>
-        /// Returns <c>true</c> if this single info is already fully merged (has no
-        ///  pending deletes, is in the same dir as the
-        ///  writer, and matches the current compound file setting).
-        /// </summary>
-        protected bool IsMerged(SegmentInfos infos, SegmentCommitInfo info)
-        {
-            IndexWriter w = m_writer.Get();
-            Debug.Assert(w != null);
-            bool hasDeletions = w.NumDeletedDocs(info) > 0;
-            return !hasDeletions
-#pragma warning disable 612, 618
-                && !info.Info.HasSeparateNorms
-#pragma warning restore 612, 618
-                && info.Info.Dir == w.Directory 
-                && UseCompoundFile(infos, info) == info.Info.UseCompoundFile;
-        }
-
-        /// <summary>
-        /// Gets or sets the current <c>noCFSRatio</c>, in the range [0.0, 1.0].
-        /// </summary>
-        public double NoCFSRatio
-        {
-            get
-            {
-                return m_noCFSRatio;
-            }
-            set
-            {
-                if (value < 0.0 || value > 1.0)
-                {
-                    throw new System.ArgumentException("noCFSRatio must be 0.0 to 1.0 inclusive; got " + value);
-                }
-                this.m_noCFSRatio = value;
-            }
-        }
-
-        /// <summary>
-        /// Gets or sets the largest size (in MB) allowed for a compound file segment. </summary>
-        public double MaxCFSSegmentSizeMB
-        {
-            get
-            {
-                return m_maxCFSSegmentSize / 1024 / 1024.0;
-            }
-            set
-            {
-                if (value < 0.0)
-                {
-                    throw new System.ArgumentException("maxCFSSegmentSizeMB must be >=0 (got " + value + ")");
-                }
-                value *= 1024 * 1024;
-                this.m_maxCFSSegmentSize = (value > long.MaxValue) ? long.MaxValue : (long)value;
-            }
-        }
-    }
-}
\ No newline at end of file
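
The compound-file logic above is the heart of MergePolicy: UseCompoundFile keeps a merged segment in compound format only while its size is within both MaxCFSSegmentSizeMB and NoCFSRatio times the total index size. A minimal sketch of tuning those knobs through IndexWriterConfig, assuming the shipped LogByteSizeMergePolicy plus an existing analyzer and directory (none of which are part of this diff):

    // Hedged sketch: LogByteSizeMergePolicy is one concrete MergePolicy;
    // analyzer and directory are assumed to exist in the calling code.
    var policy = new LogByteSizeMergePolicy
    {
        NoCFSRatio = 0.1,           // merges over 10% of total index size skip CFS
        MaxCFSSegmentSizeMB = 512.0 // merged segments over 512 MB skip CFS
    };
    var config = new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer)
    {
        MergePolicy = policy
    };
    using (var writer = new IndexWriter(directory, config))
    {
        // Index documents as usual; each merge consults policy.UseCompoundFile.
    }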

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/MergeScheduler.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/MergeScheduler.cs b/src/Lucene.Net.Core/Index/MergeScheduler.cs
deleted file mode 100644
index 224c44b..0000000
--- a/src/Lucene.Net.Core/Index/MergeScheduler.cs
+++ /dev/null
@@ -1,68 +0,0 @@
-using System;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    /// <summary>
-    /// <para>Expert: <see cref="IndexWriter"/> uses an instance
-    ///  implementing this interface to execute the merges
-    ///  selected by a <see cref="MergePolicy"/>.  The default
-    ///  MergeScheduler is <see cref="ConcurrentMergeScheduler"/>.</para>
-    ///  <para>Implementers of sub-classes should make sure that <see cref="Clone()"/>
-    ///  returns an independent instance able to work with any <see cref="IndexWriter"/>
-    ///  instance.</para>
-    /// @lucene.experimental
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public abstract class MergeScheduler : IDisposable, IMergeScheduler
-    {
-        /// <summary>
-        /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
-        /// </summary>
-        protected MergeScheduler()
-        {
-        }
-
-        /// <summary>
-        /// Run the merges provided by <see cref="IndexWriter.NextMerge()"/>. </summary>
-        /// <param name="writer"> the <see cref="IndexWriter"/> to obtain the merges from. </param>
-        /// <param name="trigger"> the <see cref="MergeTrigger"/> that caused this merge to happen </param>
-        /// <param name="newMergesFound"> <c>true</c> iff any new merges were found by the caller; otherwise <c>false</c>
-        ///  </param>
-        public abstract void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound);
-
-        public void Dispose()
-        {
-            Dispose(true);
-            GC.SuppressFinalize(this);
-        }
-
-        /// <summary>
-        /// Disposes this <see cref="MergeScheduler"/>. </summary>
-        protected abstract void Dispose(bool disposing);
-
-        public virtual IMergeScheduler Clone()
-        {
-            return (MergeScheduler)base.MemberwiseClone();
-        }
-    }
-}
\ No newline at end of file
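
MergeScheduler's contract is small: subclasses implement Merge(IndexWriter, MergeTrigger, bool) and Dispose(bool). A hedged sketch of a scheduler that runs every pending merge serially on the calling thread (illustrative only; the shipped implementations are ConcurrentMergeScheduler and SerialMergeScheduler, and the NextMerge()/Merge(OneMerge) calls are assumed accessible as the doc comment above references them):

    // Hypothetical scheduler, for illustration only.
    internal sealed class InlineMergeScheduler : MergeScheduler
    {
        public override void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound)
        {
            // Drain and execute every pending merge on the calling thread.
            MergePolicy.OneMerge merge;
            while ((merge = writer.NextMerge()) != null)
            {
                writer.Merge(merge);
            }
        }

        protected override void Dispose(bool disposing)
        {
            // Nothing to release in this sketch.
        }
    }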

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/MergeState.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/MergeState.cs b/src/Lucene.Net.Core/Index/MergeState.cs
deleted file mode 100644
index 45d5526..0000000
--- a/src/Lucene.Net.Core/Index/MergeState.cs
+++ /dev/null
@@ -1,288 +0,0 @@
-using Lucene.Net.Support;
-using System;
-using System.Collections.Generic;
-using System.Diagnostics;
-using System.Diagnostics.CodeAnalysis;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using IBits = Lucene.Net.Util.IBits;
-    using Directory = Lucene.Net.Store.Directory;
-    using InfoStream = Lucene.Net.Util.InfoStream;
-    using MonotonicAppendingInt64Buffer = Lucene.Net.Util.Packed.MonotonicAppendingInt64Buffer;
-
-    /// <summary>
-    /// Holds common state used during segment merging.
-    ///
-    /// @lucene.experimental
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public class MergeState
-    {
-        /// <summary>
-        /// Remaps docIDs around deletes during merge.
-        /// </summary>
-        public abstract class DocMap
-        {
-            internal DocMap()
-            {
-            }
-
-            /// <summary>
-            /// Returns the mapped docID corresponding to the provided one. </summary>
-            public abstract int Get(int docID);
-
-            /// <summary>
-            /// Returns the total number of documents, ignoring
-            ///  deletions.
-            /// </summary>
-            public abstract int MaxDoc { get; }
-
-            /// <summary>
-            /// Returns the number of not-deleted documents. </summary>
-            public int NumDocs
-            {
-                get { return MaxDoc - NumDeletedDocs; }
-            }
-
-            /// <summary>
-            /// Returns the number of deleted documents. </summary>
-            public abstract int NumDeletedDocs { get; }
-
-            /// <summary>
-            /// Returns true if there are any deletions. </summary>
-            public virtual bool HasDeletions
-            {
-                get { return NumDeletedDocs > 0; }
-            }
-
-            /// <summary>
-            /// Creates a <seealso cref="DocMap"/> instance appropriate for
-            ///  this reader.
-            /// </summary>
-            public static DocMap Build(AtomicReader reader)
-            {
-                int maxDoc = reader.MaxDoc;
-                if (!reader.HasDeletions)
-                {
-                    return new NoDelDocMap(maxDoc);
-                }
-                IBits liveDocs = reader.LiveDocs;
-                return Build(maxDoc, liveDocs);
-            }
-
-            internal static DocMap Build(int maxDoc, IBits liveDocs)
-            {
-                Debug.Assert(liveDocs != null);
-                MonotonicAppendingInt64Buffer docMap = new MonotonicAppendingInt64Buffer();
-                int del = 0;
-                for (int i = 0; i < maxDoc; ++i)
-                {
-                    docMap.Add(i - del);
-                    if (!liveDocs.Get(i))
-                    {
-                        ++del;
-                    }
-                }
-                docMap.Freeze();
-                int numDeletedDocs = del;
-                Debug.Assert(docMap.Count == maxDoc);
-                return new DocMapAnonymousInnerClassHelper(maxDoc, liveDocs, docMap, numDeletedDocs);
-            }
-
-            private class DocMapAnonymousInnerClassHelper : DocMap
-            {
-                private int maxDoc;
-                private IBits liveDocs;
-                private MonotonicAppendingInt64Buffer docMap;
-                private int numDeletedDocs;
-
-                public DocMapAnonymousInnerClassHelper(int maxDoc, IBits liveDocs, MonotonicAppendingInt64Buffer docMap, int numDeletedDocs)
-                {
-                    this.maxDoc = maxDoc;
-                    this.liveDocs = liveDocs;
-                    this.docMap = docMap;
-                    this.numDeletedDocs = numDeletedDocs;
-                }
-
-                public override int Get(int docID)
-                {
-                    if (!liveDocs.Get(docID))
-                    {
-                        return -1;
-                    }
-                    return (int)docMap.Get(docID);
-                }
-
-                public override int MaxDoc
-                {
-                    get { return maxDoc; }
-                }
-
-                public override int NumDeletedDocs
-                {
-                    get { return numDeletedDocs; }
-                }
-            }
-        }
-
-        private sealed class NoDelDocMap : DocMap
-        {
-            private readonly int maxDoc;
-
-            internal NoDelDocMap(int maxDoc)
-            {
-                this.maxDoc = maxDoc;
-            }
-
-            public override int Get(int docID)
-            {
-                return docID;
-            }
-
-            public override int MaxDoc
-            {
-                get { return maxDoc; }
-            }
-
-            public override int NumDeletedDocs
-            {
-                get { return 0; }
-            }
-        }
-
-        /// <summary>
-        /// <seealso cref="SegmentInfo"/> of the newly merged segment. </summary>
-        public SegmentInfo SegmentInfo { get; private set; }
-
-        /// <summary>
-        /// <seealso cref="FieldInfos"/> of the newly merged segment. </summary>
-        public FieldInfos FieldInfos { get; set; }
-
-        /// <summary>
-        /// Readers being merged. </summary>
-        public IList<AtomicReader> Readers { get; private set; }
-
-        /// <summary>
-        /// Maps docIDs around deletions. </summary>
-        [WritableArray]
-        [SuppressMessage("Microsoft.Performance", "CA1819", Justification = "Lucene's design requires some writable array properties")]
-        public DocMap[] DocMaps { get; set; }
-
-        /// <summary>
-        /// New docID base per reader. </summary>
-        [WritableArray]
-        [SuppressMessage("Microsoft.Performance", "CA1819", Justification = "Lucene's design requires some writable array properties")]
-        public int[] DocBase { get; set; }
-
-        /// <summary>
-        /// Holds the CheckAbort instance, which is invoked
-        ///  periodically to see if the merge has been aborted.
-        /// </summary>
-        public CheckAbort CheckAbort { get; private set; }
-
-        /// <summary>
-        /// InfoStream for debugging messages. </summary>
-        public InfoStream InfoStream { get; private set; }
-
-        // TODO: get rid of this? it tells you which segments are 'aligned' (e.g. for bulk merging)
-        // but is this really so expensive to compute again in different components, versus once in SM?
-
-        /// <summary>
-        /// <seealso cref="SegmentReader"/>s that have identical field
-        /// name/number mapping, so their stored fields and term
-        /// vectors may be bulk merged.
-        /// </summary>
-        [WritableArray]
-        [SuppressMessage("Microsoft.Performance", "CA1819", Justification = "Lucene's design requires some writable array properties")]
-        public SegmentReader[] MatchingSegmentReaders { get; set; }
-
-        /// <summary>
-        /// How many <see cref="MatchingSegmentReaders"/> are set. </summary>
-        public int MatchedCount { get; set; }
-
-        /// <summary>
-        /// Sole constructor. </summary>
-        internal MergeState(IList<AtomicReader> readers, SegmentInfo segmentInfo, InfoStream infoStream, CheckAbort checkAbort)
-        {
-            this.Readers = readers;
-            this.SegmentInfo = segmentInfo;
-            this.InfoStream = infoStream;
-            this.CheckAbort = checkAbort;
-        }
-    }
-
-    /// <summary>
-    /// Class for recording units of work when merging segments.
-    /// </summary>
-    public class CheckAbort // LUCENENET Specific: De-nested this class to fix CLS naming issue
-    {
-        private double workCount;
-        private readonly MergePolicy.OneMerge merge;
-        private readonly Directory dir;
-
-        /// <summary>
-        /// Creates a <see cref="CheckAbort"/> instance. </summary>
-        public CheckAbort(MergePolicy.OneMerge merge, Directory dir)
-        {
-            this.merge = merge;
-            this.dir = dir;
-        }
-
-        /// <summary>
-        /// Records the fact that roughly <paramref name="units"/> amount of work
-        /// has been done since this method was last called.
-        /// When adding time-consuming code into SegmentMerger,
-        /// you should test different values for <paramref name="units"/> to ensure
-        /// that the time in between calls to <see cref="MergePolicy.OneMerge.CheckAborted(Directory)"/>
-        /// is up to ~ 1 second.
-        /// </summary>
-        public virtual void Work(double units)
-        {
-            workCount += units;
-            if (workCount >= 10000.0)
-            {
-                merge.CheckAborted(dir);
-                workCount = 0;
-            }
-        }
-
-        /// <summary>
-        /// If you use this, <see cref="IndexWriter.Dispose(bool)"/> called with <c>false</c> cannot abort your merge!
-        /// @lucene.internal
-        /// </summary>
-        public static readonly CheckAbort NONE = new CheckAbortAnonymousInnerClassHelper();
-
-        private class CheckAbortAnonymousInnerClassHelper : CheckAbort
-        {
-            public CheckAbortAnonymousInnerClassHelper()
-                : base(null, null)
-            {
-            }
-
-            public override void Work(double units)
-            {
-                // do nothing
-            }
-        }
-    }
-}
\ No newline at end of file
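
DocMap.Build above compacts docIDs by subtracting the running count of deletions, so deleted documents map to -1 and live ones stay dense. A short sketch of consuming a DocMap (a hedged illustration; reader is an arbitrary AtomicReader assumed to exist):

    // Build a map from the reader's live-docs bits (NoDelDocMap when there
    // are no deletions).
    MergeState.DocMap docMap = MergeState.DocMap.Build(reader);
    for (int docID = 0; docID < docMap.MaxDoc; docID++)
    {
        int mapped = docMap.Get(docID);
        if (mapped == -1)
        {
            continue; // docID was deleted in the source segment
        }
        // mapped is the compacted docID in the merged segment.
    }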

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/MergeTrigger.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/MergeTrigger.cs b/src/Lucene.Net.Core/Index/MergeTrigger.cs
deleted file mode 100644
index 113be31..0000000
--- a/src/Lucene.Net.Core/Index/MergeTrigger.cs
+++ /dev/null
@@ -1,53 +0,0 @@
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    /// <summary>
-    /// MergeTrigger is passed to
-    /// <see cref="MergePolicy.FindMerges(MergeTrigger, SegmentInfos)"/> to indicate the
-    /// event that triggered the merge.
-    /// </summary>
-    public enum MergeTrigger
-    {
-        /// <summary>
-        /// Merge was triggered by a segment flush.
-        /// </summary>
-        SEGMENT_FLUSH,
-
-        /// <summary>
-        /// Merge was triggered by a full flush. Full flushes
-        /// can be caused by a commit, NRT reader reopen or a close call on the index writer.
-        /// </summary>
-        FULL_FLUSH,
-
-        /// <summary>
-        /// Merge has been triggered explicitly by the user.
-        /// </summary>
-        EXPLICIT,
-
-        /// <summary>
-        /// Merge was triggered by a successfully finished merge.
-        /// </summary>
-        MERGE_FINISHED,
-
-        /// <summary>
-        /// Merge was triggered by a closing IndexWriter.
-        /// </summary>
-        CLOSING
-    }
-}
\ No newline at end of file
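
Because FindMerges receives the trigger, a custom policy can react differently per event. A hedged sketch of branching on MergeTrigger inside a hypothetical MergePolicy subclass (not a policy that ships with Lucene.Net); the signature matches the abstract method declared in MergePolicy above:

    // Member of a hypothetical MergePolicy subclass, for illustration only.
    public override MergeSpecification FindMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos)
    {
        // Only react to flush-driven triggers in this sketch; explicit
        // merging is handled by FindForcedMerges instead.
        if (mergeTrigger != MergeTrigger.SEGMENT_FLUSH && mergeTrigger != MergeTrigger.FULL_FLUSH)
        {
            return null; // null means "no merges are necessary"
        }
        // ...select segments and build a MergeSpecification here...
        return null;
    }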

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/MultiBits.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/MultiBits.cs b/src/Lucene.Net.Core/Index/MultiBits.cs
deleted file mode 100644
index 3ac7e67..0000000
--- a/src/Lucene.Net.Core/Index/MultiBits.cs
+++ /dev/null
@@ -1,141 +0,0 @@
-using System;
-using System.Diagnostics;
-using System.Text;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using IBits = Lucene.Net.Util.IBits;
-
-    /// <summary>
-    /// Concatenates multiple Bits together, on every lookup.
-    ///
-    /// <para><b>NOTE</b>: this is very costly, as every lookup must
-    /// do a binary search to locate the right sub-reader.</para>
-    ///
-    /// @lucene.experimental
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    internal sealed class MultiBits : IBits
-    {
-        private readonly IBits[] subs;
-
-        // length is 1+subs.length (the last entry has the maxDoc):
-        private readonly int[] starts;
-
-        private readonly bool defaultValue;
-
-        public MultiBits(IBits[] subs, int[] starts, bool defaultValue)
-        {
-            Debug.Assert(starts.Length == 1 + subs.Length);
-            this.subs = subs;
-            this.starts = starts;
-            this.defaultValue = defaultValue;
-        }
-
-        private bool CheckLength(int reader, int doc)
-        {
-            int length = starts[1 + reader] - starts[reader];
-            Debug.Assert(doc - starts[reader] < length, "doc=" + doc + " reader=" + reader + " starts[reader]=" + starts[reader] + " length=" + length);
-            return true;
-        }
-
-        public bool Get(int doc)
-        {
-            int reader = ReaderUtil.SubIndex(doc, starts);
-            Debug.Assert(reader != -1);
-            IBits bits = subs[reader];
-            if (bits == null)
-            {
-                return defaultValue;
-            }
-            else
-            {
-                Debug.Assert(CheckLength(reader, doc));
-                return bits.Get(doc - starts[reader]);
-            }
-        }
-
-        public override string ToString()
-        {
-            StringBuilder b = new StringBuilder();
-            b.Append(subs.Length + " subs: ");
-            for (int i = 0; i < subs.Length; i++)
-            {
-                if (i != 0)
-                {
-                    b.Append("; ");
-                }
-                if (subs[i] == null)
-                {
-                    b.Append("s=" + starts[i] + " l=null");
-                }
-                else
-                {
-                    b.Append("s=" + starts[i] + " l=" + subs[i].Length + " b=" + subs[i]);
-                }
-            }
-            b.Append(" end=" + starts[subs.Length]);
-            return b.ToString();
-        }
-
-        /// <summary>
-        /// Represents a sub-Bits from
-        /// <seealso cref="MultiBits#getMatchingSub(Lucene.Net.Index.ReaderSlice) getMatchingSub()"/>.
-        /// </summary>
-        public sealed class SubResult
-        {
-            public bool Matches { get; internal set; }
-            public IBits Result { get; internal set; }
-        }
-
-        /// <summary>
-        /// Returns a sub-Bits matching the provided <paramref name="slice"/>.
-        /// <para>
-        /// Because <c>null</c> usually has a special meaning for
-        /// Bits (e.g. no deleted documents), you must check
-        /// <see cref="SubResult.Matches"/> instead to ensure the sub was
-        /// actually found.</para>
-        /// </summary>
-        public SubResult GetMatchingSub(ReaderSlice slice)
-        {
-            int reader = ReaderUtil.SubIndex(slice.Start, starts);
-            Debug.Assert(reader != -1);
-            Debug.Assert(reader < subs.Length, "slice=" + slice + " starts[-1]=" + starts[starts.Length - 1]);
-            SubResult subResult = new SubResult();
-            if (starts[reader] == slice.Start && starts[1 + reader] == slice.Start + slice.Length)
-            {
-                subResult.Matches = true;
-                subResult.Result = subs[reader];
-            }
-            else
-            {
-                subResult.Matches = false;
-            }
-            return subResult;
-        }
-
-        public int Length
-        {
-            get { return starts[starts.Length - 1]; }
-        }
-    }
-}
\ No newline at end of file
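
The starts array is what gives MultiBits its lookup cost: ReaderUtil.SubIndex binary-searches starts for the sub containing a global doc, and the local doc is the offset from that sub's start. Since the class is internal, the sketch below is conceptual only (bitsA/bitsB/bitsC are assumed IBits instances over 5, 3, and 4 docs):

    IBits[] subs = { bitsA, bitsB, bitsC };
    int[] starts = { 0, 5, 8, 12 }; // 1 + subs.Length entries; last entry is total maxDoc
    MultiBits merged = new MultiBits(subs, starts, defaultValue: true);
    bool live = merged.Get(6);      // binary search finds sub 1, delegates to bitsB.Get(6 - 5)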

