lucenenet-commits mailing list archives

From nightowl...@apache.org
Subject [16/62] [abbrv] [partial] lucenenet git commit: Renamed Lucene.Net.Core folder to Lucene.Net because the dotnet.exe pack command doesn't allow creating a NuGet package with a different name than its folder. Working around it with the script was much more co
Date Tue, 04 Apr 2017 17:19:22 GMT
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/FreqProxTermsWriterPerField.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/FreqProxTermsWriterPerField.cs b/src/Lucene.Net.Core/Index/FreqProxTermsWriterPerField.cs
deleted file mode 100644
index 40d7936..0000000
--- a/src/Lucene.Net.Core/Index/FreqProxTermsWriterPerField.cs
+++ /dev/null
@@ -1,674 +0,0 @@
-using Lucene.Net.Analysis.TokenAttributes;
-using Lucene.Net.Support;
-using System;
-using System.Collections.Generic;
-using System.Diagnostics;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using BytesRef = Lucene.Net.Util.BytesRef;
-    using FieldsConsumer = Lucene.Net.Codecs.FieldsConsumer;
-    using FixedBitSet = Lucene.Net.Util.FixedBitSet;
-    using OffsetAttribute = Lucene.Net.Analysis.TokenAttributes.OffsetAttribute;
-    using PayloadAttribute = Lucene.Net.Analysis.TokenAttributes.PayloadAttribute;
-    using PostingsConsumer = Lucene.Net.Codecs.PostingsConsumer;
-    using RamUsageEstimator = Lucene.Net.Util.RamUsageEstimator;
-    using TermsConsumer = Lucene.Net.Codecs.TermsConsumer;
-    using TermStats = Lucene.Net.Codecs.TermStats;
-
-    // TODO: break into separate freq and prox writers as
-    // codecs; make separate container (tii/tis/skip/*) that can
-    // be configured as any number of files 1..N
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    internal sealed class FreqProxTermsWriterPerField : TermsHashConsumerPerField, IComparable<FreqProxTermsWriterPerField>
-    {
-        internal readonly FreqProxTermsWriter parent;
-        internal readonly TermsHashPerField termsHashPerField;
-        internal readonly FieldInfo fieldInfo;
-        internal readonly DocumentsWriterPerThread.DocState docState;
-        internal readonly FieldInvertState fieldState;
-        private bool hasFreq;
-        private bool hasProx;
-        private bool hasOffsets;
-        internal IPayloadAttribute payloadAttribute;
-        internal IOffsetAttribute offsetAttribute;
-
-        public FreqProxTermsWriterPerField(TermsHashPerField termsHashPerField, FreqProxTermsWriter parent, FieldInfo fieldInfo)
-        {
-            this.termsHashPerField = termsHashPerField;
-            this.parent = parent;
-            this.fieldInfo = fieldInfo;
-            docState = termsHashPerField.docState;
-            fieldState = termsHashPerField.fieldState;
-            SetIndexOptions(fieldInfo.IndexOptions);
-        }
-
-        internal override int StreamCount
-        {
-            get
-            {
-                if (!hasProx)
-                {
-                    return 1;
-                }
-                else
-                {
-                    return 2;
-                }
-            }
-        }
-
-        internal override void Finish()
-        {
-            if (hasPayloads)
-            {
-                fieldInfo.SetStorePayloads();
-            }
-        }
-
-        internal bool hasPayloads;
-
-        [ExceptionToNetNumericConvention]
-        internal override void SkippingLongTerm()
-        {
-        }
-
-        public int CompareTo(FreqProxTermsWriterPerField other)
-        {
-            return fieldInfo.Name.CompareToOrdinal(other.fieldInfo.Name);
-        }
-
-        // Called after flush
-        internal void Reset()
-        {
-            // Record, up front, whether our in-RAM format will be
-            // with or without term freqs:
-            SetIndexOptions(fieldInfo.IndexOptions);
-            payloadAttribute = null;
-        }
-
-        private void SetIndexOptions(IndexOptions indexOptions)
-        {
-            if (indexOptions == IndexOptions.NONE)
-            {
-                // field could later be updated with indexed=true, so set everything on
-                hasFreq = hasProx = hasOffsets = true;
-            }
-            else
-            {
-                hasFreq = indexOptions.CompareTo(IndexOptions.DOCS_AND_FREQS) >= 0;
-                hasProx = indexOptions.CompareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
-                hasOffsets = indexOptions.CompareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
-            }
-        }
-
-        internal override bool Start(IIndexableField[] fields, int count)
-        {
-            for (int i = 0; i < count; i++)
-            {
-                if (fields[i].FieldType.IsIndexed)
-                {
-                    return true;
-                }
-            }
-            return false;
-        }
-
-        internal override void Start(IIndexableField f)
-        {
-            if (fieldState.AttributeSource.HasAttribute<IPayloadAttribute>())
-            {
-                payloadAttribute = fieldState.AttributeSource.GetAttribute<IPayloadAttribute>();
-            }
-            else
-            {
-                payloadAttribute = null;
-            }
-            if (hasOffsets)
-            {
-                offsetAttribute = fieldState.AttributeSource.AddAttribute<IOffsetAttribute>();
-            }
-            else
-            {
-                offsetAttribute = null;
-            }
-        }
-
-        internal void WriteProx(int termID, int proxCode)
-        {
-            //System.out.println("writeProx termID=" + termID + " proxCode=" + proxCode);
-            Debug.Assert(hasProx);
-            BytesRef payload;
-            if (payloadAttribute == null)
-            {
-                payload = null;
-            }
-            else
-            {
-                payload = payloadAttribute.Payload;
-            }
-
-            if (payload != null && payload.Length > 0)
-            {
-                termsHashPerField.WriteVInt32(1, (proxCode << 1) | 1);
-                termsHashPerField.WriteVInt32(1, payload.Length);
-                termsHashPerField.WriteBytes(1, payload.Bytes, payload.Offset, payload.Length);
-                hasPayloads = true;
-            }
-            else
-            {
-                termsHashPerField.WriteVInt32(1, proxCode << 1);
-            }
-
-            FreqProxPostingsArray postings = (FreqProxPostingsArray)termsHashPerField.postingsArray;
-            postings.lastPositions[termID] = fieldState.Position;
-        }
-
-        internal void WriteOffsets(int termID, int offsetAccum)
-        {
-            Debug.Assert(hasOffsets);
-            int startOffset = offsetAccum + offsetAttribute.StartOffset;
-            int endOffset = offsetAccum + offsetAttribute.EndOffset;
-            FreqProxPostingsArray postings = (FreqProxPostingsArray)termsHashPerField.postingsArray;
-            Debug.Assert(startOffset - postings.lastOffsets[termID] >= 0);
-            termsHashPerField.WriteVInt32(1, startOffset - postings.lastOffsets[termID]);
-            termsHashPerField.WriteVInt32(1, endOffset - startOffset);
-
-            postings.lastOffsets[termID] = startOffset;
-        }
-
-        internal override void NewTerm(int termID)
-        {
-            // First time we're seeing this term since the last
-            // flush
-            Debug.Assert(docState.TestPoint("FreqProxTermsWriterPerField.newTerm start"));
-
-            FreqProxPostingsArray postings = (FreqProxPostingsArray)termsHashPerField.postingsArray;
-            postings.lastDocIDs[termID] = docState.docID;
-            if (!hasFreq)
-            {
-                postings.lastDocCodes[termID] = docState.docID;
-            }
-            else
-            {
-                postings.lastDocCodes[termID] = docState.docID << 1;
-                postings.termFreqs[termID] = 1;
-                if (hasProx)
-                {
-                    WriteProx(termID, fieldState.Position);
-                    if (hasOffsets)
-                    {
-                        WriteOffsets(termID, fieldState.Offset);
-                    }
-                }
-                else
-                {
-                    Debug.Assert(!hasOffsets);
-                }
-            }
-            fieldState.MaxTermFrequency = Math.Max(1, fieldState.MaxTermFrequency);
-            fieldState.UniqueTermCount++;
-        }
-
-        internal override void AddTerm(int termID)
-        {
-            Debug.Assert(docState.TestPoint("FreqProxTermsWriterPerField.addTerm start"));
-
-            FreqProxPostingsArray postings = (FreqProxPostingsArray)termsHashPerField.postingsArray;
-
-            Debug.Assert(!hasFreq || postings.termFreqs[termID] > 0);
-
-            if (!hasFreq)
-            {
-                Debug.Assert(postings.termFreqs == null);
-                if (docState.docID != postings.lastDocIDs[termID])
-                {
-                    Debug.Assert(docState.docID > postings.lastDocIDs[termID]);
-                    termsHashPerField.WriteVInt32(0, postings.lastDocCodes[termID]);
-                    postings.lastDocCodes[termID] = docState.docID - postings.lastDocIDs[termID];
-                    postings.lastDocIDs[termID] = docState.docID;
-                    fieldState.UniqueTermCount++;
-                }
-            }
-            else if (docState.docID != postings.lastDocIDs[termID])
-            {
-                Debug.Assert(docState.docID > postings.lastDocIDs[termID], "id: " + docState.docID + " postings ID: " + postings.lastDocIDs[termID] + " termID: " + termID);
-                // Term not yet seen in the current doc but previously
-                // seen in other doc(s) since the last flush
-
-                // Now that we know doc freq for previous doc,
-                // write it & lastDocCode
-                if (1 == postings.termFreqs[termID])
-                {
-                    termsHashPerField.WriteVInt32(0, postings.lastDocCodes[termID] | 1);
-                }
-                else
-                {
-                    termsHashPerField.WriteVInt32(0, postings.lastDocCodes[termID]);
-                    termsHashPerField.WriteVInt32(0, postings.termFreqs[termID]);
-                }
-                postings.termFreqs[termID] = 1;
-                fieldState.MaxTermFrequency = Math.Max(1, fieldState.MaxTermFrequency);
-                postings.lastDocCodes[termID] = (docState.docID - postings.lastDocIDs[termID]) << 1;
-                postings.lastDocIDs[termID] = docState.docID;
-                if (hasProx)
-                {
-                    WriteProx(termID, fieldState.Position);
-                    if (hasOffsets)
-                    {
-                        postings.lastOffsets[termID] = 0;
-                        WriteOffsets(termID, fieldState.Offset);
-                    }
-                }
-                else
-                {
-                    Debug.Assert(!hasOffsets);
-                }
-                fieldState.UniqueTermCount++;
-            }
-            else
-            {
-                fieldState.MaxTermFrequency = Math.Max(fieldState.MaxTermFrequency, ++postings.termFreqs[termID]);
-                if (hasProx)
-                {
-                    WriteProx(termID, fieldState.Position - postings.lastPositions[termID]);
-                }
-                if (hasOffsets)
-                {
-                    WriteOffsets(termID, fieldState.Offset);
-                }
-            }
-        }
-
-        internal override ParallelPostingsArray CreatePostingsArray(int size)
-        {
-            return new FreqProxPostingsArray(size, hasFreq, hasProx, hasOffsets);
-        }
-
-#if FEATURE_SERIALIZABLE
-        [Serializable]
-#endif
-        internal sealed class FreqProxPostingsArray : ParallelPostingsArray
-        {
-            public FreqProxPostingsArray(int size, bool writeFreqs, bool writeProx, bool writeOffsets)
-                : base(size)
-            {
-                if (writeFreqs)
-                {
-                    termFreqs = new int[size];
-                }
-                lastDocIDs = new int[size];
-                lastDocCodes = new int[size];
-                if (writeProx)
-                {
-                    lastPositions = new int[size];
-                    if (writeOffsets)
-                    {
-                        lastOffsets = new int[size];
-                    }
-                }
-                else
-                {
-                    Debug.Assert(!writeOffsets);
-                }
-                //System.out.println("PA init freqs=" + writeFreqs + " pos=" + writeProx + " offs=" + writeOffsets);
-            }
-
-            internal int[] termFreqs; // # times this term occurs in the current doc
-            internal int[] lastDocIDs; // Last docID where this term occurred
-            internal int[] lastDocCodes; // Code for prior doc
-            internal int[] lastPositions; // Last position where this term occurred
-            internal int[] lastOffsets; // Last endOffset where this term occurred
-
-            internal override ParallelPostingsArray NewInstance(int size)
-            {
-                return new FreqProxPostingsArray(size, termFreqs != null, lastPositions != null, lastOffsets != null);
-            }
-
-            internal override void CopyTo(ParallelPostingsArray toArray, int numToCopy)
-            {
-                Debug.Assert(toArray is FreqProxPostingsArray);
-                FreqProxPostingsArray to = (FreqProxPostingsArray)toArray;
-
-                base.CopyTo(toArray, numToCopy);
-
-                Array.Copy(lastDocIDs, 0, to.lastDocIDs, 0, numToCopy);
-                Array.Copy(lastDocCodes, 0, to.lastDocCodes, 0, numToCopy);
-                if (lastPositions != null)
-                {
-                    Debug.Assert(to.lastPositions != null);
-                    Array.Copy(lastPositions, 0, to.lastPositions, 0, numToCopy);
-                }
-                if (lastOffsets != null)
-                {
-                    Debug.Assert(to.lastOffsets != null);
-                    Array.Copy(lastOffsets, 0, to.lastOffsets, 0, numToCopy);
-                }
-                if (termFreqs != null)
-                {
-                    Debug.Assert(to.termFreqs != null);
-                    Array.Copy(termFreqs, 0, to.termFreqs, 0, numToCopy);
-                }
-            }
-
-            internal override int BytesPerPosting()
-            {
-                int bytes = ParallelPostingsArray.BYTES_PER_POSTING + 2 * RamUsageEstimator.NUM_BYTES_INT32;
-                if (lastPositions != null)
-                {
-                    bytes += RamUsageEstimator.NUM_BYTES_INT32;
-                }
-                if (lastOffsets != null)
-                {
-                    bytes += RamUsageEstimator.NUM_BYTES_INT32;
-                }
-                if (termFreqs != null)
-                {
-                    bytes += RamUsageEstimator.NUM_BYTES_INT32;
-                }
-
-                return bytes;
-            }
-        }
-
-        public void Abort()
-        {
-        }
-
-        internal BytesRef payload;
-
-        /* Walk through all unique text tokens (Posting
-         * instances) found in this field and serialize them
-         * into a single RAM segment. */
-
-        internal void Flush(string fieldName, FieldsConsumer consumer, SegmentWriteState state)
-        {
-            if (!fieldInfo.IsIndexed)
-            {
-                return; // nothing to flush, don't bother the codec with the unindexed field
-            }
-
-            TermsConsumer termsConsumer = consumer.AddField(fieldInfo);
-            IComparer<BytesRef> termComp = termsConsumer.Comparer;
-
-            // CONFUSING: this.indexOptions holds the index options
-            // that were current when we first saw this field.  But
-            // it's possible this has changed, eg when other
-            // documents are indexed that cause a "downgrade" of the
-            // IndexOptions.  So we must decode the in-RAM buffer
-            // according to this.indexOptions, but then write the
-            // new segment to the directory according to
-            // currentFieldIndexOptions:
-            IndexOptions currentFieldIndexOptions = fieldInfo.IndexOptions;
-            Debug.Assert(currentFieldIndexOptions != IndexOptions.NONE);
-
-            bool writeTermFreq = currentFieldIndexOptions.CompareTo(IndexOptions.DOCS_AND_FREQS) >= 0;
-            bool writePositions = currentFieldIndexOptions.CompareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
-            bool writeOffsets = currentFieldIndexOptions.CompareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
-
-            bool readTermFreq = this.hasFreq;
-            bool readPositions = this.hasProx;
-            bool readOffsets = this.hasOffsets;
-
-            //System.out.println("flush readTF=" + readTermFreq + " readPos=" + readPositions + " readOffs=" + readOffsets);
-
-            // Make sure FieldInfo.update is working correctly!:
-            Debug.Assert(!writeTermFreq || readTermFreq);
-            Debug.Assert(!writePositions || readPositions);
-            Debug.Assert(!writeOffsets || readOffsets);
-
-            Debug.Assert(!writeOffsets || writePositions);
-
-            IDictionary<Term, int?> segDeletes;
-            if (state.SegUpdates != null && state.SegUpdates.terms.Count > 0)
-            {
-                segDeletes = state.SegUpdates.terms;
-            }
-            else
-            {
-                segDeletes = null;
-            }
-
-            int[] termIDs = termsHashPerField.SortPostings(termComp);
-            int numTerms = termsHashPerField.bytesHash.Count;
-            BytesRef text = new BytesRef();
-            FreqProxPostingsArray postings = (FreqProxPostingsArray)termsHashPerField.postingsArray;
-            ByteSliceReader freq = new ByteSliceReader();
-            ByteSliceReader prox = new ByteSliceReader();
-
-            FixedBitSet visitedDocs = new FixedBitSet(state.SegmentInfo.DocCount);
-            long sumTotalTermFreq = 0;
-            long sumDocFreq = 0;
-
-            Term protoTerm = new Term(fieldName);
-            for (int i = 0; i < numTerms; i++)
-            {
-                int termID = termIDs[i];
-                // Get BytesRef
-                int textStart = postings.textStarts[termID];
-                termsHashPerField.bytePool.SetBytesRef(text, textStart);
-
-                termsHashPerField.InitReader(freq, termID, 0);
-                if (readPositions || readOffsets)
-                {
-                    termsHashPerField.InitReader(prox, termID, 1);
-                }
-
-                // TODO: really TermsHashPerField should take over most
-                // of this loop, including merge sort of terms from
-                // multiple threads and interacting with the
-                // TermsConsumer, only calling out to us (passing us the
-                // DocsConsumer) to handle delivery of docs/positions
-
-                PostingsConsumer postingsConsumer = termsConsumer.StartTerm(text);
-
-                int? delDocLimit;
-                if (segDeletes != null)
-                {
-                    protoTerm.Bytes = text;
-                    int? docIDUpto;
-                    segDeletes.TryGetValue(protoTerm, out docIDUpto);
-                    if (docIDUpto != null)
-                    {
-                        delDocLimit = docIDUpto;
-                    }
-                    else
-                    {
-                        delDocLimit = 0;
-                    }
-                }
-                else
-                {
-                    delDocLimit = 0;
-                }
-
-                // Now termStates has numToMerge FieldMergeStates
-                // which all share the same term.  Now we must
-                // interleave the docID streams.
-                int docFreq = 0;
-                long totalTermFreq = 0;
-                int docID = 0;
-
-                while (true)
-                {
-                    //System.out.println("  cycle");
-                    int termFreq;
-                    if (freq.Eof())
-                    {
-                        if (postings.lastDocCodes[termID] != -1)
-                        {
-                            // Return last doc
-                            docID = postings.lastDocIDs[termID];
-                            if (readTermFreq)
-                            {
-                                termFreq = postings.termFreqs[termID];
-                            }
-                            else
-                            {
-                                termFreq = -1;
-                            }
-                            postings.lastDocCodes[termID] = -1;
-                        }
-                        else
-                        {
-                            // EOF
-                            break;
-                        }
-                    }
-                    else
-                    {
-                        int code = freq.ReadVInt32();
-                        if (!readTermFreq)
-                        {
-                            docID += code;
-                            termFreq = -1;
-                        }
-                        else
-                        {
-                            docID += (int)((uint)code >> 1);
-                            if ((code & 1) != 0)
-                            {
-                                termFreq = 1;
-                            }
-                            else
-                            {
-                                termFreq = freq.ReadVInt32();
-                            }
-                        }
-
-                        Debug.Assert(docID != postings.lastDocIDs[termID]);
-                    }
-
-                    docFreq++;
-                    Debug.Assert(docID < state.SegmentInfo.DocCount, "doc=" + docID + " maxDoc=" + state.SegmentInfo.DocCount);
-
-                    // NOTE: we could check here if the docID was
-                    // deleted, and skip it.  However, this is somewhat
-                    // dangerous because it can yield non-deterministic
-                    // behavior since we may see the docID before we see
-                    // the term that caused it to be deleted.  this
-                    // would mean some (but not all) of its postings may
-                    // make it into the index, which'd alter the docFreq
-                    // for those terms.  We could fix this by doing two
-                    // passes, ie first sweep marks all del docs, and
-                    // 2nd sweep does the real flush, but I suspect
-                    // that'd add too much time to flush.
-                    visitedDocs.Set(docID);
-                    postingsConsumer.StartDoc(docID, writeTermFreq ? termFreq : -1);
-                    if (docID < delDocLimit)
-                    {
-                        // Mark it deleted.  TODO: we could also skip
-                        // writing its postings; this would be
-                        // deterministic (just for this Term's docs).
-
-                        // TODO: can we do this reach-around in a cleaner way????
-                        if (state.LiveDocs == null)
-                        {
-                            state.LiveDocs = docState.docWriter.codec.LiveDocsFormat.NewLiveDocs(state.SegmentInfo.DocCount);
-                        }
-                        if (state.LiveDocs.Get(docID))
-                        {
-                            state.DelCountOnFlush++;
-                            state.LiveDocs.Clear(docID);
-                        }
-                    }
-
-                    totalTermFreq += termFreq;
-
-                    // Carefully copy over the prox + payload info,
-                    // changing the format to match Lucene's segment
-                    // format.
-
-                    if (readPositions || readOffsets)
-                    {
-                        // we did record positions (& maybe payload) and/or offsets
-                        int position = 0;
-                        int offset = 0;
-                        for (int j = 0; j < termFreq; j++)
-                        {
-                            BytesRef thisPayload;
-
-                            if (readPositions)
-                            {
-                                int code = prox.ReadVInt32();
-                                position += (int)((uint)code >> 1);
-
-                                if ((code & 1) != 0)
-                                {
-                                    // this position has a payload
-                                    int payloadLength = prox.ReadVInt32();
-
-                                    if (payload == null)
-                                    {
-                                        payload = new BytesRef();
-                                        payload.Bytes = new byte[payloadLength];
-                                    }
-                                    else if (payload.Bytes.Length < payloadLength)
-                                    {
-                                        payload.Grow(payloadLength);
-                                    }
-
-                                    prox.ReadBytes(payload.Bytes, 0, payloadLength);
-                                    payload.Length = payloadLength;
-                                    thisPayload = payload;
-                                }
-                                else
-                                {
-                                    thisPayload = null;
-                                }
-
-                                if (readOffsets)
-                                {
-                                    int startOffset = offset + prox.ReadVInt32();
-                                    int endOffset = startOffset + prox.ReadVInt32();
-                                    if (writePositions)
-                                    {
-                                        if (writeOffsets)
-                                        {
-                                            Debug.Assert(startOffset >= 0 && endOffset >= startOffset, "startOffset=" + startOffset + ",endOffset=" + endOffset + ",offset=" + offset);
-                                            postingsConsumer.AddPosition(position, thisPayload, startOffset, endOffset);
-                                        }
-                                        else
-                                        {
-                                            postingsConsumer.AddPosition(position, thisPayload, -1, -1);
-                                        }
-                                    }
-                                    offset = startOffset;
-                                }
-                                else if (writePositions)
-                                {
-                                    postingsConsumer.AddPosition(position, thisPayload, -1, -1);
-                                }
-                            }
-                        }
-                    }
-                    postingsConsumer.FinishDoc();
-                }
-                termsConsumer.FinishTerm(text, new TermStats(docFreq, writeTermFreq ? totalTermFreq : -1));
-                sumTotalTermFreq += totalTermFreq;
-                sumDocFreq += docFreq;
-            }
-
-            termsConsumer.Finish(writeTermFreq ? sumTotalTermFreq : -1, sumDocFreq, visitedDocs.Cardinality());
-        }
-    }
-}
\ No newline at end of file
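
The file above buffers per-term postings with a delta-plus-flag scheme: AddTerm stores (docDelta << 1) | 1 when the term's frequency in the previous document was exactly 1, otherwise it stores docDelta << 1 followed by the frequency as a separate VInt, and WriteProx uses the same low-bit trick to flag an attached payload. A minimal standalone C# sketch of that encoding (illustrative only, not part of the original source):

    using System;
    using System.Collections.Generic;

    static class FreqProxEncodingDemo
    {
        // Mirrors the buffering in AddTerm: the doc-ID delta is shifted left
        // one bit; a set low bit means "freq == 1", otherwise the frequency
        // follows as a separate value.
        static List<int> Encode((int DocID, int Freq)[] postings)
        {
            var codes = new List<int>();
            int lastDocID = 0;
            foreach (var (docID, freq) in postings)
            {
                int delta = docID - lastDocID;
                if (freq == 1)
                {
                    codes.Add((delta << 1) | 1); // low bit set: freq is implicitly 1
                }
                else
                {
                    codes.Add(delta << 1);       // low bit clear: explicit freq follows
                    codes.Add(freq);
                }
                lastDocID = docID;
            }
            return codes;
        }

        static void Main()
        {
            // Doc IDs 3, 7, 42 with frequencies 1, 4, 1:
            var codes = Encode(new[] { (3, 1), (7, 4), (42, 1) });
            Console.WriteLine(string.Join(", ", codes)); // prints: 7, 8, 4, 71
        }
    }

Flush reverses this in its freq.ReadVInt32() loop, accumulating deltas back into absolute doc IDs and reading the explicit frequency only when the low bit is clear.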

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/FrozenBufferedUpdates.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/FrozenBufferedUpdates.cs b/src/Lucene.Net.Core/Index/FrozenBufferedUpdates.cs
deleted file mode 100644
index 641699a..0000000
--- a/src/Lucene.Net.Core/Index/FrozenBufferedUpdates.cs
+++ /dev/null
@@ -1,287 +0,0 @@
-using System;
-using System.Collections;
-using System.Collections.Generic;
-using System.Diagnostics;
-using System.Linq;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using ArrayUtil = Lucene.Net.Util.ArrayUtil;
-    using BinaryDocValuesUpdate = Lucene.Net.Index.DocValuesUpdate.BinaryDocValuesUpdate;
-    using NumericDocValuesUpdate = Lucene.Net.Index.DocValuesUpdate.NumericDocValuesUpdate;
-    using Query = Lucene.Net.Search.Query;
-    using QueryAndLimit = Lucene.Net.Index.BufferedUpdatesStream.QueryAndLimit;
-    using RamUsageEstimator = Lucene.Net.Util.RamUsageEstimator;
-
-    /// <summary>
-    /// Holds buffered deletes and updates by term or query, once pushed. Pushed
-    /// deletes/updates are write-once, so we shift to a more memory-efficient
-    /// data structure to hold them. We don't hold docIDs because these are
-    /// applied on flush.
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    internal class FrozenBufferedUpdates
-    {
-        /* Query we often undercount (say 24 bytes), plus int. */
-        internal static readonly int BYTES_PER_DEL_QUERY = RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_INT32 + 24;
-
-        // Terms, in sorted order:
-        internal readonly PrefixCodedTerms terms;
-
-        internal int termCount; // just for debugging
-
-        // Parallel array of deleted query, and the docIDUpto for each
-        internal readonly Query[] queries;
-
-        internal readonly int[] queryLimits;
-
-        // numeric DV update term and their updates
-        internal readonly NumericDocValuesUpdate[] numericDVUpdates;
-
-        // binary DV update term and their updates
-        internal readonly BinaryDocValuesUpdate[] binaryDVUpdates;
-
-        internal readonly int bytesUsed;
-        internal readonly int numTermDeletes;
-        private long gen = -1; // assigned by BufferedDeletesStream once pushed
-        internal readonly bool isSegmentPrivate; // set to true iff this frozen packet represents
-        // segment-private deletes. In that case it should
-        // only contain queries.
-
-        public FrozenBufferedUpdates(BufferedUpdates deletes, bool isSegmentPrivate)
-        {
-            this.isSegmentPrivate = isSegmentPrivate;
-            Debug.Assert(!isSegmentPrivate || deletes.terms.Count == 0, "segment private package should only have del queries");
-            Term[] termsArray = deletes.terms.Keys.ToArray(/*new Term[deletes.Terms.Count]*/);
-            termCount = termsArray.Length;
-            ArrayUtil.TimSort(termsArray);
-            PrefixCodedTerms.Builder builder = new PrefixCodedTerms.Builder();
-            foreach (Term term in termsArray)
-            {
-                builder.Add(term);
-            }
-            terms = builder.Finish();
-
-            queries = new Query[deletes.queries.Count];
-            queryLimits = new int[deletes.queries.Count];
-            int upto = 0;
-            foreach (KeyValuePair<Query, int?> ent in deletes.queries)
-            {
-                queries[upto] = ent.Key;
-                if (ent.Value.HasValue)
-                {
-                    queryLimits[upto] = ent.Value.Value;
-                }
-                else
-                {
-                    // LUCENENET NOTE: According to this: http://stackoverflow.com/a/13914344
-                    // we are supposed to throw an exception in this case, rather than
-                    // silently fail.
-                    throw new NullReferenceException();
-                }
-                upto++;
-            }
-
-            // TODO if a Term affects multiple fields, we could keep the updates key'd by Term
-            // so that it maps to all fields it affects, sorted by their docUpto, and traverse
-            // that Term only once, applying the update to all fields that still need to be
-            // updated.
-            IList<NumericDocValuesUpdate> allNumericUpdates = new List<NumericDocValuesUpdate>();
-            int numericUpdatesSize = 0;
-            foreach (var numericUpdates in deletes.numericUpdates.Values)
-            {
-                foreach (NumericDocValuesUpdate update in numericUpdates.Values)
-                {
-                    allNumericUpdates.Add(update);
-                    numericUpdatesSize += update.GetSizeInBytes();
-                }
-            }
-            numericDVUpdates = allNumericUpdates.ToArray();
-
-            // TODO if a Term affects multiple fields, we could keep the updates key'd by Term
-            // so that it maps to all fields it affects, sorted by their docUpto, and traverse
-            // that Term only once, applying the update to all fields that still need to be
-            // updated.
-            IList<BinaryDocValuesUpdate> allBinaryUpdates = new List<BinaryDocValuesUpdate>();
-            int binaryUpdatesSize = 0;
-            foreach (var binaryUpdates in deletes.binaryUpdates.Values)
-            {
-                foreach (BinaryDocValuesUpdate update in binaryUpdates.Values)
-                {
-                    allBinaryUpdates.Add(update);
-                    binaryUpdatesSize += update.GetSizeInBytes();
-                }
-            }
-            binaryDVUpdates = allBinaryUpdates.ToArray();
-
-            bytesUsed = (int)terms.GetSizeInBytes() + queries.Length * BYTES_PER_DEL_QUERY + numericUpdatesSize + numericDVUpdates.Length * RamUsageEstimator.NUM_BYTES_OBJECT_REF + binaryUpdatesSize + binaryDVUpdates.Length * RamUsageEstimator.NUM_BYTES_OBJECT_REF;
-
-            numTermDeletes = deletes.numTermDeletes.Get();
-        }
-
-        public virtual long DelGen
-        {
-            set
-            {
-                Debug.Assert(this.gen == -1);
-                this.gen = value;
-            }
-            get
-            {
-                Debug.Assert(gen != -1);
-                return gen;
-            }
-        }
-
-        // LUCENENET NOTE: This was termsIterable() in Lucene
-        public virtual IEnumerable<Term> GetTermsEnumerable()
-        {
-            return new IterableAnonymousInnerClassHelper(this);
-        }
-
-#if FEATURE_SERIALIZABLE
-        [Serializable]
-#endif
-        private class IterableAnonymousInnerClassHelper : IEnumerable<Term>
-        {
-            private readonly FrozenBufferedUpdates outerInstance;
-
-            public IterableAnonymousInnerClassHelper(FrozenBufferedUpdates outerInstance)
-            {
-                this.outerInstance = outerInstance;
-            }
-
-            public virtual IEnumerator<Term> GetEnumerator()
-            {
-                return outerInstance.terms.GetEnumerator();
-            }
-
-            System.Collections.IEnumerator System.Collections.IEnumerable.GetEnumerator()
-            {
-                return GetEnumerator();
-            }
-        }
-
-        // LUCENENET NOTE: This was queriesIterable() in Lucene
-        public virtual IEnumerable<QueryAndLimit> GetQueriesEnumerable()
-        {
-            return new IterableAnonymousInnerClassHelper2(this);
-        }
-
-#if FEATURE_SERIALIZABLE
-        [Serializable]
-#endif
-        private class IterableAnonymousInnerClassHelper2 : IEnumerable<QueryAndLimit>
-        {
-            private readonly FrozenBufferedUpdates outerInstance;
-
-            public IterableAnonymousInnerClassHelper2(FrozenBufferedUpdates outerInstance)
-            {
-                this.outerInstance = outerInstance;
-            }
-
-            public virtual IEnumerator<QueryAndLimit> GetEnumerator()
-            {
-                return new IteratorAnonymousInnerClassHelper(this);
-            }
-
-            IEnumerator IEnumerable.GetEnumerator()
-            {
-                return GetEnumerator();
-            }
-
-#if FEATURE_SERIALIZABLE
-            [Serializable]
-#endif
-            private class IteratorAnonymousInnerClassHelper : IEnumerator<QueryAndLimit>
-            {
-                private readonly IterableAnonymousInnerClassHelper2 outerInstance;
-                private int upto, i;
-                private QueryAndLimit current;
-
-                public IteratorAnonymousInnerClassHelper(IterableAnonymousInnerClassHelper2 outerInstance)
-                {
-                    this.outerInstance = outerInstance;
-                    upto = this.outerInstance.outerInstance.queries.Length;
-                    i = 0;
-                }
-
-                public virtual bool MoveNext()
-                {
-                    if (i < upto)
-                    {
-                        current = new QueryAndLimit(outerInstance.outerInstance.queries[i], outerInstance.outerInstance.queryLimits[i]);
-                        i++;
-                        return true;
-                    }
-                    return false;
-                }
-
-                public virtual QueryAndLimit Current
-                {
-                    get
-                    {
-                        return current;
-                    }
-                }
-
-                object System.Collections.IEnumerator.Current
-                {
-                    get { return Current; }
-                }
-
-                public virtual void Reset()
-                {
-                    throw new NotSupportedException();
-                }
-
-                public void Dispose()
-                {
-                }
-            }
-        }
-
-        public override string ToString()
-        {
-            string s = "";
-            if (numTermDeletes != 0)
-            {
-                s += " " + numTermDeletes + " deleted terms (unique count=" + termCount + ")";
-            }
-            if (queries.Length != 0)
-            {
-                s += " " + queries.Length + " deleted queries";
-            }
-            if (bytesUsed != 0)
-            {
-                s += " bytesUsed=" + bytesUsed;
-            }
-
-            return s;
-        }
-
-        public virtual bool Any()
-        {
-            return termCount > 0 || queries.Length > 0 || numericDVUpdates.Length > 0 || binaryDVUpdates.Length > 0;
-        }
-    }
-}
\ No newline at end of file
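
A side note on the enumeration plumbing above: IterableAnonymousInnerClassHelper2 and its nested IteratorAnonymousInnerClassHelper are a literal port of Java anonymous inner classes. Idiomatic C# would express the same lazily evaluated sequence with an iterator method. A hypothetical equivalent, assuming the same queries/queryLimits fields and QueryAndLimit type as the file above:

    // Hypothetical replacement for GetQueriesEnumerable(): a "yield return"
    // iterator produces the same lazy sequence as the hand-written
    // IEnumerable/IEnumerator helper classes.
    public virtual IEnumerable<QueryAndLimit> GetQueriesEnumerable()
    {
        for (int i = 0; i < queries.Length; i++)
        {
            yield return new QueryAndLimit(queries[i], queryLimits[i]);
        }
    }

The compiler generates the state-machine enumerator, so MoveNext/Current/Reset need not be written by hand.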

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/IConcurrentMergeScheduler.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/IConcurrentMergeScheduler.cs b/src/Lucene.Net.Core/Index/IConcurrentMergeScheduler.cs
deleted file mode 100644
index 5f46979..0000000
--- a/src/Lucene.Net.Core/Index/IConcurrentMergeScheduler.cs
+++ /dev/null
@@ -1,34 +0,0 @@
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    // LUCENENET specific
-    public interface IConcurrentMergeScheduler : IMergeScheduler
-    {
-        int MaxThreadCount { get; }
-        int MaxMergeCount { get; }
-        int MergeThreadPriority { get; }
-
-        void SetMergeThreadPriority(int priority);
-        void Sync();
-        void SetMaxMergesAndThreads(int maxMergeCount, int maxThreadCount);
-
-        void SetSuppressExceptions();
-        void ClearSuppressExceptions();
-    }
-}
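
Because the interface above decouples callers from the concrete ConcurrentMergeScheduler class, configuration can be written against the abstraction. A hedged usage sketch (the member names are exactly those declared above; the chosen limits are arbitrary):

    // Illustrative only: tune any IConcurrentMergeScheduler through the
    // interface. Named arguments make the parameter order explicit
    // (maxMergeCount first, then maxThreadCount).
    static void ConfigureScheduler(IConcurrentMergeScheduler scheduler)
    {
        scheduler.SetMaxMergesAndThreads(maxMergeCount: 4, maxThreadCount: 2);
        scheduler.SetSuppressExceptions(); // typically used in tests to keep merge errors from propagating
    }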

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/IMergeScheduler.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/IMergeScheduler.cs b/src/Lucene.Net.Core/Index/IMergeScheduler.cs
deleted file mode 100644
index 69dfca0..0000000
--- a/src/Lucene.Net.Core/Index/IMergeScheduler.cs
+++ /dev/null
@@ -1,29 +0,0 @@
-using System;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    // LUCENENET specific
-    public interface IMergeScheduler : IDisposable
-    {
-        void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound);
-
-        IMergeScheduler Clone();
-    }
-}
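
For reference, a minimal stub showing what the contract above demands of an implementer. A real scheduler would fetch and run the writer's pending merges; this hypothetical no-op merely satisfies the interface:

    // Hypothetical skeleton of an IMergeScheduler implementation.
    internal sealed class NoOpMergeScheduler : IMergeScheduler
    {
        public void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound)
        {
            // Intentionally empty: with this scheduler, merges never run.
        }

        public IMergeScheduler Clone()
        {
            return new NoOpMergeScheduler();
        }

        public void Dispose()
        {
            // No resources to release.
        }
    }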

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/IndexCommit.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/IndexCommit.cs b/src/Lucene.Net.Core/Index/IndexCommit.cs
deleted file mode 100644
index 24fab91..0000000
--- a/src/Lucene.Net.Core/Index/IndexCommit.cs
+++ /dev/null
@@ -1,152 +0,0 @@
-using System;
-using System.Collections.Generic;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-    */
-
-    using Directory = Lucene.Net.Store.Directory;
-
-    /// <summary>
-    /// <p>Expert: represents a single commit into an index as seen by the
-    /// <seealso cref="IndexDeletionPolicy"/> or <seealso cref="IndexReader"/>.</p>
-    ///
-    /// <p> Changes to the content of an index are made visible
-    /// only after the writer who made that change commits by
-    /// writing a new segments file
-    /// (<code>segments_N</code>). This point in time, when the
-    /// act of writing a new segments file to the directory
-    /// is completed, is an index commit.</p>
-    ///
-    /// <p>Each index commit point has a unique segments file
-    /// associated with it. The segments file associated with a
-    /// later index commit point would have a larger N.</p>
-    ///
-    /// @lucene.experimental
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public abstract class IndexCommit : IComparable<IndexCommit>
-    {
-        /// <summary>
-        /// Get the segments file (<code>segments_N</code>) associated
-        /// with this commit point.
-        /// </summary>
-        public abstract string SegmentsFileName { get; }
-
-        /// <summary>
-        /// Returns all index files referenced by this commit point.
-        /// </summary>
-        public abstract ICollection<string> FileNames { get; }
-
-        /// <summary>
-        /// Returns the <seealso cref="Directory"/> for the index.
-        /// </summary>
-        public abstract Directory Directory { get; }
-
-        /// <summary>
-        /// Delete this commit point.  This only applies when using
-        /// the commit point in the context of IndexWriter's
-        /// IndexDeletionPolicy.
-        /// <p>
-        /// Upon calling this, the writer is notified that this commit
-        /// point should be deleted.
-        /// <p>
-        /// The decision that a commit point should be deleted is made by the <seealso cref="IndexDeletionPolicy"/> in effect,
-        /// so this should only be called from that policy's <seealso cref="IndexDeletionPolicy#onInit onInit()"/> or
-        /// <seealso cref="IndexDeletionPolicy#onCommit onCommit()"/> methods.
-        /// </summary>
-        public abstract void Delete();
-
-        /// <summary>
-        /// Returns true if this commit should be deleted; this is
-        ///  only used by <seealso cref="IndexWriter"/> after invoking the
-        ///  <seealso cref="IndexDeletionPolicy"/>.
-        /// </summary>
-        public abstract bool IsDeleted { get; }
-
-        /// <summary>
-        /// Returns number of segments referenced by this commit. </summary>
-        public abstract int SegmentCount { get; }
-
-        /// <summary>
-        /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
-        /// </summary>
-        protected IndexCommit()
-        {
-        }
-
-        /// <summary>
-        /// Two IndexCommits are equal if both their Directory and versions are equal. </summary>
-        public override bool Equals(object other)
-        {
-            if (other is IndexCommit)
-            {
-                IndexCommit otherCommit = (IndexCommit)other;
-                return otherCommit.Directory == Directory && otherCommit.Generation == Generation;
-            }
-            else
-            {
-                return false;
-            }
-        }
-
-        public override int GetHashCode()
-        {
-            return Directory.GetHashCode() + Convert.ToInt64(Generation).GetHashCode();
-        }
-
-        /// <summary>
-        /// Returns the generation (the _N in segments_N) for this
-        ///  IndexCommit
-        /// </summary>
-        public abstract long Generation { get; }
-
-        /// <summary>
-        /// Returns userData, previously passed to
-        ///  <seealso cref="IndexWriter#setCommitData(Map)"/> for this commit.
-        ///  The map is String -> String.
-        /// </summary>
-        public abstract IDictionary<string, string> UserData { get; }
-
-        public virtual int CompareTo(IndexCommit commit)
-        {
-            if (Directory != commit.Directory)
-            {
-                throw new System.NotSupportedException("cannot compare IndexCommits from different Directory instances");
-            }
-
-            long gen = Generation;
-            long comgen = commit.Generation;
-            if (gen < comgen)
-            {
-                return -1;
-            }
-            else if (gen > comgen)
-            {
-                return 1;
-            }
-            else
-            {
-                return 0;
-            }
-        }
-    }
-}
\ No newline at end of file
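
Since CompareTo orders commits of the same Directory by Generation, picking the most recent commit from a policy's list reduces to a max scan. An illustrative helper (not part of the original file; assumes the usual System.Collections.Generic using):

    // Returns the newest commit in a non-empty list from one Directory.
    static IndexCommit Newest(IList<IndexCommit> commits)
    {
        IndexCommit newest = commits[0];
        for (int i = 1; i < commits.Count; i++)
        {
            if (commits[i].CompareTo(newest) > 0) // larger Generation wins
            {
                newest = commits[i];
            }
        }
        return newest;
    }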

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/IndexDeletionPolicy.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/IndexDeletionPolicy.cs b/src/Lucene.Net.Core/Index/IndexDeletionPolicy.cs
deleted file mode 100644
index d534cdd..0000000
--- a/src/Lucene.Net.Core/Index/IndexDeletionPolicy.cs
+++ /dev/null
@@ -1,116 +0,0 @@
-using System;
-using System.Collections.Generic;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    /// <summary>
-    /// <p>Expert: policy for deletion of stale <seealso cref="IndexCommit index commits"/>.
-    ///
-    /// <p>Implement this interface, and pass it to one
-    /// of the <seealso cref="IndexWriter"/> or <seealso cref="IndexReader"/>
-    /// constructors, to customize when older
-    /// <seealso cref="IndexCommit point-in-time commits"/>
-    /// are deleted from the index directory.  The default deletion policy
-    /// is <seealso cref="KeepOnlyLastCommitDeletionPolicy"/>, which always
-    /// removes old commits as soon as a new commit is done (this
-    /// matches the behavior before 2.2).</p>
-    ///
-    /// <p>One expected use case for this (and the reason why it
-    /// was first created) is to work around problems with an
-    /// index directory accessed via filesystems like NFS because
-    /// NFS does not provide the "delete on last close" semantics
-    /// that Lucene's "point in time" search normally relies on.
-    /// By implementing a custom deletion policy, such as "a
-    /// commit is only removed once it has been stale for more
-    /// than X minutes", you can give your readers time to
-    /// refresh to the new commit before <seealso cref="IndexWriter"/>
-    /// removes the old commits.  Note that doing so will
-    /// increase the storage requirements of the index.  See <a
-    /// target="top"
-    /// href="http://issues.apache.org/jira/browse/LUCENE-710">LUCENE-710</a>
-    /// for details.</p>
-    ///
-    /// <p>Implementers of sub-classes should make sure that <seealso cref="#clone()"/>
-    /// returns an independent instance able to work with any other <seealso cref="IndexWriter"/>
-    /// or <seealso cref="Directory"/> instance.</p>
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public abstract class IndexDeletionPolicy
-    {
-        /// <summary>
-        /// Sole constructor, typically called by sub-classes constructors. </summary>
-        protected IndexDeletionPolicy()
-        {
-        }
-
-        /// <summary>
-        /// <p>This is called once when a writer is first
-        /// instantiated to give the policy a chance to remove old
-        /// commit points.</p>
-        ///
-        /// <p>The writer locates all index commits present in the
-        /// index directory and calls this method.  The policy may
-        /// choose to delete some of the commit points, doing so by
-        /// calling method <seealso cref="IndexCommit#delete delete()"/>
-        /// of <seealso cref="IndexCommit"/>.</p>
-        ///
-        /// <p><u>Note:</u> the last CommitPoint is the most recent one,
-        /// i.e. the "front index state". Be careful not to delete it,
-        /// unless you know for sure what you are doing, and unless
-        /// you can afford to lose the index content while doing that.
-        /// </summary>
-        /// <param name="commits"> List of current
-        /// <seealso cref="IndexCommit point-in-time commits"/>,
-        ///  sorted by age (the 0th one is the oldest commit).
-        ///  Note that for a new index this method is invoked with
-        ///  an empty list. </param>
-        public abstract void OnInit<T>(IList<T> commits) where T : IndexCommit;
-
-        /// <summary>
-        /// <p>This is called each time the writer completes a commit.
-        /// This gives the policy a chance to remove old commit points
-        /// with each commit.</p>
-        ///
-        /// <p>The policy may now choose to delete old commit points
-        /// by calling method <seealso cref="IndexCommit#delete delete()"/>
-        /// of <seealso cref="IndexCommit"/>.</p>
-        ///
-        /// <p>This method is only called when <seealso cref="IndexWriter#commit"/>
-        /// or <seealso cref="IndexWriter#close"/> is called, or possibly
-        /// not at all if <seealso cref="IndexWriter#rollback"/> is called.</p>
-        ///
-        /// <p><u>Note:</u> the last CommitPoint is the most recent one,
-        /// i.e. the "front index state". Be careful not to delete it,
-        /// unless you know for sure what you are doing, and unless
-        /// you can afford to lose the index content while doing that.
-        /// </summary>
-        /// <param name="commits"> List of <seealso cref="IndexCommit"/>,
-        ///  sorted by age (the 0th one is the oldest commit). </param>
-        public abstract void OnCommit<T>(IList<T> commits) where T : IndexCommit;
-
-        public virtual object Clone()
-        {
-            return (IndexDeletionPolicy)base.MemberwiseClone();
-        }
-    }
-}
\ No newline at end of file
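
As a concrete illustration of the contract above, here is a hypothetical sub-class that retains only the N most recent commits. The commits list is sorted oldest-first (0th = oldest), so everything before the final n entries is deleted; KeepOnlyLastCommitDeletionPolicy, named in the class doc, is the shipped policy for the n == 1 case. Assumes the same namespace and usings as the file above:

    // Hypothetical policy: keep only the n most recent commits.
    internal sealed class KeepLastNDeletionPolicy : IndexDeletionPolicy
    {
        private readonly int n;

        public KeepLastNDeletionPolicy(int n)
        {
            this.n = n;
        }

        public override void OnInit<T>(IList<T> commits)
        {
            // Apply the same pruning when the writer first starts up.
            OnCommit(commits);
        }

        public override void OnCommit<T>(IList<T> commits)
        {
            // commits[0] is the oldest; delete everything but the last n.
            for (int i = 0; i < commits.Count - n; i++)
            {
                commits[i].Delete();
            }
        }
    }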

