lucenenet-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From nightowl...@apache.org
Subject [33/62] [abbrv] [partial] lucenenet git commit: Renamed Lucene.Net.Core folder Lucene.Net because the dotnet.exe pack command doesn't allow creating a NuGet package with a different name than its folder. Working around it with the script was much more co
Date Tue, 04 Apr 2017 17:19:39 GMT
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Codecs/Lucene41/Lucene41PostingsWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Codecs/Lucene41/Lucene41PostingsWriter.cs b/src/Lucene.Net.Core/Codecs/Lucene41/Lucene41PostingsWriter.cs
deleted file mode 100644
index dcd68f1..0000000
--- a/src/Lucene.Net.Core/Codecs/Lucene41/Lucene41PostingsWriter.cs
+++ /dev/null
@@ -1,692 +0,0 @@
-using System;
-using System.Diagnostics;
-
-namespace Lucene.Net.Codecs.Lucene41
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using ArrayUtil = Util.ArrayUtil;
-    using BytesRef = Util.BytesRef;
-    using DataOutput = Store.DataOutput;
-    using FieldInfo = Index.FieldInfo;
-    using IndexFileNames = Index.IndexFileNames;
-    using IndexOptions = Lucene.Net.Index.IndexOptions;
-    using IndexOutput = Store.IndexOutput;
-    using IOUtils = Util.IOUtils;
-    using PackedInt32s = Util.Packed.PackedInt32s;
-    using SegmentWriteState = Index.SegmentWriteState;
-    using TermState = Index.TermState;
-
-    /// <summary>
-    /// Concrete class that writes docId(maybe frq,pos,offset,payloads) list
-    /// with postings format.
-    ///
-    /// Postings list for each term will be stored separately.
-    /// </summary>
-    /// <seealso cref= Lucene41SkipWriter for details about skipping setting and postings layout.
-    /// @lucene.experimental </seealso>
-    public sealed class Lucene41PostingsWriter : PostingsWriterBase
-    {
-        /// <summary>
-        /// Expert: The maximum number of skip levels. Smaller values result in
-        /// slightly smaller indexes, but slower skipping in big posting lists.
-        /// </summary>
-        internal const int maxSkipLevels = 10;
-
-        internal const string TERMS_CODEC = "Lucene41PostingsWriterTerms";
-        internal const string DOC_CODEC = "Lucene41PostingsWriterDoc";
-        internal const string POS_CODEC = "Lucene41PostingsWriterPos";
-        internal const string PAY_CODEC = "Lucene41PostingsWriterPay";
-
-        // Increment version to change it
-        internal const int VERSION_START = 0;
-
-        internal const int VERSION_META_ARRAY = 1;
-        internal const int VERSION_CHECKSUM = 2;
-        internal const int VERSION_CURRENT = VERSION_CHECKSUM;
-
-        internal IndexOutput docOut;
-        internal IndexOutput posOut;
-        internal IndexOutput payOut;
-
-        internal static readonly Int32BlockTermState emptyState = new Int32BlockTermState();
-        internal Int32BlockTermState lastState;
-
-        // How current field indexes postings:
-        private bool fieldHasFreqs;
-
-        private bool fieldHasPositions;
-        private bool fieldHasOffsets;
-        private bool fieldHasPayloads;
-
-        // Holds starting file pointers for current term:
-        private long docStartFP;
-
-        private long posStartFP;
-        private long payStartFP;
-
-        internal readonly int[] docDeltaBuffer;
-        internal readonly int[] freqBuffer;
-        private int docBufferUpto;
-
-        internal readonly int[] posDeltaBuffer;
-        internal readonly int[] payloadLengthBuffer;
-        internal readonly int[] offsetStartDeltaBuffer;
-        internal readonly int[] offsetLengthBuffer;
-        private int posBufferUpto;
-
-        private byte[] payloadBytes;
-        private int payloadByteUpto;
-
-        private int lastBlockDocID;
-        private long lastBlockPosFP;
-        private long lastBlockPayFP;
-        private int lastBlockPosBufferUpto;
-        private int lastBlockPayloadByteUpto;
-
-        private int lastDocID;
-        private int lastPosition;
-        private int lastStartOffset;
-        private int docCount;
-
-        internal readonly byte[] encoded;
-
-        private readonly ForUtil forUtil;
-        private readonly Lucene41SkipWriter skipWriter;
-
-        /// <summary>
-        /// Creates a postings writer with the specified PackedInts overhead ratio </summary>
-        // TODO: does this ctor even make sense?
-        public Lucene41PostingsWriter(SegmentWriteState state, float acceptableOverheadRatio)
-            : base()
-        {
-            docOut = state.Directory.CreateOutput(IndexFileNames.SegmentFileName(state.SegmentInfo.Name, state.SegmentSuffix, Lucene41PostingsFormat.DOC_EXTENSION), state.Context);
-            IndexOutput posOut = null;
-            IndexOutput payOut = null;
-            bool success = false;
-            try
-            {
-                CodecUtil.WriteHeader(docOut, DOC_CODEC, VERSION_CURRENT);
-                forUtil = new ForUtil(acceptableOverheadRatio, docOut);
-                if (state.FieldInfos.HasProx)
-                {
-                    posDeltaBuffer = new int[ForUtil.MAX_DATA_SIZE];
-                    posOut = state.Directory.CreateOutput(IndexFileNames.SegmentFileName(state.SegmentInfo.Name, state.SegmentSuffix, Lucene41PostingsFormat.POS_EXTENSION), state.Context);
-                    CodecUtil.WriteHeader(posOut, POS_CODEC, VERSION_CURRENT);
-
-                    if (state.FieldInfos.HasPayloads)
-                    {
-                        payloadBytes = new byte[128];
-                        payloadLengthBuffer = new int[ForUtil.MAX_DATA_SIZE];
-                    }
-                    else
-                    {
-                        payloadBytes = null;
-                        payloadLengthBuffer = null;
-                    }
-
-                    if (state.FieldInfos.HasOffsets)
-                    {
-                        offsetStartDeltaBuffer = new int[ForUtil.MAX_DATA_SIZE];
-                        offsetLengthBuffer = new int[ForUtil.MAX_DATA_SIZE];
-                    }
-                    else
-                    {
-                        offsetStartDeltaBuffer = null;
-                        offsetLengthBuffer = null;
-                    }
-
-                    if (state.FieldInfos.HasPayloads || state.FieldInfos.HasOffsets)
-                    {
-                        payOut = state.Directory.CreateOutput(IndexFileNames.SegmentFileName(state.SegmentInfo.Name, state.SegmentSuffix, Lucene41PostingsFormat.PAY_EXTENSION), state.Context);
-                        CodecUtil.WriteHeader(payOut, PAY_CODEC, VERSION_CURRENT);
-                    }
-                }
-                else
-                {
-                    posDeltaBuffer = null;
-                    payloadLengthBuffer = null;
-                    offsetStartDeltaBuffer = null;
-                    offsetLengthBuffer = null;
-                    payloadBytes = null;
-                }
-                this.payOut = payOut;
-                this.posOut = posOut;
-                success = true;
-            }
-            finally
-            {
-                if (!success)
-                {
-                    IOUtils.CloseWhileHandlingException(docOut, posOut, payOut);
-                }
-            }
-
-            docDeltaBuffer = new int[ForUtil.MAX_DATA_SIZE];
-            freqBuffer = new int[ForUtil.MAX_DATA_SIZE];
-
-            // TODO: should we try skipping every 2/4 blocks...?
-            skipWriter = new Lucene41SkipWriter(maxSkipLevels, Lucene41PostingsFormat.BLOCK_SIZE, state.SegmentInfo.DocCount, docOut, posOut, payOut);
-
-            encoded = new byte[ForUtil.MAX_ENCODED_SIZE];
-        }
-
-        /// <summary>
-        /// Creates a postings writer with <code>PackedInts.COMPACT</code> </summary>
-        public Lucene41PostingsWriter(SegmentWriteState state)
-            : this(state, PackedInt32s.COMPACT)
-        {
-        }
-
-        /// <summary>
-        /// NOTE: This was IntBlockTermState in Lucene
-        /// </summary>
-        public sealed class Int32BlockTermState : BlockTermState
-        {
-            internal long docStartFP = 0;
-            internal long posStartFP = 0;
-            internal long payStartFP = 0;
-            internal long skipOffset = -1;
-            internal long lastPosBlockOffset = -1;
-
-            // docid when there is a single pulsed posting, otherwise -1
-            // freq is always implicitly totalTermFreq in this case.
-            internal int singletonDocID = -1;
-
-            public override object Clone()
-            {
-                Int32BlockTermState other = new Int32BlockTermState();
-                other.CopyFrom(this);
-                return other;
-            }
-
-            public override void CopyFrom(TermState other)
-            {
-                base.CopyFrom(other);
-                Int32BlockTermState other2 = (Int32BlockTermState)other;
-                docStartFP = other2.docStartFP;
-                posStartFP = other2.posStartFP;
-                payStartFP = other2.payStartFP;
-                lastPosBlockOffset = other2.lastPosBlockOffset;
-                skipOffset = other2.skipOffset;
-                singletonDocID = other2.singletonDocID;
-            }
-
-            public override string ToString()
-            {
-                return base.ToString() + " docStartFP=" + docStartFP + " posStartFP=" + posStartFP + " payStartFP=" + payStartFP + " lastPosBlockOffset=" + lastPosBlockOffset + " singletonDocID=" + singletonDocID;
-            }
-        }
-
-        public override BlockTermState NewTermState()
-        {
-            return new Int32BlockTermState();
-        }
-
-        public override void Init(IndexOutput termsOut)
-        {
-            CodecUtil.WriteHeader(termsOut, TERMS_CODEC, VERSION_CURRENT);
-            termsOut.WriteVInt32(Lucene41PostingsFormat.BLOCK_SIZE);
-        }
-
-        public override int SetField(FieldInfo fieldInfo)
-        {
-            IndexOptions indexOptions = fieldInfo.IndexOptions;
-            fieldHasFreqs = indexOptions.CompareTo(IndexOptions.DOCS_AND_FREQS) >= 0;
-            fieldHasPositions = indexOptions.CompareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
-            fieldHasOffsets = indexOptions.CompareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
-            fieldHasPayloads = fieldInfo.HasPayloads;
-            skipWriter.SetField(fieldHasPositions, fieldHasOffsets, fieldHasPayloads);
-            lastState = emptyState;
-            if (fieldHasPositions)
-            {
-                if (fieldHasPayloads || fieldHasOffsets)
-                {
-                    return 3; // doc + pos + pay FP
-                }
-                else
-                {
-                    return 2; // doc + pos FP
-                }
-            }
-            else
-            {
-                return 1; // doc FP
-            }
-        }
-
-        public override void StartTerm()
-        {
-            docStartFP = docOut.GetFilePointer();
-            if (fieldHasPositions)
-            {
-                posStartFP = posOut.GetFilePointer();
-                if (fieldHasPayloads || fieldHasOffsets)
-                {
-                    payStartFP = payOut.GetFilePointer();
-                }
-            }
-            lastDocID = 0;
-            lastBlockDocID = -1;
-            // if (DEBUG) {
-            //   System.out.println("FPW.startTerm startFP=" + docStartFP);
-            // }
-            skipWriter.ResetSkip();
-        }
-
-        public override void StartDoc(int docId, int termDocFreq)
-        {
-            // if (DEBUG) {
-            //   System.out.println("FPW.startDoc docID["+docBufferUpto+"]=" + docID);
-            // }
-            // Have collected a block of docs, and get a new doc.
-            // Should write skip data as well as postings list for
-            // current block.
-            if (lastBlockDocID != -1 && docBufferUpto == 0)
-            {
-                // if (DEBUG) {
-                //   System.out.println("  bufferSkip at writeBlock: lastDocID=" + lastBlockDocID + " docCount=" + (docCount-1));
-                // }
-                skipWriter.BufferSkip(lastBlockDocID, docCount, lastBlockPosFP, lastBlockPayFP, lastBlockPosBufferUpto, lastBlockPayloadByteUpto);
-            }
-
-            int docDelta = docId - lastDocID;
-
-            if (docId < 0 || (docCount > 0 && docDelta <= 0))
-            {
-                throw new Exception("docs out of order (" + docId + " <= " + lastDocID + " ) (docOut: " + docOut + ")");
-            }
-
-            docDeltaBuffer[docBufferUpto] = docDelta;
-            // if (DEBUG) {
-            //   System.out.println("  docDeltaBuffer[" + docBufferUpto + "]=" + docDelta);
-            // }
-            if (fieldHasFreqs)
-            {
-                freqBuffer[docBufferUpto] = termDocFreq;
-            }
-            docBufferUpto++;
-            docCount++;
-
-            if (docBufferUpto == Lucene41PostingsFormat.BLOCK_SIZE)
-            {
-                // if (DEBUG) {
-                //   System.out.println("  write docDelta block @ fp=" + docOut.getFilePointer());
-                // }
-                forUtil.WriteBlock(docDeltaBuffer, encoded, docOut);
-                if (fieldHasFreqs)
-                {
-                    // if (DEBUG) {
-                    //   System.out.println("  write freq block @ fp=" + docOut.getFilePointer());
-                    // }
-                    forUtil.WriteBlock(freqBuffer, encoded, docOut);
-                }
-                // NOTE: don't set docBufferUpto back to 0 here;
-                // finishDoc will do so (because it needs to see that
-                // the block was filled so it can save skip data)
-            }
-
-            lastDocID = docId;
-            lastPosition = 0;
-            lastStartOffset = 0;
-        }
-
-        /// <summary>
-        /// Add a new position & payload </summary>
-        public override void AddPosition(int position, BytesRef payload, int startOffset, int endOffset)
-        {
-            // if (DEBUG) {
-            //   System.out.println("FPW.addPosition pos=" + position + " posBufferUpto=" + posBufferUpto + (fieldHasPayloads ? " payloadByteUpto=" + payloadByteUpto: ""));
-            // }
-            posDeltaBuffer[posBufferUpto] = position - lastPosition;
-            if (fieldHasPayloads)
-            {
-                if (payload == null || payload.Length == 0)
-                {
-                    // no payload
-                    payloadLengthBuffer[posBufferUpto] = 0;
-                }
-                else
-                {
-                    payloadLengthBuffer[posBufferUpto] = payload.Length;
-                    if (payloadByteUpto + payload.Length > payloadBytes.Length)
-                    {
-                        payloadBytes = ArrayUtil.Grow(payloadBytes, payloadByteUpto + payload.Length);
-                    }
-                    Array.Copy(payload.Bytes, payload.Offset, payloadBytes, payloadByteUpto, payload.Length);
-                    payloadByteUpto += payload.Length;
-                }
-            }
-
-            if (fieldHasOffsets)
-            {
-                Debug.Assert(startOffset >= lastStartOffset);
-                Debug.Assert(endOffset >= startOffset);
-                offsetStartDeltaBuffer[posBufferUpto] = startOffset - lastStartOffset;
-                offsetLengthBuffer[posBufferUpto] = endOffset - startOffset;
-                lastStartOffset = startOffset;
-            }
-
-            posBufferUpto++;
-            lastPosition = position;
-            if (posBufferUpto == Lucene41PostingsFormat.BLOCK_SIZE)
-            {
-                // if (DEBUG) {
-                //   System.out.println("  write pos bulk block @ fp=" + posOut.getFilePointer());
-                // }
-                forUtil.WriteBlock(posDeltaBuffer, encoded, posOut);
-
-                if (fieldHasPayloads)
-                {
-                    forUtil.WriteBlock(payloadLengthBuffer, encoded, payOut);
-                    payOut.WriteVInt32(payloadByteUpto);
-                    payOut.WriteBytes(payloadBytes, 0, payloadByteUpto);
-                    payloadByteUpto = 0;
-                }
-                if (fieldHasOffsets)
-                {
-                    forUtil.WriteBlock(offsetStartDeltaBuffer, encoded, payOut);
-                    forUtil.WriteBlock(offsetLengthBuffer, encoded, payOut);
-                }
-                posBufferUpto = 0;
-            }
-        }
-
-        public override void FinishDoc()
-        {
-            // Since we don't know df for current term, we had to buffer
-            // those skip data for each block, and when a new doc comes,
-            // write them to skip file.
-            if (docBufferUpto == Lucene41PostingsFormat.BLOCK_SIZE)
-            {
-                lastBlockDocID = lastDocID;
-                if (posOut != null)
-                {
-                    if (payOut != null)
-                    {
-                        lastBlockPayFP = payOut.GetFilePointer();
-                    }
-                    lastBlockPosFP = posOut.GetFilePointer();
-                    lastBlockPosBufferUpto = posBufferUpto;
-                    lastBlockPayloadByteUpto = payloadByteUpto;
-                }
-                // if (DEBUG) {
-                //   System.out.println("  docBufferUpto="+docBufferUpto+" now get lastBlockDocID="+lastBlockDocID+" lastBlockPosFP=" + lastBlockPosFP + " lastBlockPosBufferUpto=" +  lastBlockPosBufferUpto + " lastBlockPayloadByteUpto=" + lastBlockPayloadByteUpto);
-                // }
-                docBufferUpto = 0;
-            }
-        }
-
-        /// <summary>
-        /// Called when we are done adding docs to this term </summary>
-        public override void FinishTerm(BlockTermState state)
-        {
-            Int32BlockTermState state2 = (Int32BlockTermState)state;
-            Debug.Assert(state2.DocFreq > 0);
-
-            // TODO: wasteful we are counting this (counting # docs
-            // for this term) in two places?
-            Debug.Assert(state2.DocFreq == docCount, state2.DocFreq + " vs " + docCount);
-
-            // if (DEBUG) {
-            //   System.out.println("FPW.finishTerm docFreq=" + state2.docFreq);
-            // }
-
-            // if (DEBUG) {
-            //   if (docBufferUpto > 0) {
-            //     System.out.println("  write doc/freq vInt block (count=" + docBufferUpto + ") at fp=" + docOut.getFilePointer() + " docStartFP=" + docStartFP);
-            //   }
-            // }
-
-            // docFreq == 1, don't write the single docid/freq to a separate file along with a pointer to it.
-            int singletonDocID;
-            if (state2.DocFreq == 1)
-            {
-                // pulse the singleton docid into the term dictionary, freq is implicitly totalTermFreq
-                singletonDocID = docDeltaBuffer[0];
-            }
-            else
-            {
-                singletonDocID = -1;
-                // vInt encode the remaining doc deltas and freqs:
-                for (int i = 0; i < docBufferUpto; i++)
-                {
-                    int docDelta = docDeltaBuffer[i];
-                    int freq = freqBuffer[i];
-                    if (!fieldHasFreqs)
-                    {
-                        docOut.WriteVInt32(docDelta);
-                    }
-                    else if (freqBuffer[i] == 1)
-                    {
-                        docOut.WriteVInt32((docDelta << 1) | 1);
-                    }
-                    else
-                    {
-                        docOut.WriteVInt32(docDelta << 1);
-                        docOut.WriteVInt32(freq);
-                    }
-                }
-            }
-
-            long lastPosBlockOffset;
-
-            if (fieldHasPositions)
-            {
-                // if (DEBUG) {
-                //   if (posBufferUpto > 0) {
-                //     System.out.println("  write pos vInt block (count=" + posBufferUpto + ") at fp=" + posOut.getFilePointer() + " posStartFP=" + posStartFP + " hasPayloads=" + fieldHasPayloads + " hasOffsets=" + fieldHasOffsets);
-                //   }
-                // }
-
-                // totalTermFreq is just total number of positions(or payloads, or offsets)
-                // associated with current term.
-                Debug.Assert(state2.TotalTermFreq != -1);
-                if (state2.TotalTermFreq > Lucene41PostingsFormat.BLOCK_SIZE)
-                {
-                    // record file offset for last pos in last block
-                    lastPosBlockOffset = posOut.GetFilePointer() - posStartFP;
-                }
-                else
-                {
-                    lastPosBlockOffset = -1;
-                }
-                if (posBufferUpto > 0)
-                {
-                    // TODO: should we send offsets/payloads to
-                    // .pay...?  seems wasteful (have to store extra
-                    // vLong for low (< BLOCK_SIZE) DF terms = vast vast
-                    // majority)
-
-                    // vInt encode the remaining positions/payloads/offsets:
-                    int lastPayloadLength = -1; // force first payload length to be written
-                    int lastOffsetLength = -1; // force first offset length to be written
-                    int payloadBytesReadUpto = 0;
-                    for (int i = 0; i < posBufferUpto; i++)
-                    {
-                        int posDelta = posDeltaBuffer[i];
-                        if (fieldHasPayloads)
-                        {
-                            int payloadLength = payloadLengthBuffer[i];
-                            if (payloadLength != lastPayloadLength)
-                            {
-                                lastPayloadLength = payloadLength;
-                                posOut.WriteVInt32((posDelta << 1) | 1);
-                                posOut.WriteVInt32(payloadLength);
-                            }
-                            else
-                            {
-                                posOut.WriteVInt32(posDelta << 1);
-                            }
-
-                            // if (DEBUG) {
-                            //   System.out.println("        i=" + i + " payloadLen=" + payloadLength);
-                            // }
-
-                            if (payloadLength != 0)
-                            {
-                                // if (DEBUG) {
-                                //   System.out.println("          write payload @ pos.fp=" + posOut.getFilePointer());
-                                // }
-                                posOut.WriteBytes(payloadBytes, payloadBytesReadUpto, payloadLength);
-                                payloadBytesReadUpto += payloadLength;
-                            }
-                        }
-                        else
-                        {
-                            posOut.WriteVInt32(posDelta);
-                        }
-
-                        if (fieldHasOffsets)
-                        {
-                            // if (DEBUG) {
-                            //   System.out.println("          write offset @ pos.fp=" + posOut.getFilePointer());
-                            // }
-                            int delta = offsetStartDeltaBuffer[i];
-                            int length = offsetLengthBuffer[i];
-                            if (length == lastOffsetLength)
-                            {
-                                posOut.WriteVInt32(delta << 1);
-                            }
-                            else
-                            {
-                                posOut.WriteVInt32(delta << 1 | 1);
-                                posOut.WriteVInt32(length);
-                                lastOffsetLength = length;
-                            }
-                        }
-                    }
-
-                    if (fieldHasPayloads)
-                    {
-                        Debug.Assert(payloadBytesReadUpto == payloadByteUpto);
-                        payloadByteUpto = 0;
-                    }
-                }
-                // if (DEBUG) {
-                //   System.out.println("  totalTermFreq=" + state.totalTermFreq + " lastPosBlockOffset=" + lastPosBlockOffset);
-                // }
-            }
-            else
-            {
-                lastPosBlockOffset = -1;
-            }
-
-            long skipOffset;
-            if (docCount > Lucene41PostingsFormat.BLOCK_SIZE)
-            {
-                skipOffset = skipWriter.WriteSkip(docOut) - docStartFP;
-
-                // if (DEBUG) {
-                //   System.out.println("skip packet " + (docOut.getFilePointer() - (docStartFP + skipOffset)) + " bytes");
-                // }
-            }
-            else
-            {
-                skipOffset = -1;
-                // if (DEBUG) {
-                //   System.out.println("  no skip: docCount=" + docCount);
-                // }
-            }
-            // if (DEBUG) {
-            //   System.out.println("  payStartFP=" + payStartFP);
-            // }
-            state2.docStartFP = docStartFP;
-            state2.posStartFP = posStartFP;
-            state2.payStartFP = payStartFP;
-            state2.singletonDocID = singletonDocID;
-            state2.skipOffset = skipOffset;
-            state2.lastPosBlockOffset = lastPosBlockOffset;
-            docBufferUpto = 0;
-            posBufferUpto = 0;
-            lastDocID = 0;
-            docCount = 0;
-        }
-
-        public override void EncodeTerm(long[] longs, DataOutput @out, FieldInfo fieldInfo, BlockTermState state, bool absolute)
-        {
-            Int32BlockTermState state2 = (Int32BlockTermState)state;
-            if (absolute)
-            {
-                lastState = emptyState;
-            }
-            longs[0] = state2.docStartFP - lastState.docStartFP;
-            if (fieldHasPositions)
-            {
-                longs[1] = state2.posStartFP - lastState.posStartFP;
-                if (fieldHasPayloads || fieldHasOffsets)
-                {
-                    longs[2] = state2.payStartFP - lastState.payStartFP;
-                }
-            }
-            if (state2.singletonDocID != -1)
-            {
-                @out.WriteVInt32(state2.singletonDocID);
-            }
-            if (fieldHasPositions)
-            {
-                if (state2.lastPosBlockOffset != -1)
-                {
-                    @out.WriteVInt64(state2.lastPosBlockOffset);
-                }
-            }
-            if (state2.skipOffset != -1)
-            {
-                @out.WriteVInt64(state2.skipOffset);
-            }
-            lastState = state2;
-        }
-
-        protected override void Dispose(bool disposing)
-        {
-            if (disposing)
-            {
-                // TODO: add a finish() at least to PushBase? DV too...?
-                bool success = false;
-                try
-                {
-                    if (docOut != null)
-                    {
-                        CodecUtil.WriteFooter(docOut);
-                    }
-                    if (posOut != null)
-                    {
-                        CodecUtil.WriteFooter(posOut);
-                    }
-                    if (payOut != null)
-                    {
-                        CodecUtil.WriteFooter(payOut);
-                    }
-                    success = true;
-                }
-                finally
-                {
-                    if (success)
-                    {
-                        IOUtils.Close(docOut, posOut, payOut);
-                    }
-                    else
-                    {
-                        IOUtils.CloseWhileHandlingException(docOut, posOut, payOut);
-                    }
-                    docOut = posOut = payOut = null;
-                }
-            }
-        }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Codecs/Lucene41/Lucene41SkipReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Codecs/Lucene41/Lucene41SkipReader.cs b/src/Lucene.Net.Core/Codecs/Lucene41/Lucene41SkipReader.cs
deleted file mode 100644
index 675777e..0000000
--- a/src/Lucene.Net.Core/Codecs/Lucene41/Lucene41SkipReader.cs
+++ /dev/null
@@ -1,275 +0,0 @@
-using Lucene.Net.Support;
-using System.Diagnostics;
-
-namespace Lucene.Net.Codecs.Lucene41
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using IndexInput = Lucene.Net.Store.IndexInput;
-
-    /// <summary>
-    /// Implements the skip list reader for block postings format
-    /// that stores positions and payloads.
-    ///
-    /// Although this skipper uses MultiLevelSkipListReader as an interface,
-    /// its definition of skip position will be a little different.
-    ///
-    /// For example, when skipInterval = blockSize = 3, df = 2*skipInterval = 6,
-    ///
-    /// 0 1 2 3 4 5
-    /// d d d d d d    (posting list)
-    ///     ^     ^    (skip point in MultiLeveSkipWriter)
-    ///       ^        (skip point in Lucene41SkipWriter)
-    ///
-    /// In this case, MultiLevelSkipListReader will use the last document as a skip point,
-    /// while Lucene41SkipReader should assume no skip point will comes.
-    ///
-    /// If we use the interface directly in Lucene41SkipReader, it may silly try to read
-    /// another skip data after the only skip point is loaded.
-    ///
-    /// To illustrate this, we can call skipTo(d[5]), since skip point d[3] has smaller docId,
-    /// and numSkipped+blockSize== df, the MultiLevelSkipListReader will assume the skip list
-    /// isn't exhausted yet, and try to load a non-existed skip point
-    ///
-    /// Therefore, we'll trim df before passing it to the interface. see trim(int)
-    ///
-    /// </summary>
-    internal sealed class Lucene41SkipReader : MultiLevelSkipListReader
-    {
-        // private boolean DEBUG = Lucene41PostingsReader.DEBUG;
-        private readonly int blockSize;
-
-        private long[] docPointer;
-        private long[] posPointer;
-        private long[] payPointer;
-        private int[] posBufferUpto;
-        private int[] payloadByteUpto;
-
-        private long lastPosPointer;
-        private long lastPayPointer;
-        private int lastPayloadByteUpto;
-        private long lastDocPointer;
-        private int lastPosBufferUpto;
-
-        public Lucene41SkipReader(IndexInput skipStream, int maxSkipLevels, int blockSize, bool hasPos, bool hasOffsets, bool hasPayloads)
-            : base(skipStream, maxSkipLevels, blockSize, 8)
-        {
-            this.blockSize = blockSize;
-            docPointer = new long[maxSkipLevels];
-            if (hasPos)
-            {
-                posPointer = new long[maxSkipLevels];
-                posBufferUpto = new int[maxSkipLevels];
-                if (hasPayloads)
-                {
-                    payloadByteUpto = new int[maxSkipLevels];
-                }
-                else
-                {
-                    payloadByteUpto = null;
-                }
-                if (hasOffsets || hasPayloads)
-                {
-                    payPointer = new long[maxSkipLevels];
-                }
-                else
-                {
-                    payPointer = null;
-                }
-            }
-            else
-            {
-                posPointer = null;
-            }
-        }
-
-        /// <summary>
-        /// Trim original docFreq to tell skipReader read proper number of skip points.
-        ///
-        /// Since our definition in Lucene41Skip* is a little different from MultiLevelSkip*
-        /// this trimmed docFreq will prevent skipReader from:
-        /// 1. silly reading a non-existed skip point after the last block boundary
-        /// 2. moving into the vInt block
-        ///
-        /// </summary>
-        internal int Trim(int df)
-        {
-            return df % blockSize == 0 ? df - 1 : df;
-        }
-
-        public void Init(long skipPointer, long docBasePointer, long posBasePointer, long payBasePointer, int df)
-        {
-            base.Init(skipPointer, Trim(df));
-            lastDocPointer = docBasePointer;
-            lastPosPointer = posBasePointer;
-            lastPayPointer = payBasePointer;
-
-            Arrays.Fill(docPointer, docBasePointer);
-            if (posPointer != null)
-            {
-                Arrays.Fill(posPointer, posBasePointer);
-                if (payPointer != null)
-                {
-                    Arrays.Fill(payPointer, payBasePointer);
-                }
-            }
-            else
-            {
-                Debug.Assert(posBasePointer == 0);
-            }
-        }
-
-        /// <summary>
-        /// Returns the doc pointer of the doc to which the last call of
-        /// <seealso cref="MultiLevelSkipListReader#skipTo(int)"/> has skipped.
-        /// </summary>
-        public long DocPointer
-        {
-            get
-            {
-                return lastDocPointer;
-            }
-        }
-
-        public long PosPointer
-        {
-            get
-            {
-                return lastPosPointer;
-            }
-        }
-
-        public int PosBufferUpto
-        {
-            get
-            {
-                return lastPosBufferUpto;
-            }
-        }
-
-        public long PayPointer
-        {
-            get
-            {
-                return lastPayPointer;
-            }
-        }
-
-        public int PayloadByteUpto
-        {
-            get
-            {
-                return lastPayloadByteUpto;
-            }
-        }
-
-        public int NextSkipDoc
-        {
-            get
-            {
-                return m_skipDoc[0];
-            }
-        }
-
-        protected override void SeekChild(int level)
-        {
-            base.SeekChild(level);
-            // if (DEBUG) {
-            //   System.out.println("seekChild level=" + level);
-            // }
-            docPointer[level] = lastDocPointer;
-            if (posPointer != null)
-            {
-                posPointer[level] = lastPosPointer;
-                posBufferUpto[level] = lastPosBufferUpto;
-                if (payloadByteUpto != null)
-                {
-                    payloadByteUpto[level] = lastPayloadByteUpto;
-                }
-                if (payPointer != null)
-                {
-                    payPointer[level] = lastPayPointer;
-                }
-            }
-        }
-
-        protected override void SetLastSkipData(int level)
-        {
-            base.SetLastSkipData(level);
-            lastDocPointer = docPointer[level];
-            // if (DEBUG) {
-            //   System.out.println("setLastSkipData level=" + value);
-            //   System.out.println("  lastDocPointer=" + lastDocPointer);
-            // }
-            if (posPointer != null)
-            {
-                lastPosPointer = posPointer[level];
-                lastPosBufferUpto = posBufferUpto[level];
-                // if (DEBUG) {
-                //   System.out.println("  lastPosPointer=" + lastPosPointer + " lastPosBUfferUpto=" + lastPosBufferUpto);
-                // }
-                if (payPointer != null)
-                {
-                    lastPayPointer = payPointer[level];
-                }
-                if (payloadByteUpto != null)
-                {
-                    lastPayloadByteUpto = payloadByteUpto[level];
-                }
-            }
-        }
-
-        protected override int ReadSkipData(int level, IndexInput skipStream)
-        {
-            // if (DEBUG) {
-            //   System.out.println("readSkipData level=" + level);
-            // }
-            int delta = skipStream.ReadVInt32();
-            // if (DEBUG) {
-            //   System.out.println("  delta=" + delta);
-            // }
-            docPointer[level] += skipStream.ReadVInt32();
-            // if (DEBUG) {
-            //   System.out.println("  docFP=" + docPointer[level]);
-            // }
-
-            if (posPointer != null)
-            {
-                posPointer[level] += skipStream.ReadVInt32();
-                // if (DEBUG) {
-                //   System.out.println("  posFP=" + posPointer[level]);
-                // }
-                posBufferUpto[level] = skipStream.ReadVInt32();
-                // if (DEBUG) {
-                //   System.out.println("  posBufferUpto=" + posBufferUpto[level]);
-                // }
-
-                if (payloadByteUpto != null)
-                {
-                    payloadByteUpto[level] = skipStream.ReadVInt32();
-                }
-
-                if (payPointer != null)
-                {
-                    payPointer[level] += skipStream.ReadVInt32();
-                }
-            }
-            return delta;
-        }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Codecs/Lucene41/Lucene41SkipWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Codecs/Lucene41/Lucene41SkipWriter.cs b/src/Lucene.Net.Core/Codecs/Lucene41/Lucene41SkipWriter.cs
deleted file mode 100644
index 3565a5d..0000000
--- a/src/Lucene.Net.Core/Codecs/Lucene41/Lucene41SkipWriter.cs
+++ /dev/null
@@ -1,162 +0,0 @@
-using Lucene.Net.Support;
-
-namespace Lucene.Net.Codecs.Lucene41
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using IndexOutput = Lucene.Net.Store.IndexOutput;
-
-    /// <summary>
-    /// Write skip lists with multiple levels, and support skip within block ints.
-    ///
-    /// Assume that docFreq = 28, skipInterval = blockSize = 12
-    ///
-    ///  |       block#0       | |      block#1        | |vInts|
-    ///  d d d d d d d d d d d d d d d d d d d d d d d d d d d d (posting list)
-    ///                          ^                       ^       (level 0 skip point)
-    ///
-    /// Note that skipWriter will ignore first document in block#0, since
-    /// it is useless as a skip point.  Also, we'll never skip into the vInts
-    /// block, only record skip data at the start its start point(if it exist).
-    ///
-    /// For each skip point, we will record:
-    /// 1. docID in former position, i.e. for position 12, record docID[11], etc.
-    /// 2. its related file points(position, payload),
-    /// 3. related numbers or uptos(position, payload).
-    /// 4. start offset.
-    ///
-    /// </summary>
-    internal sealed class Lucene41SkipWriter : MultiLevelSkipListWriter
-    {
-        // private boolean DEBUG = Lucene41PostingsReader.DEBUG;
-
-        private int[] lastSkipDoc;
-        private long[] lastSkipDocPointer;
-        private long[] lastSkipPosPointer;
-        private long[] lastSkipPayPointer;
-        private int[] lastPayloadByteUpto;
-
-        private readonly IndexOutput docOut;
-        private readonly IndexOutput posOut;
-        private readonly IndexOutput payOut;
-
-        private int curDoc;
-        private long curDocPointer;
-        private long curPosPointer;
-        private long curPayPointer;
-        private int curPosBufferUpto;
-        private int curPayloadByteUpto;
-        private bool fieldHasPositions;
-        private bool fieldHasOffsets;
-        private bool fieldHasPayloads;
-
-        public Lucene41SkipWriter(int maxSkipLevels, int blockSize, int docCount, IndexOutput docOut, IndexOutput posOut, IndexOutput payOut)
-            : base(blockSize, 8, maxSkipLevels, docCount)
-        {
-            this.docOut = docOut;
-            this.posOut = posOut;
-            this.payOut = payOut;
-
-            lastSkipDoc = new int[maxSkipLevels];
-            lastSkipDocPointer = new long[maxSkipLevels];
-            if (posOut != null)
-            {
-                lastSkipPosPointer = new long[maxSkipLevels];
-                if (payOut != null)
-                {
-                    lastSkipPayPointer = new long[maxSkipLevels];
-                }
-                lastPayloadByteUpto = new int[maxSkipLevels];
-            }
-        }
-
-        public void SetField(bool fieldHasPositions, bool fieldHasOffsets, bool fieldHasPayloads)
-        {
-            this.fieldHasPositions = fieldHasPositions;
-            this.fieldHasOffsets = fieldHasOffsets;
-            this.fieldHasPayloads = fieldHasPayloads;
-        }
-
-        public override void ResetSkip()
-        {
-            base.ResetSkip();
-            Arrays.Fill(lastSkipDoc, 0);
-            Arrays.Fill(lastSkipDocPointer, docOut.GetFilePointer());
-            if (fieldHasPositions)
-            {
-                Arrays.Fill(lastSkipPosPointer, posOut.GetFilePointer());
-                if (fieldHasPayloads)
-                {
-                    Arrays.Fill(lastPayloadByteUpto, 0);
-                }
-                if (fieldHasOffsets || fieldHasPayloads)
-                {
-                    Arrays.Fill(lastSkipPayPointer, payOut.GetFilePointer());
-                }
-            }
-        }
-
-        /// <summary>
-        /// Sets the values for the current skip data.
-        /// </summary>
-        public void BufferSkip(int doc, int numDocs, long posFP, long payFP, int posBufferUpto, int payloadByteUpto)
-        {
-            this.curDoc = doc;
-            this.curDocPointer = docOut.GetFilePointer();
-            this.curPosPointer = posFP;
-            this.curPayPointer = payFP;
-            this.curPosBufferUpto = posBufferUpto;
-            this.curPayloadByteUpto = payloadByteUpto;
-            BufferSkip(numDocs);
-        }
-
-        protected override void WriteSkipData(int level, IndexOutput skipBuffer)
-        {
-            int delta = curDoc - lastSkipDoc[level];
-            // if (DEBUG) {
-            //   System.out.println("writeSkipData level=" + level + " lastDoc=" + curDoc + " delta=" + delta + " curDocPointer=" + curDocPointer);
-            // }
-            skipBuffer.WriteVInt32(delta);
-            lastSkipDoc[level] = curDoc;
-
-            skipBuffer.WriteVInt32((int)(curDocPointer - lastSkipDocPointer[level]));
-            lastSkipDocPointer[level] = curDocPointer;
-
-            if (fieldHasPositions)
-            {
-                // if (DEBUG) {
-                //   System.out.println("  curPosPointer=" + curPosPointer + " curPosBufferUpto=" + curPosBufferUpto);
-                // }
-                skipBuffer.WriteVInt32((int)(curPosPointer - lastSkipPosPointer[level]));
-                lastSkipPosPointer[level] = curPosPointer;
-                skipBuffer.WriteVInt32(curPosBufferUpto);
-
-                if (fieldHasPayloads)
-                {
-                    skipBuffer.WriteVInt32(curPayloadByteUpto);
-                }
-
-                if (fieldHasOffsets || fieldHasPayloads)
-                {
-                    skipBuffer.WriteVInt32((int)(curPayPointer - lastSkipPayPointer[level]));
-                    lastSkipPayPointer[level] = curPayPointer;
-                }
-            }
-        }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Codecs/Lucene41/Lucene41StoredFieldsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Codecs/Lucene41/Lucene41StoredFieldsFormat.cs b/src/Lucene.Net.Core/Codecs/Lucene41/Lucene41StoredFieldsFormat.cs
deleted file mode 100644
index 45e8d0c..0000000
--- a/src/Lucene.Net.Core/Codecs/Lucene41/Lucene41StoredFieldsFormat.cs
+++ /dev/null
@@ -1,125 +0,0 @@
-namespace Lucene.Net.Codecs.Lucene41
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using CompressingStoredFieldsFormat = Lucene.Net.Codecs.Compressing.CompressingStoredFieldsFormat;
-    using CompressingStoredFieldsIndexWriter = Lucene.Net.Codecs.Compressing.CompressingStoredFieldsIndexWriter;
-    using CompressionMode = Lucene.Net.Codecs.Compressing.CompressionMode;
-    using Lucene40StoredFieldsFormat = Lucene.Net.Codecs.Lucene40.Lucene40StoredFieldsFormat;
-    using StoredFieldVisitor = Lucene.Net.Index.StoredFieldVisitor;
-
-    /// <summary>
-    /// Lucene 4.1 stored fields format.
-    ///
-    /// <p><b>Principle</b></p>
-    /// <p>this <seealso cref="StoredFieldsFormat"/> compresses blocks of 16KB of documents in
-    /// order to improve the compression ratio compared to document-level
-    /// compression. It uses the <a href="http://code.google.com/p/lz4/">LZ4</a>
-    /// compression algorithm, which is fast to compress and very fast to decompress
-    /// data. Although the compression method that is used focuses more on speed
-    /// than on compression ratio, it should provide interesting compression ratios
-    /// for redundant inputs (such as log files, HTML or plain text).</p>
-    /// <p><b>File formats</b></p>
-    /// <p>Stored fields are represented by two files:</p>
-    /// <ol>
-    /// <li><a name="field_data" id="field_data"></a>
-    /// <p>A fields data file (extension <tt>.fdt</tt>). this file stores a compact
-    /// representation of documents in compressed blocks of 16KB or more. When
-    /// writing a segment, documents are appended to an in-memory <tt>byte[]</tt>
-    /// buffer. When its size reaches 16KB or more, some metadata about the documents
-    /// is flushed to disk, immediately followed by a compressed representation of
-    /// the buffer using the
-    /// <a href="http://code.google.com/p/lz4/">LZ4</a>
-    /// <a href="http://fastcompression.blogspot.fr/2011/05/lz4-explained.html">compression format</a>.</p>
-    /// <p>Here is a more detailed description of the field data file format:</p>
-    /// <ul>
-    /// <li>FieldData (.fdt) --&gt; &lt;Header&gt;, PackedIntsVersion, &lt;Chunk&gt;<sup>ChunkCount</sup></li>
-    /// <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    /// <li>PackedIntsVersion --&gt; <seealso cref="PackedInts#VERSION_CURRENT"/> as a <seealso cref="DataOutput#writeVInt VInt"/></li>
-    /// <li>ChunkCount is not known in advance and is the number of chunks necessary to store all document of the segment</li>
-    /// <li>Chunk --&gt; DocBase, ChunkDocs, DocFieldCounts, DocLengths, &lt;CompressedDocs&gt;</li>
-    /// <li>DocBase --&gt; the ID of the first document of the chunk as a <seealso cref="DataOutput#writeVInt VInt"/></li>
-    /// <li>ChunkDocs --&gt; the number of documents in the chunk as a <seealso cref="DataOutput#writeVInt VInt"/></li>
-    /// <li>DocFieldCounts --&gt; the number of stored fields of every document in the chunk, encoded as followed:<ul>
-    ///   <li>if chunkDocs=1, the unique value is encoded as a <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///   <li>else read a <seealso cref="DataOutput#writeVInt VInt"/> (let's call it <tt>bitsRequired</tt>)<ul>
-    ///     <li>if <tt>bitsRequired</tt> is <tt>0</tt> then all values are equal, and the common value is the following <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///     <li>else <tt>bitsRequired</tt> is the number of bits required to store any value, and values are stored in a <seealso cref="PackedInts packed"/> array where every value is stored on exactly <tt>bitsRequired</tt> bits</li>
-    ///   </ul></li>
-    /// </ul></li>
-    /// <li>DocLengths --&gt; the lengths of all documents in the chunk, encoded with the same method as DocFieldCounts</li>
-    /// <li>CompressedDocs --&gt; a compressed representation of &lt;Docs&gt; using the LZ4 compression format</li>
-    /// <li>Docs --&gt; &lt;Doc&gt;<sup>ChunkDocs</sup></li>
-    /// <li>Doc --&gt; &lt;FieldNumAndType, Value&gt;<sup>DocFieldCount</sup></li>
-    /// <li>FieldNumAndType --&gt; a <seealso cref="DataOutput#writeVLong VLong"/>, whose 3 last bits are Type and other bits are FieldNum</li>
-    /// <li>Type --&gt;<ul>
-    ///   <li>0: Value is String</li>
-    ///   <li>1: Value is BinaryValue</li>
-    ///   <li>2: Value is Int</li>
-    ///   <li>3: Value is Float</li>
-    ///   <li>4: Value is Long</li>
-    ///   <li>5: Value is Double</li>
-    ///   <li>6, 7: unused</li>
-    /// </ul></li>
-    /// <li>FieldNum --&gt; an ID of the field</li>
-    /// <li>Value --&gt; <seealso cref="DataOutput#writeString(String) String"/> | BinaryValue | Int | Float | Long | Double depending on Type</li>
-    /// <li>BinaryValue --&gt; ValueLength &lt;Byte&gt;<sup>ValueLength</sup></li>
-    /// </ul>
-    /// <p>Notes</p>
-    /// <ul>
-    /// <li>If documents are larger than 16KB then chunks will likely contain only
-    /// one document. However, documents can never spread across several chunks (all
-    /// fields of a single document are in the same chunk).</li>
-    /// <li>When at least one document in a chunk is large enough so that the chunk
-    /// is larger than 32KB, the chunk will actually be compressed in several LZ4
-    /// blocks of 16KB. this allows <seealso cref="StoredFieldVisitor"/>s which are only
-    /// interested in the first fields of a document to not have to decompress 10MB
-    /// of data if the document is 10MB, but only 16KB.</li>
-    /// <li>Given that the original lengths are written in the metadata of the chunk,
-    /// the decompressor can leverage this information to stop decoding as soon as
-    /// enough data has been decompressed.</li>
-    /// <li>In case documents are incompressible, CompressedDocs will be less than
-    /// 0.5% larger than Docs.</li>
-    /// </ul>
-    /// </li>
-    /// <li><a name="field_index" id="field_index"></a>
-    /// <p>A fields index file (extension <tt>.fdx</tt>).</p>
-    /// <ul>
-    /// <li>FieldsIndex (.fdx) --&gt; &lt;Header&gt;, &lt;ChunkIndex&gt;</li>
-    /// <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    /// <li>ChunkIndex: See <seealso cref="CompressingStoredFieldsIndexWriter"/></li>
-    /// </ul>
-    /// </li>
-    /// </ol>
-    /// <p><b>Known limitations</b></p>
-    /// <p>this <seealso cref="StoredFieldsFormat"/> does not support individual documents
-    /// larger than (<tt>2<sup>31</sup> - 2<sup>14</sup></tt>) bytes. In case this
-    /// is a problem, you should use another format, such as
-    /// <seealso cref="Lucene40StoredFieldsFormat"/>.</p>
-    /// @lucene.experimental
-    /// </summary>
-    public sealed class Lucene41StoredFieldsFormat : CompressingStoredFieldsFormat
-    {
-        /// <summary>
-        /// Sole constructor. </summary>
-        public Lucene41StoredFieldsFormat()
-            : base("Lucene41StoredFields", CompressionMode.FAST, 1 << 14)
-        {
-        }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Codecs/Lucene42/Lucene42Codec.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Codecs/Lucene42/Lucene42Codec.cs b/src/Lucene.Net.Core/Codecs/Lucene42/Lucene42Codec.cs
deleted file mode 100644
index 3972a5e..0000000
--- a/src/Lucene.Net.Core/Codecs/Lucene42/Lucene42Codec.cs
+++ /dev/null
@@ -1,174 +0,0 @@
-using System;
-
-namespace Lucene.Net.Codecs.Lucene42
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using Lucene40LiveDocsFormat = Lucene.Net.Codecs.Lucene40.Lucene40LiveDocsFormat;
-    using Lucene40SegmentInfoFormat = Lucene.Net.Codecs.Lucene40.Lucene40SegmentInfoFormat;
-    using Lucene41StoredFieldsFormat = Lucene.Net.Codecs.Lucene41.Lucene41StoredFieldsFormat;
-    using PerFieldDocValuesFormat = Lucene.Net.Codecs.PerField.PerFieldDocValuesFormat;
-    using PerFieldPostingsFormat = Lucene.Net.Codecs.PerField.PerFieldPostingsFormat;
-    using SegmentWriteState = Lucene.Net.Index.SegmentWriteState;
-
-    /// <summary>
-    /// Implements the Lucene 4.2 index format, with configurable per-field postings
-    /// and docvalues formats.
-    /// <p>
-    /// If you want to reuse functionality of this codec in another codec, extend
-    /// <seealso cref="FilterCodec"/>.
-    /// </summary>
-    /// <seealso cref= Lucene.Net.Codecs.Lucene42 package documentation for file format details.
-    /// @lucene.experimental </seealso>
-    /// @deprecated Only for reading old 4.2 segments
-    // NOTE: if we make largish changes in a minor release, easier to just make Lucene43Codec or whatever
-    // if they are backwards compatible or smallish we can probably do the backwards in the postingsreader
-    // (it writes a minor version, etc).
-    [Obsolete("Only for reading old 4.2 segments")]
-    [CodecName("Lucene42")] // LUCENENET specific - using CodecName attribute to ensure the default name passed from subclasses is the same as this class name
-    public class Lucene42Codec : Codec
-    {
-        private readonly StoredFieldsFormat fieldsFormat = new Lucene41StoredFieldsFormat();
-        private readonly TermVectorsFormat vectorsFormat = new Lucene42TermVectorsFormat();
-        private readonly FieldInfosFormat fieldInfosFormat = new Lucene42FieldInfosFormat();
-        private readonly SegmentInfoFormat infosFormat = new Lucene40SegmentInfoFormat();
-        private readonly LiveDocsFormat liveDocsFormat = new Lucene40LiveDocsFormat();
-
-        private readonly PostingsFormat postingsFormat;
-
-        private class PerFieldPostingsFormatAnonymousInnerClassHelper : PerFieldPostingsFormat
-        {
-            private readonly Lucene42Codec outerInstance;
-
-            public PerFieldPostingsFormatAnonymousInnerClassHelper(Lucene42Codec outerInstance)
-            {
-                this.outerInstance = outerInstance;
-            }
-
-            public override PostingsFormat GetPostingsFormatForField(string field)
-            {
-                return outerInstance.GetPostingsFormatForField(field);
-            }
-        }
-
-        private readonly DocValuesFormat docValuesFormat;
-
-        private class PerFieldDocValuesFormatAnonymousInnerClassHelper : PerFieldDocValuesFormat
-        {
-            private readonly Lucene42Codec outerInstance;
-
-            public PerFieldDocValuesFormatAnonymousInnerClassHelper(Lucene42Codec outerInstance)
-            {
-                this.outerInstance = outerInstance;
-            }
-
-            public override DocValuesFormat GetDocValuesFormatForField(string field)
-            {
-                return outerInstance.GetDocValuesFormatForField(field);
-            }
-        }
-
-        /// <summary>
-        /// Sole constructor. </summary>
-        public Lucene42Codec()
-            : base()
-        {
-            postingsFormat = new PerFieldPostingsFormatAnonymousInnerClassHelper(this);
-            docValuesFormat = new PerFieldDocValuesFormatAnonymousInnerClassHelper(this);
-        }
-
-        public override sealed StoredFieldsFormat StoredFieldsFormat
-        {
-            get { return fieldsFormat; }
-        }
-
-        public override sealed TermVectorsFormat TermVectorsFormat
-        {
-            get { return vectorsFormat; }
-        }
-
-        public override sealed PostingsFormat PostingsFormat
-        {
-            get { return postingsFormat; }
-        }
-
-        public override FieldInfosFormat FieldInfosFormat
-        {
-            get { return fieldInfosFormat; }
-        }
-
-        public override SegmentInfoFormat SegmentInfoFormat
-        {
-            get { return infosFormat; }
-        }
-
-        public override sealed LiveDocsFormat LiveDocsFormat
-        {
-            get { return liveDocsFormat; }
-        }
-
-        /// <summary>
-        /// Returns the postings format that should be used for writing
-        ///  new segments of <code>field</code>.
-        ///
-        ///  The default implementation always returns "Lucene41"
-        /// </summary>
-        public virtual PostingsFormat GetPostingsFormatForField(string field)
-        {
-            return defaultFormat;
-        }
-
-        /// <summary>
-        /// Returns the docvalues format that should be used for writing
-        ///  new segments of <code>field</code>.
-        ///
-        ///  The default implementation always returns "Lucene42"
-        /// </summary>
-        public virtual DocValuesFormat GetDocValuesFormatForField(string field)
-        {
-            return defaultDVFormat;
-        }
-
-        public override sealed DocValuesFormat DocValuesFormat
-        {
-            get { return docValuesFormat; }
-        }
-
-        private readonly PostingsFormat defaultFormat = Codecs.PostingsFormat.ForName("Lucene41");
-        private readonly DocValuesFormat defaultDVFormat = Codecs.DocValuesFormat.ForName("Lucene42");
-
-        private readonly NormsFormat normsFormat = new Lucene42NormsFormatAnonymousInnerClassHelper();
-
-        private class Lucene42NormsFormatAnonymousInnerClassHelper : Lucene42NormsFormat
-        {
-            public Lucene42NormsFormatAnonymousInnerClassHelper()
-            {
-            }
-
-            public override DocValuesConsumer NormsConsumer(SegmentWriteState state)
-            {
-                throw new System.NotSupportedException("this codec can only be used for reading");
-            }
-        }
-
-        public override NormsFormat NormsFormat
-        {
-            get { return normsFormat; }
-        }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Codecs/Lucene42/Lucene42DocValuesFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Codecs/Lucene42/Lucene42DocValuesFormat.cs b/src/Lucene.Net.Core/Codecs/Lucene42/Lucene42DocValuesFormat.cs
deleted file mode 100644
index 82e8c89..0000000
--- a/src/Lucene.Net.Core/Codecs/Lucene42/Lucene42DocValuesFormat.cs
+++ /dev/null
@@ -1,168 +0,0 @@
-using System;
-
-namespace Lucene.Net.Codecs.Lucene42
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using PackedInt32s = Lucene.Net.Util.Packed.PackedInt32s;
-    using SegmentReadState = Lucene.Net.Index.SegmentReadState;
-    using SegmentWriteState = Lucene.Net.Index.SegmentWriteState;
-
-    /// <summary>
-    /// Lucene 4.2 DocValues format.
-    /// <p>
-    /// Encodes the four per-document value types (Numeric,Binary,Sorted,SortedSet) with seven basic strategies.
-    /// <p>
-    /// <ul>
-    ///    <li>Delta-compressed Numerics: per-document integers written in blocks of 4096. For each block
-    ///        the minimum value is encoded, and each entry is a delta from that minimum value.
-    ///    <li>Table-compressed Numerics: when the number of unique values is very small, a lookup table
-    ///        is written instead. Each per-document entry is instead the ordinal to this table.
-    ///    <li>Uncompressed Numerics: when all values would fit into a single byte, and the
-    ///        <code>acceptableOverheadRatio</code> would pack values into 8 bits per value anyway, they
-    ///        are written as absolute values (with no indirection or packing) for performance.
-    ///    <li>GCD-compressed Numerics: when all numbers share a common divisor, such as dates, the greatest
-    ///        common divisor (GCD) is computed, and quotients are stored using Delta-compressed Numerics.
-    ///    <li>Fixed-width Binary: one large concatenated byte[] is written, along with the fixed length.
-    ///        Each document's value can be addressed by maxDoc*length.
-    ///    <li>Variable-width Binary: one large concatenated byte[] is written, along with end addresses
-    ///        for each document. The addresses are written in blocks of 4096, with the current absolute
-    ///        start for the block, and the average (expected) delta per entry. For each document the
-    ///        deviation from the delta (actual - expected) is written.
-    ///    <li>Sorted: an FST mapping deduplicated terms to ordinals is written, along with the per-document
-    ///        ordinals written using one of the numeric strategies above.
-    ///    <li>SortedSet: an FST mapping deduplicated terms to ordinals is written, along with the per-document
-    ///        ordinal list written using one of the binary strategies above.
-    /// </ul>
-    /// <p>
-    /// Files:
-    /// <ol>
-    ///   <li><tt>.dvd</tt>: DocValues data</li>
-    ///   <li><tt>.dvm</tt>: DocValues metadata</li>
-    /// </ol>
-    /// <ol>
-    ///   <li><a name="dvm" id="dvm"></a>
-    ///   <p>The DocValues metadata or .dvm file.</p>
-    ///   <p>For DocValues field, this stores metadata, such as the offset into the
-    ///      DocValues data (.dvd)</p>
-    ///   <p>DocValues metadata (.dvm) --&gt; Header,&lt;FieldNumber,EntryType,Entry&gt;<sup>NumFields</sup>,Footer</p>
-    ///   <ul>
-    ///     <li>Entry --&gt; NumericEntry | BinaryEntry | SortedEntry</li>
-    ///     <li>NumericEntry --&gt; DataOffset,CompressionType,PackedVersion</li>
-    ///     <li>BinaryEntry --&gt; DataOffset,DataLength,MinLength,MaxLength,PackedVersion?,BlockSize?</li>
-    ///     <li>SortedEntry --&gt; DataOffset,ValueCount</li>
-    ///     <li>FieldNumber,PackedVersion,MinLength,MaxLength,BlockSize,ValueCount --&gt; <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///     <li>DataOffset,DataLength --&gt; <seealso cref="DataOutput#writeLong Int64"/></li>
-    ///     <li>EntryType,CompressionType --&gt; <seealso cref="DataOutput#writeByte Byte"/></li>
-    ///     <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    ///     <li>Footer --&gt; <seealso cref="CodecUtil#writeFooter CodecFooter"/></li>
-    ///   </ul>
-    ///   <p>Sorted fields have two entries: a SortedEntry with the FST metadata,
-    ///      and an ordinary NumericEntry for the document-to-ord metadata.</p>
-    ///   <p>SortedSet fields have two entries: a SortedEntry with the FST metadata,
-    ///      and an ordinary BinaryEntry for the document-to-ord-list metadata.</p>
-    ///   <p>FieldNumber of -1 indicates the end of metadata.</p>
-    ///   <p>EntryType is a 0 (NumericEntry), 1 (BinaryEntry), or 2 (SortedEntry)</p>
-    ///   <p>DataOffset is the pointer to the start of the data in the DocValues data (.dvd)</p>
-    ///   <p>CompressionType indicates how Numeric values will be compressed:
-    ///      <ul>
-    ///         <li>0 --&gt; delta-compressed. For each block of 4096 integers, every integer is delta-encoded
-    ///             from the minimum value within the block.
-    ///         <li>1 --&gt; table-compressed. When the number of unique numeric values is small and it would save space,
-    ///             a lookup table of unique values is written, followed by the ordinal for each document.
-    ///         <li>2 --&gt; uncompressed. When the <code>acceptableOverheadRatio</code> parameter would upgrade the number
-    ///             of bits required to 8, and all values fit in a byte, these are written as absolute binary values
-    ///             for performance.
-    ///         <li>3 --&gt; gcd-compressed. When all integers share a common divisor, only quotients are stored
-    ///             using blocks of delta-encoded ints.
-    ///      </ul>
-    ///   <p>MinLength and MaxLength represent the min and max byte[] value lengths for Binary values.
-    ///      If they are equal, then all values are of a fixed size, and can be addressed as DataOffset + (docID * length).
-    ///      Otherwise, the binary values are of variable size, and packed integer metadata (PackedVersion,BlockSize)
-    ///      is written for the addresses.
-    ///   <li><a name="dvd" id="dvd"></a>
-    ///   <p>The DocValues data or .dvd file.</p>
-    ///   <p>For DocValues field, this stores the actual per-document data (the heavy-lifting)</p>
-    ///   <p>DocValues data (.dvd) --&gt; Header,&lt;NumericData | BinaryData | SortedData&gt;<sup>NumFields</sup>,Footer</p>
-    ///   <ul>
-    ///     <li>NumericData --&gt; DeltaCompressedNumerics | TableCompressedNumerics | UncompressedNumerics | GCDCompressedNumerics</li>
-    ///     <li>BinaryData --&gt;  <seealso cref="DataOutput#writeByte Byte"/><sup>DataLength</sup>,Addresses</li>
-    ///     <li>SortedData --&gt; <seealso cref="FST FST&lt;Int64&gt;"/></li>
-    ///     <li>DeltaCompressedNumerics --&gt; <seealso cref="BlockPackedWriter BlockPackedInts(blockSize=4096)"/></li>
-    ///     <li>TableCompressedNumerics --&gt; TableSize,<seealso cref="DataOutput#writeLong Int64"/><sup>TableSize</sup>,<seealso cref="PackedInt32s PackedInts"/></li>
-    ///     <li>UncompressedNumerics --&gt; <seealso cref="DataOutput#writeByte Byte"/><sup>maxdoc</sup></li>
-    ///     <li>Addresses --&gt; <seealso cref="MonotonicBlockPackedWriter MonotonicBlockPackedInts(blockSize=4096)"/></li>
-    ///     <li>Footer --&gt; <seealso cref="CodecUtil#writeFooter CodecFooter"/></li>
-    ///   </ul>
-    ///   <p>SortedSet entries store the list of ordinals in their BinaryData as a
-    ///      sequences of increasing <seealso cref="DataOutput#writeVLong vLong"/>s, delta-encoded.</p>
-    /// </ol>
-    /// <p>
-    /// Limitations:
-    /// <ul>
-    ///   <li> Binary doc values can be at most <seealso cref="#MAX_BINARY_FIELD_LENGTH"/> in length.
-    /// </ul> </summary>
-    /// @deprecated Only for reading old 4.2 segments
-    [Obsolete("Only for reading old 4.2 segments")]
-    [DocValuesFormatName("Lucene42")] // LUCENENET specific - using DocValuesFormatName attribute to ensure the default name passed from subclasses is the same as this class name
-    public class Lucene42DocValuesFormat : DocValuesFormat
-    {
-        /// <summary>
-        /// Maximum length for each binary doc values field. </summary>
-        public static readonly int MAX_BINARY_FIELD_LENGTH = (1 << 15) - 2;
-
-        protected readonly float m_acceptableOverheadRatio;
-
-        /// <summary>
-        /// Calls {@link #Lucene42DocValuesFormat(float)
-        /// Lucene42DocValuesFormat(PackedInts.DEFAULT)}
-        /// </summary>
-        public Lucene42DocValuesFormat()
-            : this(PackedInt32s.DEFAULT)
-        {
-        }
-
-        /// <summary>
-        /// Creates a new Lucene42DocValuesFormat with the specified
-        /// <code>acceptableOverheadRatio</code> for NumericDocValues. </summary>
-        /// <param name="acceptableOverheadRatio"> compression parameter for numerics.
-        ///        Currently this is only used when the number of unique values is small.
-        ///
-        /// @lucene.experimental </param>
-        public Lucene42DocValuesFormat(float acceptableOverheadRatio)
-            : base()
-        {
-            this.m_acceptableOverheadRatio = acceptableOverheadRatio;
-        }
-
-        public override DocValuesConsumer FieldsConsumer(SegmentWriteState state)
-        {
-            throw new System.NotSupportedException("this codec can only be used for reading");
-        }
-
-        public override DocValuesProducer FieldsProducer(SegmentReadState state)
-        {
-            return new Lucene42DocValuesProducer(state, DATA_CODEC, DATA_EXTENSION, METADATA_CODEC, METADATA_EXTENSION);
-        }
-
-        internal const string DATA_CODEC = "Lucene42DocValuesData";
-        internal const string DATA_EXTENSION = "dvd";
-        internal const string METADATA_CODEC = "Lucene42DocValuesMetadata";
-        internal const string METADATA_EXTENSION = "dvm";
-    }
-}
\ No newline at end of file


Mime
View raw message