lucenenet-commits mailing list archives

From nightowl...@apache.org
Subject [43/62] [abbrv] [partial] lucenenet git commit: Renamed Lucene.Net.Core folder to Lucene.Net because the dotnet.exe pack command doesn't allow creating a NuGet package with a different name than its folder. Working around it with the script was much more co
Date Tue, 04 Apr 2017 17:19:49 GMT
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Codecs/Compressing/CompressingTermVectorsWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Codecs/Compressing/CompressingTermVectorsWriter.cs b/src/Lucene.Net.Core/Codecs/Compressing/CompressingTermVectorsWriter.cs
deleted file mode 100644
index 042c319..0000000
--- a/src/Lucene.Net.Core/Codecs/Compressing/CompressingTermVectorsWriter.cs
+++ /dev/null
@@ -1,987 +0,0 @@
-using Lucene.Net.Support;
-using System;
-using System.Collections.Generic;
-using System.Diagnostics;
-using System.Linq;
-using ArrayUtil = Lucene.Net.Util.ArrayUtil;
-
-namespace Lucene.Net.Codecs.Compressing
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using AtomicReader = Lucene.Net.Index.AtomicReader;
-    using IBits = Lucene.Net.Util.IBits;
-    using BlockPackedWriter = Lucene.Net.Util.Packed.BlockPackedWriter;
-    using BufferedChecksumIndexInput = Lucene.Net.Store.BufferedChecksumIndexInput;
-    using BytesRef = Lucene.Net.Util.BytesRef;
-    using ChecksumIndexInput = Lucene.Net.Store.ChecksumIndexInput;
-    using DataInput = Lucene.Net.Store.DataInput;
-    using Directory = Lucene.Net.Store.Directory;
-    using FieldInfo = Lucene.Net.Index.FieldInfo;
-    using FieldInfos = Lucene.Net.Index.FieldInfos;
-    using Fields = Lucene.Net.Index.Fields;
-    using GrowableByteArrayDataOutput = Lucene.Net.Util.GrowableByteArrayDataOutput;
-    using IndexFileNames = Lucene.Net.Index.IndexFileNames;
-    using IndexInput = Lucene.Net.Store.IndexInput;
-    using IndexOutput = Lucene.Net.Store.IndexOutput;
-    using IOContext = Lucene.Net.Store.IOContext;
-    using IOUtils = Lucene.Net.Util.IOUtils;
-    using MergeState = Lucene.Net.Index.MergeState;
-    using PackedInt32s = Lucene.Net.Util.Packed.PackedInt32s;
-    using SegmentInfo = Lucene.Net.Index.SegmentInfo;
-    using SegmentReader = Lucene.Net.Index.SegmentReader;
-    using StringHelper = Lucene.Net.Util.StringHelper;
-
-    /// <summary>
-    /// <seealso cref="TermVectorsWriter"/> for <seealso cref="CompressingTermVectorsFormat"/>.
-    /// @lucene.experimental
-    /// </summary>
-    public sealed class CompressingTermVectorsWriter : TermVectorsWriter
-    {
-        // hard limit on the maximum number of documents per chunk
-        internal const int MAX_DOCUMENTS_PER_CHUNK = 128;
-
-        internal const string VECTORS_EXTENSION = "tvd";
-        internal const string VECTORS_INDEX_EXTENSION = "tvx";
-
-        internal const string CODEC_SFX_IDX = "Index";
-        internal const string CODEC_SFX_DAT = "Data";
-
-        internal const int VERSION_START = 0;
-        internal const int VERSION_CHECKSUM = 1;
-        internal const int VERSION_CURRENT = VERSION_CHECKSUM;
-
-        internal const int BLOCK_SIZE = 64;
-
-        internal const int POSITIONS = 0x01;
-        internal const int OFFSETS = 0x02;
-        internal const int PAYLOADS = 0x04;
-        internal static readonly int FLAGS_BITS = PackedInt32s.BitsRequired(POSITIONS | OFFSETS | PAYLOADS);
-
-        private readonly Directory directory;
-        private readonly string segment;
-        private readonly string segmentSuffix;
-        private CompressingStoredFieldsIndexWriter indexWriter;
-        private IndexOutput vectorsStream;
-
-        private readonly CompressionMode compressionMode;
-        private readonly Compressor compressor;
-        private readonly int chunkSize;
-
-        /// <summary>
-        /// a pending doc </summary>
-        private class DocData
-        {
-            private readonly CompressingTermVectorsWriter outerInstance;
-
-            internal readonly int numFields;
-            internal readonly LinkedList<FieldData> fields;
-            internal readonly int posStart, offStart, payStart;
-
-            internal DocData(CompressingTermVectorsWriter outerInstance, int numFields, int posStart, int offStart, int payStart)
-            {
-                this.outerInstance = outerInstance;
-                this.numFields = numFields;
-                this.fields = new LinkedList<FieldData>();
-                this.posStart = posStart;
-                this.offStart = offStart;
-                this.payStart = payStart;
-            }
-
-            internal virtual FieldData AddField(int fieldNum, int numTerms, bool positions, bool offsets, bool payloads)
-            {
-                FieldData field;
-                if (fields.Count == 0)
-                {
-                    field = new FieldData(outerInstance, fieldNum, numTerms, positions, offsets, payloads, posStart, offStart, payStart);
-                }
-                else
-                {
-                    FieldData last = fields.Last.Value;
-                    int posStart = last.posStart + (last.hasPositions ? last.totalPositions : 0);
-                    int offStart = last.offStart + (last.hasOffsets ? last.totalPositions : 0);
-                    int payStart = last.payStart + (last.hasPayloads ? last.totalPositions : 0);
-                    field = new FieldData(outerInstance, fieldNum, numTerms, positions, offsets, payloads, posStart, offStart, payStart);
-                }
-                fields.AddLast(field);
-                return field;
-            }
-        }
-
-        private DocData AddDocData(int numVectorFields)
-        {
-            FieldData last = null;
-            //for (IEnumerator<DocData> it = PendingDocs.Reverse(); it.MoveNext();)
-            foreach (DocData doc in pendingDocs.Reverse())
-            {
-                if (!(doc.fields.Count == 0))
-                {
-                    last = doc.fields.Last.Value;
-                    break;
-                }
-            }
-            DocData newDoc;
-            if (last == null)
-            {
-                newDoc = new DocData(this, numVectorFields, 0, 0, 0);
-            }
-            else
-            {
-                int posStart = last.posStart + (last.hasPositions ? last.totalPositions : 0);
-                int offStart = last.offStart + (last.hasOffsets ? last.totalPositions : 0);
-                int payStart = last.payStart + (last.hasPayloads ? last.totalPositions : 0);
-                newDoc = new DocData(this, numVectorFields, posStart, offStart, payStart);
-            }
-            pendingDocs.AddLast(newDoc);
-            return newDoc;
-        }
-
-        /// <summary>
-        /// a pending field </summary>
-        private class FieldData
-        {
-            private readonly CompressingTermVectorsWriter outerInstance;
-
-            internal readonly bool hasPositions, hasOffsets, hasPayloads;
-            internal readonly int fieldNum, flags, numTerms;
-            internal readonly int[] freqs, prefixLengths, suffixLengths;
-            internal readonly int posStart, offStart, payStart;
-            internal int totalPositions;
-            internal int ord;
-
-            internal FieldData(CompressingTermVectorsWriter outerInstance, int fieldNum, int numTerms, bool positions, bool offsets, bool payloads, int posStart, int offStart, int payStart)
-            {
-                this.outerInstance = outerInstance;
-                this.fieldNum = fieldNum;
-                this.numTerms = numTerms;
-                this.hasPositions = positions;
-                this.hasOffsets = offsets;
-                this.hasPayloads = payloads;
-                this.flags = (positions ? POSITIONS : 0) | (offsets ? OFFSETS : 0) | (payloads ? PAYLOADS : 0);
-                this.freqs = new int[numTerms];
-                this.prefixLengths = new int[numTerms];
-                this.suffixLengths = new int[numTerms];
-                this.posStart = posStart;
-                this.offStart = offStart;
-                this.payStart = payStart;
-                totalPositions = 0;
-                ord = 0;
-            }
-
-            internal virtual void AddTerm(int freq, int prefixLength, int suffixLength)
-            {
-                freqs[ord] = freq;
-                prefixLengths[ord] = prefixLength;
-                suffixLengths[ord] = suffixLength;
-                ++ord;
-            }
-
-            internal virtual void AddPosition(int position, int startOffset, int length, int payloadLength)
-            {
-                if (hasPositions)
-                {
-                    if (posStart + totalPositions == outerInstance.positionsBuf.Length)
-                    {
-                        outerInstance.positionsBuf = ArrayUtil.Grow(outerInstance.positionsBuf);
-                    }
-                    outerInstance.positionsBuf[posStart + totalPositions] = position;
-                }
-                if (hasOffsets)
-                {
-                    if (offStart + totalPositions == outerInstance.startOffsetsBuf.Length)
-                    {
-                        int newLength = ArrayUtil.Oversize(offStart + totalPositions, 4);
-                        outerInstance.startOffsetsBuf = Arrays.CopyOf(outerInstance.startOffsetsBuf, newLength);
-                        outerInstance.lengthsBuf = Arrays.CopyOf(outerInstance.lengthsBuf, newLength);
-                    }
-                    outerInstance.startOffsetsBuf[offStart + totalPositions] = startOffset;
-                    outerInstance.lengthsBuf[offStart + totalPositions] = length;
-                }
-                if (hasPayloads)
-                {
-                    if (payStart + totalPositions == outerInstance.payloadLengthsBuf.Length)
-                    {
-                        outerInstance.payloadLengthsBuf = ArrayUtil.Grow(outerInstance.payloadLengthsBuf);
-                    }
-                    outerInstance.payloadLengthsBuf[payStart + totalPositions] = payloadLength;
-                }
-                ++totalPositions;
-            }
-        }
-
-        private int numDocs; // total number of docs seen
-        private readonly LinkedList<DocData> pendingDocs; // pending docs
-        private DocData curDoc; // current document
-        private FieldData curField; // current field
-        private readonly BytesRef lastTerm;
-        private int[] positionsBuf, startOffsetsBuf, lengthsBuf, payloadLengthsBuf;
-        private readonly GrowableByteArrayDataOutput termSuffixes; // buffered term suffixes
-        private readonly GrowableByteArrayDataOutput payloadBytes; // buffered term payloads
-        private readonly BlockPackedWriter writer;
-
-        /// <summary>
-        /// Sole constructor. </summary>
-        public CompressingTermVectorsWriter(Directory directory, SegmentInfo si, string segmentSuffix, IOContext context, string formatName, CompressionMode compressionMode, int chunkSize)
-        {
-            Debug.Assert(directory != null);
-            this.directory = directory;
-            this.segment = si.Name;
-            this.segmentSuffix = segmentSuffix;
-            this.compressionMode = compressionMode;
-            this.compressor = compressionMode.NewCompressor();
-            this.chunkSize = chunkSize;
-
-            numDocs = 0;
-            pendingDocs = new LinkedList<DocData>();
-            termSuffixes = new GrowableByteArrayDataOutput(ArrayUtil.Oversize(chunkSize, 1));
-            payloadBytes = new GrowableByteArrayDataOutput(ArrayUtil.Oversize(1, 1));
-            lastTerm = new BytesRef(ArrayUtil.Oversize(30, 1));
-
-            bool success = false;
-            IndexOutput indexStream = directory.CreateOutput(IndexFileNames.SegmentFileName(segment, segmentSuffix, VECTORS_INDEX_EXTENSION), context);
-            try
-            {
-                vectorsStream = directory.CreateOutput(IndexFileNames.SegmentFileName(segment, segmentSuffix, VECTORS_EXTENSION), context);
-
-                string codecNameIdx = formatName + CODEC_SFX_IDX;
-                string codecNameDat = formatName + CODEC_SFX_DAT;
-                CodecUtil.WriteHeader(indexStream, codecNameIdx, VERSION_CURRENT);
-                CodecUtil.WriteHeader(vectorsStream, codecNameDat, VERSION_CURRENT);
-                Debug.Assert(CodecUtil.HeaderLength(codecNameDat) == vectorsStream.GetFilePointer());
-                Debug.Assert(CodecUtil.HeaderLength(codecNameIdx) == indexStream.GetFilePointer());
-
-                indexWriter = new CompressingStoredFieldsIndexWriter(indexStream);
-                indexStream = null;
-
-                vectorsStream.WriteVInt32(PackedInt32s.VERSION_CURRENT);
-                vectorsStream.WriteVInt32(chunkSize);
-                writer = new BlockPackedWriter(vectorsStream, BLOCK_SIZE);
-
-                positionsBuf = new int[1024];
-                startOffsetsBuf = new int[1024];
-                lengthsBuf = new int[1024];
-                payloadLengthsBuf = new int[1024];
-
-                success = true;
-            }
-            finally
-            {
-                if (!success)
-                {
-                    IOUtils.CloseWhileHandlingException(indexStream);
-                    Abort();
-                }
-            }
-        }
-
-        protected override void Dispose(bool disposing)
-        {
-            if (disposing)
-            {
-                try
-                {
-                    IOUtils.Close(vectorsStream, indexWriter);
-                }
-                finally
-                {
-                    vectorsStream = null;
-                    indexWriter = null;
-                }
-            }
-        }
-
-        public override void Abort()
-        {
-            IOUtils.CloseWhileHandlingException(this);
-            IOUtils.DeleteFilesIgnoringExceptions(directory, IndexFileNames.SegmentFileName(segment, segmentSuffix, VECTORS_EXTENSION), IndexFileNames.SegmentFileName(segment, segmentSuffix, VECTORS_INDEX_EXTENSION));
-        }
-
-        public override void StartDocument(int numVectorFields)
-        {
-            curDoc = AddDocData(numVectorFields);
-        }
-
-        public override void FinishDocument()
-        {
-            // append the payload bytes of the doc after its terms
-            termSuffixes.WriteBytes(payloadBytes.Bytes, payloadBytes.Length);
-            payloadBytes.Length = 0;
-            ++numDocs;
-            if (TriggerFlush())
-            {
-                Flush();
-            }
-            curDoc = null;
-        }
-
-        public override void StartField(FieldInfo info, int numTerms, bool positions, bool offsets, bool payloads)
-        {
-            curField = curDoc.AddField(info.Number, numTerms, positions, offsets, payloads);
-            lastTerm.Length = 0;
-        }
-
-        public override void FinishField()
-        {
-            curField = null;
-        }
-
-        public override void StartTerm(BytesRef term, int freq)
-        {
-            Debug.Assert(freq >= 1);
-            int prefix = StringHelper.BytesDifference(lastTerm, term);
-            curField.AddTerm(freq, prefix, term.Length - prefix);
-            termSuffixes.WriteBytes(term.Bytes, term.Offset + prefix, term.Length - prefix);
-            // copy last term
-            if (lastTerm.Bytes.Length < term.Length)
-            {
-                lastTerm.Bytes = new byte[ArrayUtil.Oversize(term.Length, 1)];
-            }
-            lastTerm.Offset = 0;
-            lastTerm.Length = term.Length;
-            Array.Copy(term.Bytes, term.Offset, lastTerm.Bytes, 0, term.Length);
-        }
-
-        public override void AddPosition(int position, int startOffset, int endOffset, BytesRef payload)
-        {
-            Debug.Assert(curField.flags != 0);
-            curField.AddPosition(position, startOffset, endOffset - startOffset, payload == null ? 0 : payload.Length);
-            if (curField.hasPayloads && payload != null)
-            {
-                payloadBytes.WriteBytes(payload.Bytes, payload.Offset, payload.Length);
-            }
-        }
-
-        private bool TriggerFlush()
-        {
-            return termSuffixes.Length >= chunkSize || pendingDocs.Count >= MAX_DOCUMENTS_PER_CHUNK;
-        }
-
-        private void Flush()
-        {
-            int chunkDocs = pendingDocs.Count;
-            Debug.Assert(chunkDocs > 0, chunkDocs.ToString());
-
-            // write the index file
-            indexWriter.WriteIndex(chunkDocs, vectorsStream.GetFilePointer());
-
-            int docBase = numDocs - chunkDocs;
-            vectorsStream.WriteVInt32(docBase);
-            vectorsStream.WriteVInt32(chunkDocs);
-
-            // total number of fields of the chunk
-            int totalFields = FlushNumFields(chunkDocs);
-
-            if (totalFields > 0)
-            {
-                // unique field numbers (sorted)
-                int[] fieldNums = FlushFieldNums();
-                // offsets in the array of unique field numbers
-                FlushFields(totalFields, fieldNums);
-                // flags (does the field have positions, offsets, payloads?)
-                FlushFlags(totalFields, fieldNums);
-                // number of terms of each field
-                FlushNumTerms(totalFields);
-                // prefix and suffix lengths for each field
-                FlushTermLengths();
-                // term freqs - 1 (because termFreq is always >=1) for each term
-                FlushTermFreqs();
-                // positions for all terms, when enabled
-                FlushPositions();
-                // offsets for all terms, when enabled
-                FlushOffsets(fieldNums);
-                // payload lengths for all terms, when enabled
-                FlushPayloadLengths();
-
-                // compress terms and payloads and write them to the output
-                compressor.Compress(termSuffixes.Bytes, 0, termSuffixes.Length, vectorsStream);
-            }
-
-            // reset
-            pendingDocs.Clear();
-            curDoc = null;
-            curField = null;
-            termSuffixes.Length = 0;
-        }
-
-        private int FlushNumFields(int chunkDocs)
-        {
-            if (chunkDocs == 1)
-            {
-                int numFields = pendingDocs.First.Value.numFields;
-                vectorsStream.WriteVInt32(numFields);
-                return numFields;
-            }
-            else
-            {
-                writer.Reset(vectorsStream);
-                int totalFields = 0;
-                foreach (DocData dd in pendingDocs)
-                {
-                    writer.Add(dd.numFields);
-                    totalFields += dd.numFields;
-                }
-                writer.Finish();
-                return totalFields;
-            }
-        }
-
-        /// <summary>
-        /// Returns a sorted array containing unique field numbers </summary>
-        private int[] FlushFieldNums()
-        {
-            SortedSet<int> fieldNums = new SortedSet<int>();
-            foreach (DocData dd in pendingDocs)
-            {
-                foreach (FieldData fd in dd.fields)
-                {
-                    fieldNums.Add(fd.fieldNum);
-                }
-            }
-
-            int numDistinctFields = fieldNums.Count;
-            Debug.Assert(numDistinctFields > 0);
-            int bitsRequired = PackedInt32s.BitsRequired(fieldNums.Max);
-            int token = (Math.Min(numDistinctFields - 1, 0x07) << 5) | bitsRequired;
-            vectorsStream.WriteByte((byte)(sbyte)token);
-            if (numDistinctFields - 1 >= 0x07)
-            {
-                vectorsStream.WriteVInt32(numDistinctFields - 1 - 0x07);
-            }
-            PackedInt32s.Writer writer = PackedInt32s.GetWriterNoHeader(vectorsStream, PackedInt32s.Format.PACKED, fieldNums.Count, bitsRequired, 1);
-            foreach (int fieldNum in fieldNums)
-            {
-                writer.Add(fieldNum);
-            }
-            writer.Finish();
-
-            int[] fns = new int[fieldNums.Count];
-            int i = 0;
-            foreach (int key in fieldNums)
-            {
-                fns[i++] = key;
-            }
-            return fns;
-        }
-
-        private void FlushFields(int totalFields, int[] fieldNums)
-        {
-            PackedInt32s.Writer writer = PackedInt32s.GetWriterNoHeader(vectorsStream, PackedInt32s.Format.PACKED, totalFields, PackedInt32s.BitsRequired(fieldNums.Length - 1), 1);
-            foreach (DocData dd in pendingDocs)
-            {
-                foreach (FieldData fd in dd.fields)
-                {
-                    int fieldNumIndex = Array.BinarySearch(fieldNums, fd.fieldNum);
-                    Debug.Assert(fieldNumIndex >= 0);
-                    writer.Add(fieldNumIndex);
-                }
-            }
-            writer.Finish();
-        }
-
-        private void FlushFlags(int totalFields, int[] fieldNums)
-        {
-            // check if fields always have the same flags
-            bool nonChangingFlags = true;
-            int[] fieldFlags = new int[fieldNums.Length];
-            Arrays.Fill(fieldFlags, -1);
-            bool breakOuterLoop;
-            foreach (DocData dd in pendingDocs)
-            {
-                breakOuterLoop = false;
-                foreach (FieldData fd in dd.fields)
-                {
-                    int fieldNumOff = Array.BinarySearch(fieldNums, fd.fieldNum);
-                    Debug.Assert(fieldNumOff >= 0);
-                    if (fieldFlags[fieldNumOff] == -1)
-                    {
-                        fieldFlags[fieldNumOff] = fd.flags;
-                    }
-                    else if (fieldFlags[fieldNumOff] != fd.flags)
-                    {
-                        nonChangingFlags = false;
-                        breakOuterLoop = true;
-                    }
-                }
-                if (breakOuterLoop)
-                    break;
-            }
-
-            if (nonChangingFlags)
-            {
-                // write one flag per field num
-                vectorsStream.WriteVInt32(0);
-                PackedInt32s.Writer writer = PackedInt32s.GetWriterNoHeader(vectorsStream, PackedInt32s.Format.PACKED, fieldFlags.Length, FLAGS_BITS, 1);
-                foreach (int flags in fieldFlags)
-                {
-                    Debug.Assert(flags >= 0);
-                    writer.Add(flags);
-                }
-                Debug.Assert(writer.Ord == fieldFlags.Length - 1);
-                writer.Finish();
-            }
-            else
-            {
-                // write one flag for every field instance
-                vectorsStream.WriteVInt32(1);
-                PackedInt32s.Writer writer = PackedInt32s.GetWriterNoHeader(vectorsStream, PackedInt32s.Format.PACKED, totalFields, FLAGS_BITS, 1);
-                foreach (DocData dd in pendingDocs)
-                {
-                    foreach (FieldData fd in dd.fields)
-                    {
-                        writer.Add(fd.flags);
-                    }
-                }
-                Debug.Assert(writer.Ord == totalFields - 1);
-                writer.Finish();
-            }
-        }
-
-        private void FlushNumTerms(int totalFields)
-        {
-            int maxNumTerms = 0;
-            foreach (DocData dd in pendingDocs)
-            {
-                foreach (FieldData fd in dd.fields)
-                {
-                    maxNumTerms |= fd.numTerms;
-                }
-            }
-            int bitsRequired = PackedInt32s.BitsRequired(maxNumTerms);
-            vectorsStream.WriteVInt32(bitsRequired);
-            PackedInt32s.Writer writer = PackedInt32s.GetWriterNoHeader(vectorsStream, PackedInt32s.Format.PACKED, totalFields, bitsRequired, 1);
-            foreach (DocData dd in pendingDocs)
-            {
-                foreach (FieldData fd in dd.fields)
-                {
-                    writer.Add(fd.numTerms);
-                }
-            }
-            Debug.Assert(writer.Ord == totalFields - 1);
-            writer.Finish();
-        }
-
-        private void FlushTermLengths()
-        {
-            writer.Reset(vectorsStream);
-            foreach (DocData dd in pendingDocs)
-            {
-                foreach (FieldData fd in dd.fields)
-                {
-                    for (int i = 0; i < fd.numTerms; ++i)
-                    {
-                        writer.Add(fd.prefixLengths[i]);
-                    }
-                }
-            }
-            writer.Finish();
-            writer.Reset(vectorsStream);
-            foreach (DocData dd in pendingDocs)
-            {
-                foreach (FieldData fd in dd.fields)
-                {
-                    for (int i = 0; i < fd.numTerms; ++i)
-                    {
-                        writer.Add(fd.suffixLengths[i]);
-                    }
-                }
-            }
-            writer.Finish();
-        }
-
-        private void FlushTermFreqs()
-        {
-            writer.Reset(vectorsStream);
-            foreach (DocData dd in pendingDocs)
-            {
-                foreach (FieldData fd in dd.fields)
-                {
-                    for (int i = 0; i < fd.numTerms; ++i)
-                    {
-                        writer.Add(fd.freqs[i] - 1);
-                    }
-                }
-            }
-            writer.Finish();
-        }
-
-        private void FlushPositions()
-        {
-            writer.Reset(vectorsStream);
-            foreach (DocData dd in pendingDocs)
-            {
-                foreach (FieldData fd in dd.fields)
-                {
-                    if (fd.hasPositions)
-                    {
-                        int pos = 0;
-                        for (int i = 0; i < fd.numTerms; ++i)
-                        {
-                            int previousPosition = 0;
-                            for (int j = 0; j < fd.freqs[i]; ++j)
-                            {
-                                int position = positionsBuf[fd.posStart + pos++];
-                                writer.Add(position - previousPosition);
-                                previousPosition = position;
-                            }
-                        }
-                        Debug.Assert(pos == fd.totalPositions);
-                    }
-                }
-            }
-            writer.Finish();
-        }
-
-        private void FlushOffsets(int[] fieldNums)
-        {
-            bool hasOffsets = false;
-            long[] sumPos = new long[fieldNums.Length];
-            long[] sumOffsets = new long[fieldNums.Length];
-            foreach (DocData dd in pendingDocs)
-            {
-                foreach (FieldData fd in dd.fields)
-                {
-                    hasOffsets |= fd.hasOffsets;
-                    if (fd.hasOffsets && fd.hasPositions)
-                    {
-                        int fieldNumOff = Array.BinarySearch(fieldNums, fd.fieldNum);
-                        int pos = 0;
-                        for (int i = 0; i < fd.numTerms; ++i)
-                        {
-                            int previousPos = 0;
-                            int previousOff = 0;
-                            for (int j = 0; j < fd.freqs[i]; ++j)
-                            {
-                                int position = positionsBuf[fd.posStart + pos];
-                                int startOffset = startOffsetsBuf[fd.offStart + pos];
-                                sumPos[fieldNumOff] += position - previousPos;
-                                sumOffsets[fieldNumOff] += startOffset - previousOff;
-                                previousPos = position;
-                                previousOff = startOffset;
-                                ++pos;
-                            }
-                        }
-                        Debug.Assert(pos == fd.totalPositions);
-                    }
-                }
-            }
-
-            if (!hasOffsets)
-            {
-                // nothing to do
-                return;
-            }
-
-            float[] charsPerTerm = new float[fieldNums.Length];
-            for (int i = 0; i < fieldNums.Length; ++i)
-            {
-                charsPerTerm[i] = (sumPos[i] <= 0 || sumOffsets[i] <= 0) ? 0 : (float)((double)sumOffsets[i] / sumPos[i]);
-            }
-
-            // start offsets
-            for (int i = 0; i < fieldNums.Length; ++i)
-            {
-                vectorsStream.WriteInt32(Number.SingleToInt32Bits(charsPerTerm[i]));
-            }
-
-            writer.Reset(vectorsStream);
-            foreach (DocData dd in pendingDocs)
-            {
-                foreach (FieldData fd in dd.fields)
-                {
-                    if ((fd.flags & OFFSETS) != 0)
-                    {
-                        int fieldNumOff = Array.BinarySearch(fieldNums, fd.fieldNum);
-                        float cpt = charsPerTerm[fieldNumOff];
-                        int pos = 0;
-                        for (int i = 0; i < fd.numTerms; ++i)
-                        {
-                            int previousPos = 0;
-                            int previousOff = 0;
-                            for (int j = 0; j < fd.freqs[i]; ++j)
-                            {
-                                int position = fd.hasPositions ? positionsBuf[fd.posStart + pos] : 0;
-                                int startOffset = startOffsetsBuf[fd.offStart + pos];
-                                writer.Add(startOffset - previousOff - (int)(cpt * (position - previousPos)));
-                                previousPos = position;
-                                previousOff = startOffset;
-                                ++pos;
-                            }
-                        }
-                    }
-                }
-            }
-            writer.Finish();
-
-            // lengths
-            writer.Reset(vectorsStream);
-            foreach (DocData dd in pendingDocs)
-            {
-                foreach (FieldData fd in dd.fields)
-                {
-                    if ((fd.flags & OFFSETS) != 0)
-                    {
-                        int pos = 0;
-                        for (int i = 0; i < fd.numTerms; ++i)
-                        {
-                            for (int j = 0; j < fd.freqs[i]; ++j)
-                            {
-                                writer.Add(lengthsBuf[fd.offStart + pos++] - fd.prefixLengths[i] - fd.suffixLengths[i]);
-                            }
-                        }
-                        Debug.Assert(pos == fd.totalPositions);
-                    }
-                }
-            }
-            writer.Finish();
-        }
-
-        private void FlushPayloadLengths()
-        {
-            writer.Reset(vectorsStream);
-            foreach (DocData dd in pendingDocs)
-            {
-                foreach (FieldData fd in dd.fields)
-                {
-                    if (fd.hasPayloads)
-                    {
-                        for (int i = 0; i < fd.totalPositions; ++i)
-                        {
-                            writer.Add(payloadLengthsBuf[fd.payStart + i]);
-                        }
-                    }
-                }
-            }
-            writer.Finish();
-        }
-
-        public override void Finish(FieldInfos fis, int numDocs)
-        {
-            if (!(pendingDocs.Count == 0))
-            {
-                Flush();
-            }
-            if (numDocs != this.numDocs)
-            {
-                throw new Exception("Wrote " + this.numDocs + " docs, finish called with numDocs=" + numDocs);
-            }
-            indexWriter.Finish(numDocs, vectorsStream.GetFilePointer());
-            CodecUtil.WriteFooter(vectorsStream);
-        }
-
-        public override IComparer<BytesRef> Comparer
-        {
-            get
-            {
-                return BytesRef.UTF8SortedAsUnicodeComparer;
-            }
-        }
-
-        public override void AddProx(int numProx, DataInput positions, DataInput offsets)
-        {
-            Debug.Assert((curField.hasPositions) == (positions != null));
-            Debug.Assert((curField.hasOffsets) == (offsets != null));
-
-            if (curField.hasPositions)
-            {
-                int posStart = curField.posStart + curField.totalPositions;
-                if (posStart + numProx > positionsBuf.Length)
-                {
-                    positionsBuf = ArrayUtil.Grow(positionsBuf, posStart + numProx);
-                }
-                int position = 0;
-                if (curField.hasPayloads)
-                {
-                    int payStart = curField.payStart + curField.totalPositions;
-                    if (payStart + numProx > payloadLengthsBuf.Length)
-                    {
-                        payloadLengthsBuf = ArrayUtil.Grow(payloadLengthsBuf, payStart + numProx);
-                    }
-                    for (int i = 0; i < numProx; ++i)
-                    {
-                        int code = positions.ReadVInt32();
-                        if ((code & 1) != 0)
-                        {
-                            // this position has a payload
-                            int payloadLength = positions.ReadVInt32();
-                            payloadLengthsBuf[payStart + i] = payloadLength;
-                            payloadBytes.CopyBytes(positions, payloadLength);
-                        }
-                        else
-                        {
-                            payloadLengthsBuf[payStart + i] = 0;
-                        }
-                        position += (int)((uint)code >> 1);
-                        positionsBuf[posStart + i] = position;
-                    }
-                }
-                else
-                {
-                    for (int i = 0; i < numProx; ++i)
-                    {
-                        position += ((int)((uint)positions.ReadVInt32() >> 1));
-                        positionsBuf[posStart + i] = position;
-                    }
-                }
-            }
-
-            if (curField.hasOffsets)
-            {
-                int offStart = curField.offStart + curField.totalPositions;
-                if (offStart + numProx > startOffsetsBuf.Length)
-                {
-                    int newLength = ArrayUtil.Oversize(offStart + numProx, 4);
-                    startOffsetsBuf = Arrays.CopyOf(startOffsetsBuf, newLength);
-                    lengthsBuf = Arrays.CopyOf(lengthsBuf, newLength);
-                }
-                int lastOffset = 0, startOffset, endOffset;
-                for (int i = 0; i < numProx; ++i)
-                {
-                    startOffset = lastOffset + offsets.ReadVInt32();
-                    endOffset = startOffset + offsets.ReadVInt32();
-                    lastOffset = endOffset;
-                    startOffsetsBuf[offStart + i] = startOffset;
-                    lengthsBuf[offStart + i] = endOffset - startOffset;
-                }
-            }
-
-            curField.totalPositions += numProx;
-        }
-
-        public override int Merge(MergeState mergeState)
-        {
-            int docCount = 0;
-            int idx = 0;
-
-            foreach (AtomicReader reader in mergeState.Readers)
-            {
-                SegmentReader matchingSegmentReader = mergeState.MatchingSegmentReaders[idx++];
-                CompressingTermVectorsReader matchingVectorsReader = null;
-                if (matchingSegmentReader != null)
-                {
-                    TermVectorsReader vectorsReader = matchingSegmentReader.TermVectorsReader;
-                    // we can only bulk-copy if the matching reader is also a CompressingTermVectorsReader
-                    if (vectorsReader != null && vectorsReader is CompressingTermVectorsReader)
-                    {
-                        matchingVectorsReader = (CompressingTermVectorsReader)vectorsReader;
-                    }
-                }
-
-                int maxDoc = reader.MaxDoc;
-                IBits liveDocs = reader.LiveDocs;
-
-                if (matchingVectorsReader == null || matchingVectorsReader.Version != VERSION_CURRENT || matchingVectorsReader.CompressionMode != compressionMode || matchingVectorsReader.ChunkSize != chunkSize || matchingVectorsReader.PackedInt32sVersion != PackedInt32s.VERSION_CURRENT)
-                {
-                    // naive merge...
-                    for (int i = NextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; i = NextLiveDoc(i + 1, liveDocs, maxDoc))
-                    {
-                        Fields vectors = reader.GetTermVectors(i);
-                        AddAllDocVectors(vectors, mergeState);
-                        ++docCount;
-                        mergeState.CheckAbort.Work(300);
-                    }
-                }
-                else
-                {
-                    CompressingStoredFieldsIndexReader index = matchingVectorsReader.Index;
-                    IndexInput vectorsStreamOrig = matchingVectorsReader.VectorsStream;
-                    vectorsStreamOrig.Seek(0);
-                    ChecksumIndexInput vectorsStream = new BufferedChecksumIndexInput((IndexInput)vectorsStreamOrig.Clone());
-
-                    for (int i = NextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; )
-                    {
-                        // We make sure to move the checksum input in any case, otherwise the final
-                        // integrity check might need to read the whole file a second time
-                        long startPointer = index.GetStartPointer(i);
-                        if (startPointer > vectorsStream.GetFilePointer())
-                        {
-                            vectorsStream.Seek(startPointer);
-                        }
-                        if ((pendingDocs.Count == 0) && (i == 0 || index.GetStartPointer(i - 1) < startPointer)) // start of a chunk
-                        {
-                            int docBase = vectorsStream.ReadVInt32();
-                            int chunkDocs = vectorsStream.ReadVInt32();
-                            Debug.Assert(docBase + chunkDocs <= matchingSegmentReader.MaxDoc);
-                            if (docBase + chunkDocs < matchingSegmentReader.MaxDoc && NextDeletedDoc(docBase, liveDocs, docBase + chunkDocs) == docBase + chunkDocs)
-                            {
-                                long chunkEnd = index.GetStartPointer(docBase + chunkDocs);
-                                long chunkLength = chunkEnd - vectorsStream.GetFilePointer();
-                                indexWriter.WriteIndex(chunkDocs, this.vectorsStream.GetFilePointer());
-                                this.vectorsStream.WriteVInt32(docCount);
-                                this.vectorsStream.WriteVInt32(chunkDocs);
-                                this.vectorsStream.CopyBytes(vectorsStream, chunkLength);
-                                docCount += chunkDocs;
-                                this.numDocs += chunkDocs;
-                                mergeState.CheckAbort.Work(300 * chunkDocs);
-                                i = NextLiveDoc(docBase + chunkDocs, liveDocs, maxDoc);
-                            }
-                            else
-                            {
-                                for (; i < docBase + chunkDocs; i = NextLiveDoc(i + 1, liveDocs, maxDoc))
-                                {
-                                    Fields vectors = reader.GetTermVectors(i);
-                                    AddAllDocVectors(vectors, mergeState);
-                                    ++docCount;
-                                    mergeState.CheckAbort.Work(300);
-                                }
-                            }
-                        }
-                        else
-                        {
-                            Fields vectors = reader.GetTermVectors(i);
-                            AddAllDocVectors(vectors, mergeState);
-                            ++docCount;
-                            mergeState.CheckAbort.Work(300);
-                            i = NextLiveDoc(i + 1, liveDocs, maxDoc);
-                        }
-                    }
-
-                    vectorsStream.Seek(vectorsStream.Length - CodecUtil.FooterLength());
-                    CodecUtil.CheckFooter(vectorsStream);
-                }
-            }
-            Finish(mergeState.FieldInfos, docCount);
-            return docCount;
-        }
-
-        private static int NextLiveDoc(int doc, IBits liveDocs, int maxDoc)
-        {
-            if (liveDocs == null)
-            {
-                return doc;
-            }
-            while (doc < maxDoc && !liveDocs.Get(doc))
-            {
-                ++doc;
-            }
-            return doc;
-        }
-
-        private static int NextDeletedDoc(int doc, IBits liveDocs, int maxDoc)
-        {
-            if (liveDocs == null)
-            {
-                return maxDoc;
-            }
-            while (doc < maxDoc && liveDocs.Get(doc))
-            {
-                ++doc;
-            }
-            return doc;
-        }
-    }
-}
\ No newline at end of file
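
The writer above is callback-driven: the indexing chain announces each
document, field, term, and position through the StartDocument/StartField/
StartTerm/AddPosition/FinishField/FinishDocument sequence, and a chunk is
flushed once the buffered term suffixes reach chunkSize or 128 documents are
pending. A minimal sketch of that call order follows; the writer instance,
FieldInfo, and term values are hypothetical, since in practice Lucene.NET's
indexing chain drives these calls rather than user code:

    // Hypothetical driver showing the callback order the writer expects.
    // Terms within a field must arrive in sorted order so that prefix
    // compression against the previous term (in StartTerm) works.
    static void WriteOneDoc(CompressingTermVectorsWriter writer, FieldInfo fieldInfo)
    {
        writer.StartDocument(numVectorFields: 1);
        writer.StartField(fieldInfo, numTerms: 2, positions: true, offsets: true, payloads: false);

        writer.StartTerm(new BytesRef("apple"), freq: 1);
        writer.AddPosition(position: 0, startOffset: 0, endOffset: 5, payload: null);

        writer.StartTerm(new BytesRef("apricot"), freq: 1); // shares the "ap" prefix with "apple"
        writer.AddPosition(position: 1, startOffset: 6, endOffset: 13, payload: null);

        writer.FinishField();
        writer.FinishDocument(); // may trigger a chunk flush once enough data is buffered
    }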

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Codecs/Compressing/CompressionMode.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Codecs/Compressing/CompressionMode.cs b/src/Lucene.Net.Core/Codecs/Compressing/CompressionMode.cs
deleted file mode 100644
index ce0857c..0000000
--- a/src/Lucene.Net.Core/Codecs/Compressing/CompressionMode.cs
+++ /dev/null
@@ -1,286 +0,0 @@
-using System.Diagnostics;
-using System.IO;
-using System.IO.Compression;
-using ArrayUtil = Lucene.Net.Util.ArrayUtil;
-using BytesRef = Lucene.Net.Util.BytesRef;
-
-namespace Lucene.Net.Codecs.Compressing
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using CorruptIndexException = Lucene.Net.Index.CorruptIndexException;
-    using DataInput = Lucene.Net.Store.DataInput;
-    using DataOutput = Lucene.Net.Store.DataOutput;
-
-    /// <summary>
-    /// A compression mode. Tells how much effort should be spent on compression and
-    /// decompression of stored fields.
-    /// @lucene.experimental
-    /// </summary>
-    public abstract class CompressionMode
-    {
-        /// <summary>
-        /// A compression mode that trades compression ratio for speed. Although the
-        /// compression ratio might remain high, compression and decompression are
-        /// very fast. Use this mode with indices that have a high update rate but
-        /// should be able to load documents from disk quickly.
-        /// </summary>
-        public static readonly CompressionMode FAST = new CompressionModeAnonymousInnerClassHelper();
-
-        private class CompressionModeAnonymousInnerClassHelper : CompressionMode
-        {
-            public CompressionModeAnonymousInnerClassHelper()
-            {
-            }
-
-            public override Compressor NewCompressor()
-            {
-                return new LZ4FastCompressor();
-            }
-
-            public override Decompressor NewDecompressor()
-            {
-                return LZ4_DECOMPRESSOR;
-            }
-
-            public override string ToString()
-            {
-                return "FAST";
-            }
-        }
-
-        /// <summary>
-        /// A compression mode that trades speed for compression ratio. Although
-        /// compression and decompression might be slow, this compression mode should
-        /// provide a good compression ratio. This mode might be interesting if/when
-        /// your index size is much bigger than your OS cache.
-        /// </summary>
-        public static readonly CompressionMode HIGH_COMPRESSION = new CompressionModeAnonymousInnerClassHelper2();
-
-        private class CompressionModeAnonymousInnerClassHelper2 : CompressionMode
-        {
-            public CompressionModeAnonymousInnerClassHelper2()
-            {
-            }
-
-            public override Compressor NewCompressor()
-            {
-                return new DeflateCompressor(System.IO.Compression.CompressionLevel.Optimal);
-            }
-
-            public override Decompressor NewDecompressor()
-            {
-                return new DeflateDecompressor();
-            }
-
-            public override string ToString()
-            {
-                return "HIGH_COMPRESSION";
-            }
-        }
-
-        /// <summary>
-        /// This compression mode is similar to <seealso cref="FAST"/> but it spends more time
-        /// compressing in order to improve the compression ratio. This compression
-        /// mode is best used with indices that have a low update rate but should be
-        /// able to load documents from disk quickly.
-        /// </summary>
-        public static readonly CompressionMode FAST_DECOMPRESSION = new CompressionModeAnonymousInnerClassHelper3();
-
-        private class CompressionModeAnonymousInnerClassHelper3 : CompressionMode
-        {
-            public CompressionModeAnonymousInnerClassHelper3()
-            {
-            }
-
-            public override Compressor NewCompressor()
-            {
-                return new LZ4HighCompressor();
-            }
-
-            public override Decompressor NewDecompressor()
-            {
-                return LZ4_DECOMPRESSOR;
-            }
-
-            public override string ToString()
-            {
-                return "FAST_DECOMPRESSION";
-            }
-        }
-
-        /// <summary>
-        /// Sole constructor. </summary>
-        protected internal CompressionMode()
-        {
-        }
-
-        /// <summary>
-        /// Create a new <seealso cref="Compressor"/> instance.
-        /// </summary>
-        public abstract Compressor NewCompressor();
-
-        /// <summary>
-        /// Create a new <seealso cref="Decompressor"/> instance.
-        /// </summary>
-        public abstract Decompressor NewDecompressor();
-
-        private static readonly Decompressor LZ4_DECOMPRESSOR = new DecompressorAnonymousInnerClassHelper();
-
-        private class DecompressorAnonymousInnerClassHelper : Decompressor
-        {
-            public DecompressorAnonymousInnerClassHelper()
-            {
-            }
-
-            public override void Decompress(DataInput @in, int originalLength, int offset, int length, BytesRef bytes)
-            {
-                Debug.Assert(offset + length <= originalLength);
-                // add 7 padding bytes, this is not necessary but can help decompression run faster
-                if (bytes.Bytes.Length < originalLength + 7)
-                {
-                    bytes.Bytes = new byte[ArrayUtil.Oversize(originalLength + 7, 1)];
-                }
-                int decompressedLength = LZ4.Decompress(@in, offset + length, bytes.Bytes, 0);
-                if (decompressedLength > originalLength)
-                {
-                    throw new CorruptIndexException("Corrupted: lengths mismatch: " + decompressedLength + " > " + originalLength + " (resource=" + @in + ")");
-                }
-                bytes.Offset = offset;
-                bytes.Length = length;
-            }
-
-            public override object Clone()
-            {
-                return this;
-            }
-        }
-
-        private sealed class LZ4FastCompressor : Compressor
-        {
-            private readonly LZ4.HashTable ht;
-
-            internal LZ4FastCompressor()
-            {
-                ht = new LZ4.HashTable();
-            }
-
-            public override void Compress(byte[] bytes, int off, int len, DataOutput @out)
-            {
-                LZ4.Compress(bytes, off, len, @out, ht);
-            }
-        }
-
-        private sealed class LZ4HighCompressor : Compressor
-        {
-            internal readonly LZ4.HCHashTable ht;
-
-            internal LZ4HighCompressor()
-            {
-                ht = new LZ4.HCHashTable();
-            }
-
-            public override void Compress(byte[] bytes, int off, int len, DataOutput @out)
-            {
-                LZ4.CompressHC(bytes, off, len, @out, ht);
-            }
-        }
-
-        private sealed class DeflateDecompressor : Decompressor
-        {
-
-            internal DeflateDecompressor()
-            {
-            }
-
-            public override void Decompress(DataInput input, int originalLength, int offset, int length, BytesRef bytes)
-            {
-                Debug.Assert(offset + length <= originalLength);
-                if (length == 0)
-                {
-                    bytes.Length = 0;
-                    return;
-                }
-
-                byte[] compressedBytes = new byte[input.ReadVInt32()];
-                input.ReadBytes(compressedBytes, 0, compressedBytes.Length);
-                byte[] decompressedBytes = null;
-
-                using (MemoryStream decompressedStream = new MemoryStream())
-                {
-                    using (MemoryStream compressedStream = new MemoryStream(compressedBytes))
-                    {
-                        using (DeflateStream dStream = new DeflateStream(compressedStream, System.IO.Compression.CompressionMode.Decompress))
-                        {
-                            dStream.CopyTo(decompressedStream);
-                        }
-                    }
-                    decompressedBytes = decompressedStream.ToArray();
-                }
-
-                if (decompressedBytes.Length != originalLength)
-                {
-                    throw new CorruptIndexException("Length mismatch: " + decompressedBytes.Length + " != " + originalLength + " (resource=" + input + ")");
-                }
-
-                bytes.Bytes = decompressedBytes;
-                bytes.Offset = offset;
-                bytes.Length = length;
-            }
-
-            public override object Clone()
-            {
-                return new DeflateDecompressor();
-            }
-        }
-
-        private class DeflateCompressor : Compressor
-        {
-            private CompressionLevel compressionLevel;
-            internal DeflateCompressor(CompressionLevel level)
-            {
-                compressionLevel = level;
-            }
-
-            public override void Compress(byte[] bytes, int off, int len, DataOutput output)
-            {
-                byte[] resultArray = null;
-                using (MemoryStream compressionMemoryStream = new MemoryStream())
-                {
-                    using (DeflateStream deflateStream = new DeflateStream(compressionMemoryStream, compressionLevel))
-                    {
-                        deflateStream.Write(bytes, off, len);
-                    }
-                    resultArray = compressionMemoryStream.ToArray();
-                }
-
-                if (resultArray.Length == 0)
-                {
-                    Debug.Assert(len == 0, len.ToString());
-                    output.WriteVInt32(0);
-                    return;
-                }
-                else
-                {
-                    output.WriteVInt32(resultArray.Length);
-                    output.WriteBytes(resultArray, resultArray.Length);
-                }
-            }
-        }
-    }
-}
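
To make the contract above concrete, here is a hedged sketch that round-trips
a buffer through CompressionMode.FAST, assuming an in-memory RAMDirectory as
the DataOutput/DataInput pair (any Lucene.NET Directory would do; the file
name "chunk" is arbitrary):

    byte[] data = System.Text.Encoding.UTF8.GetBytes("some stored field bytes");

    Compressor compressor = CompressionMode.FAST.NewCompressor();
    Decompressor decompressor = CompressionMode.FAST.NewDecompressor();

    using (var dir = new RAMDirectory())
    {
        // Compress the whole buffer into a (virtual) file.
        using (IndexOutput @out = dir.CreateOutput("chunk", IOContext.DEFAULT))
        {
            compressor.Compress(data, 0, data.Length, @out);
        }

        // Decompress it back; afterwards bytes.Length equals the requested length.
        using (IndexInput @in = dir.OpenInput("chunk", IOContext.DEFAULT))
        {
            var bytes = new BytesRef();
            decompressor.Decompress(@in, data.Length, 0, data.Length, bytes);
        }
    }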

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Codecs/Compressing/Compressor.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Codecs/Compressing/Compressor.cs b/src/Lucene.Net.Core/Codecs/Compressing/Compressor.cs
deleted file mode 100644
index 666e90a..0000000
--- a/src/Lucene.Net.Core/Codecs/Compressing/Compressor.cs
+++ /dev/null
@@ -1,40 +0,0 @@
-namespace Lucene.Net.Codecs.Compressing
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using DataOutput = Lucene.Net.Store.DataOutput;
-
-    /// <summary>
-    /// A data compressor.
-    /// </summary>
-    public abstract class Compressor
-    {
-        /// <summary>
-        /// Sole constructor, typically called from sub-classes. </summary>
-        protected internal Compressor()
-        {
-        }
-
-        /// <summary>
-        /// Compress bytes into <code>out</code>. It is the responsibility of the
-        /// compressor to add all necessary information so that a <seealso cref="Decompressor"/>
-        /// will know when to stop decompressing bytes from the stream.
-        /// </summary>
-        public abstract void Compress(byte[] bytes, int off, int len, DataOutput @out);
-    }
-}
\ No newline at end of file
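
A hypothetical "stored" compressor illustrates that contract: the compressor
itself must write enough framing for the matching Decompressor to know where
the compressed stream ends. Here a VInt length header plays that role (this
class is illustrative only and not part of the codec):

    internal sealed class StoredCompressor : Compressor
    {
        public override void Compress(byte[] bytes, int off, int len, DataOutput @out)
        {
            @out.WriteVInt32(len);            // framing: how many raw bytes follow
            @out.WriteBytes(bytes, off, len); // "compressed" payload, stored verbatim
        }
    }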

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Codecs/Compressing/Decompressor.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Codecs/Compressing/Decompressor.cs b/src/Lucene.Net.Core/Codecs/Compressing/Decompressor.cs
deleted file mode 100644
index d1e0641..0000000
--- a/src/Lucene.Net.Core/Codecs/Compressing/Decompressor.cs
+++ /dev/null
@@ -1,52 +0,0 @@
-using BytesRef = Lucene.Net.Util.BytesRef;
-
-namespace Lucene.Net.Codecs.Compressing
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using DataInput = Lucene.Net.Store.DataInput;
-
-    /// <summary>
-    /// A decompressor.
-    /// </summary>
-    public abstract class Decompressor
-    {
-        /// <summary>
-        /// Sole constructor, typically called from sub-classes. </summary>
-        protected internal Decompressor()
-        {
-        }
-
-        /// <summary>
-        /// Decompress bytes that were stored between offsets <code>offset</code> and
-        /// <code>offset+length</code> in the original stream from the compressed
-        /// stream <code>in</code> to <code>bytes</code>. After returning, the length
-        /// of <code>bytes</code> (<code>bytes.Length</code>) must be equal to
-        /// <code>length</code>. Implementations of this method are free to resize
-        /// <code>bytes</code> depending on their needs.
-        /// </summary>
-        /// <param name="in"> the input that stores the compressed stream </param>
-        /// <param name="originalLength"> the length of the original data (before compression) </param>
-        /// <param name="offset"> bytes before this offset do not need to be decompressed </param>
-        /// <param name="length"> bytes after <code>offset+length</code> do not need to be decompressed </param>
-        /// <param name="bytes"> a <seealso cref="BytesRef"/> in which to store the decompressed data </param>
-        public abstract void Decompress(DataInput @in, int originalLength, int offset, int length, BytesRef bytes);
-
-        public abstract object Clone();
-    }
-}
\ No newline at end of file

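A hypothetical counterpart to the StoringCompressor sketch above makes the
offset/length contract concrete; again this is illustrative only, not part of
this commit:

    using Lucene.Net.Store;
    using Lucene.Net.Util;

    namespace Lucene.Net.Codecs.Compressing
    {
        // Hypothetical: pairs with StoringCompressor. Only the requested
        // [offset, offset + length) slice has to be exposed, and bytes.Length
        // must equal 'length' when the method returns.
        internal sealed class StoringDecompressor : Decompressor
        {
            public override void Decompress(DataInput @in, int originalLength, int offset, int length, BytesRef bytes)
            {
                @in.ReadVInt32(); // skip the length prefix written by the compressor
                if (bytes.Bytes.Length < originalLength)
                {
                    bytes.Bytes = new byte[ArrayUtil.Oversize(originalLength, 1)];
                }
                // reading up to offset + length covers the requested slice
                @in.ReadBytes(bytes.Bytes, 0, offset + length);
                bytes.Offset = offset;
                bytes.Length = length;
            }

            public override object Clone()
            {
                return this; // stateless, so one shared instance is safe
            }
        }
    }
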
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Codecs/Compressing/LZ4.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Codecs/Compressing/LZ4.cs b/src/Lucene.Net.Core/Codecs/Compressing/LZ4.cs
deleted file mode 100644
index cf05994..0000000
--- a/src/Lucene.Net.Core/Codecs/Compressing/LZ4.cs
+++ /dev/null
@@ -1,645 +0,0 @@
-using Lucene.Net.Support;
-using System;
-using System.Diagnostics;
-
-namespace Lucene.Net.Codecs.Compressing
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using DataInput = Lucene.Net.Store.DataInput;
-    using DataOutput = Lucene.Net.Store.DataOutput;
-    using PackedInt32s = Lucene.Net.Util.Packed.PackedInt32s;
-
-    /// <summary>
-    /// LZ4 compression and decompression routines.
-    ///
-    /// http://code.google.com/p/lz4/
-    /// http://fastcompression.blogspot.fr/p/lz4.html
-    /// </summary>
-    public sealed class LZ4
-    {
-        private LZ4()
-        {
-        }
-
-        internal const int MEMORY_USAGE = 14;
-        internal const int MIN_MATCH = 4; // minimum length of a match
-        internal static readonly int MAX_DISTANCE = 1 << 16; // maximum distance of a reference
-        internal const int LAST_LITERALS = 5; // the last 5 bytes must be encoded as literals
-        internal const int HASH_LOG_HC = 15; // log size of the dictionary for compressHC
-        internal static readonly int HASH_TABLE_SIZE_HC = 1 << HASH_LOG_HC;
-        internal static readonly int OPTIMAL_ML = 0x0F + 4 - 1; // match length that doesn't require an additional byte
-
-        private static int Hash(int i, int hashBits)
-        {
-            return Number.URShift((i * -1640531535), (32 - hashBits));
-        }
-
-        private static int HashHC(int i)
-        {
-            return Hash(i, HASH_LOG_HC);
-        }
-
-        /// <summary>
-        /// NOTE: This was readInt() in Lucene
-        /// </summary>
-        private static int ReadInt32(byte[] buf, int i)
-        {
-            return ((((sbyte)buf[i]) & 0xFF) << 24) | ((((sbyte)buf[i + 1]) & 0xFF) << 16) | ((((sbyte)buf[i + 2]) & 0xFF) << 8) |
-                (((sbyte)buf[i + 3]) & 0xFF);
-        }
-
-        /// <summary>
-        /// NOTE: This was readIntEquals() in Lucene
-        /// </summary>
-        private static bool ReadInt32Equals(byte[] buf, int i, int j)
-        {
-            return ReadInt32(buf, i) == ReadInt32(buf, j);
-        }
-
-        private static int CommonBytes(byte[] b, int o1, int o2, int limit)
-        {
-            Debug.Assert(o1 < o2);
-            int count = 0;
-            while (o2 < limit && b[o1++] == b[o2++])
-            {
-                ++count;
-            }
-            return count;
-        }
-
-        private static int CommonBytesBackward(byte[] b, int o1, int o2, int l1, int l2)
-        {
-            int count = 0;
-            while (o1 > l1 && o2 > l2 && b[--o1] == b[--o2])
-            {
-                ++count;
-            }
-            return count;
-        }
-
-        /// <summary>
-        /// Decompress at least <code>decompressedLen</code> bytes into
-        /// <code>dest[dOff:]</code>. Please note that <code>dest</code> must be large
-        /// enough to hold <b>all</b> decompressed data (meaning that you
-        /// need to know the total decompressed length).
-        /// </summary>
-        public static int Decompress(DataInput compressed, int decompressedLen, byte[] dest, int dOff)
-        {
-            int destEnd = dest.Length;
-
-            do
-            {
-                // literals
-                int token = compressed.ReadByte() & 0xFF;
-                int literalLen = (int)(((uint)token) >> 4);
-
-                if (literalLen != 0)
-                {
-                    if (literalLen == 0x0F)
-                    {
-                        byte len;
-                        while ((len = compressed.ReadByte()) == 0xFF)
-                        {
-                            literalLen += 0xFF;
-                        }
-                        literalLen += len & 0xFF;
-                    }
-                    compressed.ReadBytes(dest, dOff, literalLen);
-                    dOff += literalLen;
-                }
-
-                if (dOff >= decompressedLen)
-                {
-                    break;
-                }
-
-                // matches
-                var byte1 = compressed.ReadByte();
-                var byte2 = compressed.ReadByte();
-                int matchDec = (byte1 & 0xFF) | ((byte2 & 0xFF) << 8);
-                Debug.Assert(matchDec > 0);
-
-                int matchLen = token & 0x0F;
-                if (matchLen == 0x0F)
-                {
-                    int len;
-                    while ((len = compressed.ReadByte()) == 0xFF)
-                    {
-                        matchLen += 0xFF;
-                    }
-                    matchLen += len & 0xFF;
-                }
-                matchLen += MIN_MATCH;
-
-                // copying a multiple of 8 bytes can make decompression from 5% to 10% faster
-                int fastLen = (int)((matchLen + 7) & 0xFFFFFFF8);
-                if (matchDec < matchLen || dOff + fastLen > destEnd)
-                {
-                    // overlap -> naive incremental copy
-                    for (int @ref = dOff - matchDec, end = dOff + matchLen; dOff < end; ++@ref, ++dOff)
-                    {
-                        dest[dOff] = dest[@ref];
-                    }
-                }
-                else
-                {
-                    // no overlap -> arraycopy
-                    Array.Copy(dest, dOff - matchDec, dest, dOff, fastLen);
-                    dOff += matchLen;
-                }
-            } while (dOff < decompressedLen);
-
-            return dOff;
-        }
-
-        private static void EncodeLen(int l, DataOutput @out)
-        {
-            while (l >= 0xFF)
-            {
-                @out.WriteByte(unchecked((byte)(sbyte)0xFF));
-                l -= 0xFF;
-            }
-            @out.WriteByte((byte)(sbyte)l);
-        }
-
-        private static void EncodeLiterals(byte[] bytes, int token, int anchor, int literalLen, DataOutput @out)
-        {
-            @out.WriteByte((byte)(sbyte)token);
-
-            // encode literal length
-            if (literalLen >= 0x0F)
-            {
-                EncodeLen(literalLen - 0x0F, @out);
-            }
-
-            // encode literals
-            @out.WriteBytes(bytes, anchor, literalLen);
-        }
-
-        private static void EncodeLastLiterals(byte[] bytes, int anchor, int literalLen, DataOutput @out)
-        {
-            int token = Math.Min(literalLen, 0x0F) << 4;
-            EncodeLiterals(bytes, token, anchor, literalLen, @out);
-        }
-
-        private static void EncodeSequence(byte[] bytes, int anchor, int matchRef, int matchOff, int matchLen, DataOutput @out)
-        {
-            int literalLen = matchOff - anchor;
-            Debug.Assert(matchLen >= 4);
-            // encode token
-            int token = (Math.Min(literalLen, 0x0F) << 4) | Math.Min(matchLen - 4, 0x0F);
-            EncodeLiterals(bytes, token, anchor, literalLen, @out);
-
-            // encode match dec
-            int matchDec = matchOff - matchRef;
-            Debug.Assert(matchDec > 0 && matchDec < 1 << 16);
-            @out.WriteByte((byte)(sbyte)matchDec);
-            @out.WriteByte((byte)(sbyte)((int)((uint)matchDec >> 8)));
-
-            // encode match len
-            if (matchLen >= MIN_MATCH + 0x0F)
-            {
-                EncodeLen(matchLen - 0x0F - MIN_MATCH, @out);
-            }
-        }
-
-        public sealed class HashTable
-        {
-            internal int hashLog;
-            internal PackedInt32s.Mutable hashTable;
-
-            internal void Reset(int len)
-            {
-                int bitsPerOffset = PackedInt32s.BitsRequired(len - LAST_LITERALS);
-                int bitsPerOffsetLog = 32 - Number.NumberOfLeadingZeros(bitsPerOffset - 1);
-                hashLog = MEMORY_USAGE + 3 - bitsPerOffsetLog;
-                if (hashTable == null || hashTable.Count < 1 << hashLog || hashTable.BitsPerValue < bitsPerOffset)
-                {
-                    hashTable = PackedInt32s.GetMutable(1 << hashLog, bitsPerOffset, PackedInt32s.DEFAULT);
-                }
-                else
-                {
-                    hashTable.Clear();
-                }
-            }
-        }
-
-        /// <summary>
-        /// Compress <code>bytes[off:off+len]</code> into <code>out</code> using
-        /// at most 16KB of memory. <code>ht</code> shouldn't be shared across threads
-        /// but can safely be reused.
-        /// </summary>
-        public static void Compress(byte[] bytes, int off, int len, DataOutput @out, HashTable ht)
-        {
-            int @base = off;
-            int end = off + len;
-
-            int anchor = off++;
-
-            if (len > LAST_LITERALS + MIN_MATCH)
-            {
-                int limit = end - LAST_LITERALS;
-                int matchLimit = limit - MIN_MATCH;
-                ht.Reset(len);
-                int hashLog = ht.hashLog;
-                PackedInt32s.Mutable hashTable = ht.hashTable;
-
-                while (off <= limit)
-                {
-                    // find a match
-                    int @ref;
-                    while (true)
-                    {
-                        if (off >= matchLimit)
-                        {
-                            goto mainBreak;
-                        }
-                        int v = ReadInt32(bytes, off);
-                        int h = Hash(v, hashLog);
-                        @ref = @base + (int)hashTable.Get(h);
-                        Debug.Assert(PackedInt32s.BitsRequired(off - @base) <= hashTable.BitsPerValue);
-                        hashTable.Set(h, off - @base);
-                        if (off - @ref < MAX_DISTANCE && ReadInt32(bytes, @ref) == v)
-                        {
-                            break;
-                        }
-                        ++off;
-                    }
-
-                    // compute match length
-                    int matchLen = MIN_MATCH + CommonBytes(bytes, @ref + MIN_MATCH, off + MIN_MATCH, limit);
-
-                    EncodeSequence(bytes, anchor, @ref, off, matchLen, @out);
-                    off += matchLen;
-                    anchor = off;
-                //mainContinue: ; // LUCENENET NOTE: Not Referenced
-                }
-            mainBreak: ;
-            }
-
-            // last literals
-            int literalLen = end - anchor;
-            Debug.Assert(literalLen >= LAST_LITERALS || literalLen == len);
-            EncodeLastLiterals(bytes, anchor, end - anchor, @out);
-        }
-
-        public class Match
-        {
-            internal int start, @ref, len;
-
-            internal virtual void Fix(int correction)
-            {
-                start += correction;
-                @ref += correction;
-                len -= correction;
-            }
-
-            internal virtual int End()
-            {
-                return start + len;
-            }
-        }
-
-        private static void CopyTo(Match m1, Match m2)
-        {
-            m2.len = m1.len;
-            m2.start = m1.start;
-            m2.@ref = m1.@ref;
-        }
-
-        public sealed class HCHashTable
-        {
-            internal const int MAX_ATTEMPTS = 256;
-            internal static readonly int MASK = MAX_DISTANCE - 1;
-            internal int nextToUpdate;
-            private int @base;
-            private readonly int[] hashTable;
-            private readonly short[] chainTable;
-
-            internal HCHashTable()
-            {
-                hashTable = new int[HASH_TABLE_SIZE_HC];
-                chainTable = new short[MAX_DISTANCE];
-            }
-
-            internal void Reset(int @base)
-            {
-                this.@base = @base;
-                nextToUpdate = @base;
-                Arrays.Fill(hashTable, -1);
-                Arrays.Fill(chainTable, (short)0);
-            }
-
-            private int HashPointer(byte[] bytes, int off)
-            {
-                int v = ReadInt32(bytes, off);
-                int h = HashHC(v);
-                return hashTable[h];
-            }
-
-            private int Next(int off)
-            {
-                return off - (chainTable[off & MASK] & 0xFFFF);
-            }
-
-            private void AddHash(byte[] bytes, int off)
-            {
-                int v = ReadInt32(bytes, off);
-                int h = HashHC(v);
-                int delta = off - hashTable[h];
-                Debug.Assert(delta > 0, delta.ToString());
-                if (delta >= MAX_DISTANCE)
-                {
-                    delta = MAX_DISTANCE - 1;
-                }
-                chainTable[off & MASK] = (short)delta;
-                hashTable[h] = off;
-            }
-
-            internal void Insert(int off, byte[] bytes)
-            {
-                for (; nextToUpdate < off; ++nextToUpdate)
-                {
-                    AddHash(bytes, nextToUpdate);
-                }
-            }
-
-            internal bool InsertAndFindBestMatch(byte[] buf, int off, int matchLimit, Match match)
-            {
-                match.start = off;
-                match.len = 0;
-                int delta = 0;
-                int repl = 0;
-
-                Insert(off, buf);
-
-                int @ref = HashPointer(buf, off);
-
-                if (@ref >= off - 4 && @ref <= off && @ref >= @base) // potential repetition
-                {
-                    if (ReadInt32Equals(buf, @ref, off)) // confirmed
-                    {
-                        delta = off - @ref;
-                        repl = match.len = MIN_MATCH + CommonBytes(buf, @ref + MIN_MATCH, off + MIN_MATCH, matchLimit);
-                        match.@ref = @ref;
-                    }
-                    @ref = Next(@ref);
-                }
-
-                for (int i = 0; i < MAX_ATTEMPTS; ++i)
-                {
-                    if (@ref < Math.Max(@base, off - MAX_DISTANCE + 1) || @ref > off)
-                    {
-                        break;
-                    }
-                    if (buf[@ref + match.len] == buf[off + match.len] && ReadInt32Equals(buf, @ref, off))
-                    {
-                        int matchLen = MIN_MATCH + CommonBytes(buf, @ref + MIN_MATCH, off + MIN_MATCH, matchLimit);
-                        if (matchLen > match.len)
-                        {
-                            match.@ref = @ref;
-                            match.len = matchLen;
-                        }
-                    }
-                    @ref = Next(@ref);
-                }
-
-                if (repl != 0)
-                {
-                    int ptr = off;
-                    int end = off + repl - (MIN_MATCH - 1);
-                    while (ptr < end - delta)
-                    {
-                        chainTable[ptr & MASK] = (short)delta; // pre load
-                        ++ptr;
-                    }
-                    do
-                    {
-                        chainTable[ptr & MASK] = (short)delta;
-                        hashTable[HashHC(ReadInt32(buf, ptr))] = ptr;
-                        ++ptr;
-                    } while (ptr < end);
-                    nextToUpdate = end;
-                }
-
-                return match.len != 0;
-            }
-
-            internal bool InsertAndFindWiderMatch(byte[] buf, int off, int startLimit, int matchLimit, int minLen, Match match)
-            {
-                match.len = minLen;
-
-                Insert(off, buf);
-
-                int delta = off - startLimit;
-                int @ref = HashPointer(buf, off);
-                for (int i = 0; i < MAX_ATTEMPTS; ++i)
-                {
-                    if (@ref < Math.Max(@base, off - MAX_DISTANCE + 1) || @ref > off)
-                    {
-                        break;
-                    }
-                    if (buf[@ref - delta + match.len] == buf[startLimit + match.len] && ReadInt32Equals(buf, @ref, off))
-                    {
-                        int matchLenForward = MIN_MATCH + CommonBytes(buf, @ref + MIN_MATCH, off + MIN_MATCH, matchLimit);
-                        int matchLenBackward = CommonBytesBackward(buf, @ref, off, @base, startLimit);
-                        int matchLen = matchLenBackward + matchLenForward;
-                        if (matchLen > match.len)
-                        {
-                            match.len = matchLen;
-                            match.@ref = @ref - matchLenBackward;
-                            match.start = off - matchLenBackward;
-                        }
-                    }
-                    @ref = Next(@ref);
-                }
-
-                return match.len > minLen;
-            }
-        }
-
-        /// <summary>
-        /// Compress <code>bytes[off:off+len]</code> into <code>out</code>. Compared to
-        /// <seealso cref="LZ4#compress(byte[], int, int, DataOutput, HashTable)"/>, this method
-        /// is slower and uses more memory (~ 256KB per thread) but should provide
-        /// better compression ratios (especially on large inputs) because it chooses
-        /// the best match among up to 256 candidates and then performs trade-offs to
-        /// fix overlapping matches. <code>ht</code> shouldn't be shared across threads
-        /// but can safely be reused.
-        /// </summary>
-        public static void CompressHC(byte[] src, int srcOff, int srcLen, DataOutput @out, HCHashTable ht)
-        {
-            int srcEnd = srcOff + srcLen;
-            int matchLimit = srcEnd - LAST_LITERALS;
-            int mfLimit = matchLimit - MIN_MATCH;
-
-            int sOff = srcOff;
-            int anchor = sOff++;
-
-            ht.Reset(srcOff);
-            Match match0 = new Match();
-            Match match1 = new Match();
-            Match match2 = new Match();
-            Match match3 = new Match();
-
-            while (sOff <= mfLimit)
-            {
-                if (!ht.InsertAndFindBestMatch(src, sOff, matchLimit, match1))
-                {
-                    ++sOff;
-                    continue;
-                }
-
-                // saved, in case we would skip too much
-                CopyTo(match1, match0);
-
-                while (true)
-                {
-                    Debug.Assert(match1.start >= anchor);
-                    if (match1.End() >= mfLimit || !ht.InsertAndFindWiderMatch(src, match1.End() - 2, match1.start + 1, matchLimit, match1.len, match2))
-                    {
-                        // no better match
-                        EncodeSequence(src, anchor, match1.@ref, match1.start, match1.len, @out);
-                        anchor = sOff = match1.End();
-                        goto mainContinue;
-                    }
-
-                    if (match0.start < match1.start)
-                    {
-                        if (match2.start < match1.start + match0.len) // empirical
-                        {
-                            CopyTo(match0, match1);
-                        }
-                    }
-                    Debug.Assert(match2.start > match1.start);
-
-                    if (match2.start - match1.start < 3) // first match too small: drop it and restart from the second one
-                    {
-                        CopyTo(match2, match1);
-                        goto search2Continue;
-                    }
-
-                    while (true)
-                    {
-                        if (match2.start - match1.start < OPTIMAL_ML)
-                        {
-                            int newMatchLen = match1.len;
-                            if (newMatchLen > OPTIMAL_ML)
-                            {
-                                newMatchLen = OPTIMAL_ML;
-                            }
-                            if (match1.start + newMatchLen > match2.End() - MIN_MATCH)
-                            {
-                                newMatchLen = match2.start - match1.start + match2.len - MIN_MATCH;
-                            }
-                            int correction = newMatchLen - (match2.start - match1.start);
-                            if (correction > 0)
-                            {
-                                match2.Fix(correction);
-                            }
-                        }
-
-                        if (match2.start + match2.len >= mfLimit || !ht.InsertAndFindWiderMatch(src, match2.End() - 3, match2.start, matchLimit, match2.len, match3))
-                        {
-                            // no better match -> 2 sequences to encode
-                            if (match2.start < match1.End())
-                            {
-                                match1.len = match2.start - match1.start;
-                            }
-                            // encode seq 1
-                            EncodeSequence(src, anchor, match1.@ref, match1.start, match1.len, @out);
-                            anchor = sOff = match1.End();
-                            // encode seq 2
-                            EncodeSequence(src, anchor, match2.@ref, match2.start, match2.len, @out);
-                            anchor = sOff = match2.End();
-                            goto mainContinue;
-                        }
-
-                        if (match3.start < match1.End() + 3) // not enough space for match 2: remove it
-                        {
-                            if (match3.start >= match1.End()) // can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1
-                            {
-                                if (match2.start < match1.End())
-                                {
-                                    int correction = match1.End() - match2.start;
-                                    match2.Fix(correction);
-                                    if (match2.len < MIN_MATCH)
-                                    {
-                                        CopyTo(match3, match2);
-                                    }
-                                }
-
-                                EncodeSequence(src, anchor, match1.@ref, match1.start, match1.len, @out);
-                                anchor = sOff = match1.End();
-
-                                CopyTo(match3, match1);
-                                CopyTo(match2, match0);
-
-                                goto search2Continue;
-                            }
-
-                            CopyTo(match3, match2);
-                            goto search3Continue;
-                        }
-
-                        // OK, now we have 3 ascending matches; let's write at least the first one
-                        if (match2.start < match1.End())
-                        {
-                            if (match2.start - match1.start < 0x0F)
-                            {
-                                if (match1.len > OPTIMAL_ML)
-                                {
-                                    match1.len = OPTIMAL_ML;
-                                }
-                                if (match1.End() > match2.End() - MIN_MATCH)
-                                {
-                                    match1.len = match2.End() - match1.start - MIN_MATCH;
-                                }
-                                int correction = match1.End() - match2.start;
-                                match2.Fix(correction);
-                            }
-                            else
-                            {
-                                match1.len = match2.start - match1.start;
-                            }
-                        }
-
-                        EncodeSequence(src, anchor, match1.@ref, match1.start, match1.len, @out);
-                        anchor = sOff = match1.End();
-
-                        CopyTo(match2, match1);
-                        CopyTo(match3, match2);
-
-                        goto search3Continue;
-                    search3Continue: ;
-                    }
-                //search3Break: ; // LUCENENET NOTE: Unreachable
-
-                search2Continue: ;
-                }
-            //search2Break: ; // LUCENENET NOTE: Not referenced
-
-            mainContinue: ;
-            }
-        //mainBreak: // LUCENENET NOTE: Not referenced
-
-            EncodeLastLiterals(src, anchor, srcEnd - anchor, @out);
-        }
-    }
-}
\ No newline at end of file

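A minimal round trip through the LZ4 routines above, using ByteArrayDataOutput
and ByteArrayDataInput from Lucene.Net.Store; the buffer size is a guess that
is generous for this tiny input, and the snippet is a sketch rather than
anything taken from the commit. Per the Decompress doc comment, the destination
array must be sized for the full decompressed length up front.

    using System;
    using System.Text;
    using Lucene.Net.Codecs.Compressing;
    using Lucene.Net.Store;

    public static class LZ4RoundTrip
    {
        public static void Main()
        {
            byte[] data = Encoding.UTF8.GetBytes("abcabcabcabcabcabcabcabc");

            // Compress into a byte[]-backed DataOutput; 64 bytes is a
            // generous worst case for this input (assumption, not a bound).
            var buffer = new byte[64];
            var output = new ByteArrayDataOutput(buffer);
            LZ4.Compress(data, 0, data.Length, output, new LZ4.HashTable());

            // Decompress; 'restored' must hold ALL decompressed bytes.
            var input = new ByteArrayDataInput(buffer, 0, output.Position);
            var restored = new byte[data.Length];
            LZ4.Decompress(input, data.Length, restored, 0);

            Console.WriteLine(Encoding.UTF8.GetString(restored)); // prints the original text
        }
    }
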

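The CompressHC doc comment quantifies its trade-off: HCHashTable holds an
int[32768] hash table plus a short[65536] chain table, which is where the
roughly 256KB per thread comes from, so the intended pattern is one table per
thread, reused across calls. A hedged sketch follows; note that HCHashTable's
constructor is internal, so as written this would only compile inside the
Lucene.Net assembly, and the helper class is hypothetical:

    using System;
    using Lucene.Net.Store;

    namespace Lucene.Net.Codecs.Compressing
    {
        // Hypothetical helper: one ~256KB table per thread, reused across
        // calls. CompressHC calls Reset() itself, so no cleanup is needed
        // between inputs.
        internal static class HighCompressionHelper
        {
            [ThreadStatic]
            private static LZ4.HCHashTable hcTable;

            public static void CompressBlock(byte[] data, DataOutput @out)
            {
                if (hcTable == null)
                {
                    hcTable = new LZ4.HCHashTable();
                }
                LZ4.CompressHC(data, 0, data.Length, @out, hcTable);
            }
        }
    }
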