lucenenet-commits mailing list archives

From nightowl...@apache.org
Subject [02/62] [abbrv] [partial] lucenenet git commit: Renamed Lucene.Net.Core folder Lucene.Net because the dotnet.exe pack command doesn't allow creating a NuGet package with a different name than its folder. Working around it with the script was much more co
Date Tue, 04 Apr 2017 17:19:08 GMT
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/TermsHash.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/TermsHash.cs b/src/Lucene.Net.Core/Index/TermsHash.cs
deleted file mode 100644
index 77c2d6e..0000000
--- a/src/Lucene.Net.Core/Index/TermsHash.cs
+++ /dev/null
@@ -1,165 +0,0 @@
-using System;
-using System.Collections.Generic;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using ByteBlockPool = Lucene.Net.Util.ByteBlockPool;
-    using BytesRef = Lucene.Net.Util.BytesRef;
-    using Counter = Lucene.Net.Util.Counter;
-    using Int32BlockPool = Lucene.Net.Util.Int32BlockPool;
-
-    /// <summary>
-    /// This class implements <seealso cref="InvertedDocConsumer"/>, which
-    ///  is passed each token produced by the analyzer on each
-    ///  field.  It stores these tokens in a hash table, and
-    ///  allocates separate byte streams per token.  Consumers of
-    ///  this class, e.g. <seealso cref="FreqProxTermsWriter"/> and
-    ///  <seealso cref="TermVectorsConsumer"/>, write their own byte streams
-    ///  under each term.
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    internal sealed class TermsHash : InvertedDocConsumer
-    {
-        internal readonly TermsHashConsumer consumer;
-        internal readonly TermsHash nextTermsHash;
-
-        internal readonly Int32BlockPool intPool;
-        internal readonly ByteBlockPool bytePool;
-        internal ByteBlockPool termBytePool;
-        internal readonly Counter bytesUsed;
-
-        internal readonly bool primary;
-        internal readonly DocumentsWriterPerThread.DocState docState;
-
-        // Used when comparing postings via termRefComp, in TermsHashPerField
-        internal readonly BytesRef tr1 = new BytesRef();
-
-        internal readonly BytesRef tr2 = new BytesRef();
-
-        // Used by perField to obtain terms from the analysis chain
-        internal readonly BytesRef termBytesRef = new BytesRef(10);
-
-        internal readonly bool trackAllocations;
-
-        public TermsHash(DocumentsWriterPerThread docWriter, TermsHashConsumer consumer, bool trackAllocations, TermsHash nextTermsHash)
-        {
-            this.docState = docWriter.docState;
-            this.consumer = consumer;
-            this.trackAllocations = trackAllocations;
-            this.nextTermsHash = nextTermsHash;
-            this.bytesUsed = trackAllocations ? docWriter.bytesUsed : Counter.NewCounter();
-            intPool = new Int32BlockPool(docWriter.intBlockAllocator);
-            bytePool = new ByteBlockPool(docWriter.byteBlockAllocator);
-
-            if (nextTermsHash != null)
-            {
-                // We are primary
-                primary = true;
-                termBytePool = bytePool;
-                nextTermsHash.termBytePool = bytePool;
-            }
-            else
-            {
-                primary = false;
-            }
-        }
-
-        public override void Abort()
-        {
-            Reset();
-            try
-            {
-                consumer.Abort();
-            }
-            finally
-            {
-                if (nextTermsHash != null)
-                {
-                    nextTermsHash.Abort();
-                }
-            }
-        }
-
-        // Clear all state
-        internal void Reset()
-        {
-            // we don't reuse so we drop everything and don't fill with 0
-            intPool.Reset(false, false);
-            bytePool.Reset(false, false);
-        }
-
-        internal override void Flush(IDictionary<string, InvertedDocConsumerPerField> fieldsToFlush, SegmentWriteState state)
-        {
-            IDictionary<string, TermsHashConsumerPerField> childFields = new Dictionary<string, TermsHashConsumerPerField>();
-            IDictionary<string, InvertedDocConsumerPerField> nextChildFields;
-
-            if (nextTermsHash != null)
-            {
-                nextChildFields = new Dictionary<string, InvertedDocConsumerPerField>();
-            }
-            else
-            {
-                nextChildFields = null;
-            }
-
-            foreach (KeyValuePair<string, InvertedDocConsumerPerField> entry in fieldsToFlush)
-            {
-                TermsHashPerField perField = (TermsHashPerField)entry.Value;
-                childFields[entry.Key] = perField.consumer;
-                if (nextTermsHash != null)
-                {
-                    nextChildFields[entry.Key] = perField.nextPerField;
-                }
-            }
-
-            consumer.Flush(childFields, state);
-
-            if (nextTermsHash != null)
-            {
-                nextTermsHash.Flush(nextChildFields, state);
-            }
-        }
-
-        internal override InvertedDocConsumerPerField AddField(DocInverterPerField docInverterPerField, FieldInfo fieldInfo)
-        {
-            return new TermsHashPerField(docInverterPerField, this, nextTermsHash, fieldInfo);
-        }
-
-        internal override void FinishDocument()
-        {
-            consumer.FinishDocument(this);
-            if (nextTermsHash != null)
-            {
-                nextTermsHash.consumer.FinishDocument(nextTermsHash);
-            }
-        }
-
-        internal override void StartDocument()
-        {
-            consumer.StartDocument();
-            if (nextTermsHash != null)
-            {
-                nextTermsHash.consumer.StartDocument();
-            }
-        }
-    }
-}
\ No newline at end of file

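For readers skimming the deleted TermsHash above: it is the primary/secondary pair in the indexing chain, where the first hash interns term bytes and only forwards the interned offset to nextTermsHash. A minimal standalone sketch of that chaining idea follows; ChainedTermSink and its members are illustrative inventions, not types from this commit, and they intern to a plain dictionary rather than a ByteBlockPool, roughly as TermsHashPerField.Add() (in the TermsHashPerField.cs diff further down) forwards postingsArray.textStarts[termID] to the next field in the chain.

using System;
using System.Collections.Generic;

// Illustrative only: these types are NOT part of the Lucene.NET API shown in the
// diff above; they just mirror the primary/secondary chaining of TermsHash, where
// the first stage "interns" term bytes and later stages reuse the resulting id.
class ChainedTermSink
{
    private readonly Dictionary<string, int> internTable = new Dictionary<string, int>();
    private readonly ChainedTermSink next;          // nextTermsHash analogue (may be null)
    private readonly List<string> perTermLog = new List<string>();

    public ChainedTermSink(ChainedTermSink next = null) => this.next = next;

    // Primary entry point: intern the term, then forward the id down the chain.
    public void Add(string term)
    {
        if (!internTable.TryGetValue(term, out int termId))
        {
            termId = internTable.Count;
            internTable[term] = termId;             // "new posting"
        }
        perTermLog.Add($"termId={termId} term={term}");
        next?.AddById(termId);                      // secondary entry point
    }

    // Secondary entry point: the text was already interned upstream.
    public void AddById(int termId) => perTermLog.Add($"termId={termId} (by id)");

    public IEnumerable<string> Log => perTermLog;
}

class ChainDemo
{
    static void Main()
    {
        var vectors = new ChainedTermSink();         // plays the TermVectorsConsumer role
        var postings = new ChainedTermSink(vectors); // plays the FreqProxTermsWriter role
        foreach (var t in new[] { "fox", "quick", "fox" }) postings.Add(t);
        foreach (var line in postings.Log) Console.WriteLine(line);
    }
}

Repeated terms reuse the id assigned on first sight, which is the behaviour the "new posting" branch in the real per-field code models.
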
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/TermsHashConsumer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/TermsHashConsumer.cs b/src/Lucene.Net.Core/Index/TermsHashConsumer.cs
deleted file mode 100644
index cfecc98..0000000
--- a/src/Lucene.Net.Core/Index/TermsHashConsumer.cs
+++ /dev/null
@@ -1,37 +0,0 @@
-using System;
-using System.Collections.Generic;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    internal abstract class TermsHashConsumer
-    {
-        public abstract void Flush(IDictionary<string, TermsHashConsumerPerField> fieldsToFlush, SegmentWriteState state);
-
-        public abstract void Abort();
-
-        internal abstract void StartDocument();
-
-        internal abstract void FinishDocument(TermsHash termsHash);
-
-        public abstract TermsHashConsumerPerField AddField(TermsHashPerField termsHashPerField, FieldInfo fieldInfo);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/TermsHashConsumerPerField.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/TermsHashConsumerPerField.cs b/src/Lucene.Net.Core/Index/TermsHashConsumerPerField.cs
deleted file mode 100644
index 3888c01..0000000
--- a/src/Lucene.Net.Core/Index/TermsHashConsumerPerField.cs
+++ /dev/null
@@ -1,51 +0,0 @@
-using Lucene.Net.Support;
-using System;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    /// <summary>
-    /// Implement this class to plug into the TermsHash
-    ///  processor, which inverts & stores Tokens into a hash
-    ///  table and provides an API for writing bytes into
-    ///  multiple streams for each unique Token.
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    internal abstract class TermsHashConsumerPerField
-    {
-        internal abstract bool Start(IIndexableField[] fields, int count);
-
-        internal abstract void Finish();
-
-        [ExceptionToNetNumericConvention]
-        internal abstract void SkippingLongTerm();
-
-        internal abstract void Start(IIndexableField field);
-
-        internal abstract void NewTerm(int termID);
-
-        internal abstract void AddTerm(int termID);
-
-        internal abstract int StreamCount { get; }
-
-        internal abstract ParallelPostingsArray CreatePostingsArray(int size);
-    }
-}
\ No newline at end of file

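The summary above describes consumers that write into several parallel byte streams per unique token (the StreamCount property), e.g. one stream for frequencies and one for positions. A rough, self-contained illustration of that layout follows; PerTermStreams and its members are hypothetical names, and plain MemoryStream buffers stand in for Lucene's sliced ByteBlockPool.

using System;
using System.Collections.Generic;
using System.IO;

// Hypothetical illustration of "N parallel byte streams per unique term".
// Lucene.NET writes these streams into shared ByteBlockPool slices; here each
// stream is its own MemoryStream so the shape of the API stays visible.
class PerTermStreams
{
    private readonly int streamCount;
    private readonly Dictionary<int, MemoryStream[]> streamsByTermId =
        new Dictionary<int, MemoryStream[]>();

    public PerTermStreams(int streamCount) => this.streamCount = streamCount;

    // Called once when a term id is first seen (NewTerm analogue).
    public void NewTerm(int termId)
    {
        var streams = new MemoryStream[streamCount];
        for (int i = 0; i < streamCount; i++) streams[i] = new MemoryStream();
        streamsByTermId[termId] = streams;
    }

    // Append one byte to one of the term's streams (WriteByte analogue).
    public void WriteByte(int termId, int stream, byte b) =>
        streamsByTermId[termId][stream].WriteByte(b);

    public long StreamLength(int termId, int stream) =>
        streamsByTermId[termId][stream].Length;
}

class PerTermStreamsDemo
{
    static void Main()
    {
        var sink = new PerTermStreams(streamCount: 2); // stream 0: freqs, stream 1: positions
        sink.NewTerm(0);
        sink.WriteByte(0, 0, 1);                       // e.g. a frequency byte
        sink.WriteByte(0, 1, 5);                       // e.g. a position delta byte
        Console.WriteLine(sink.StreamLength(0, 1));    // prints 1
    }
}
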
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/TermsHashPerField.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/TermsHashPerField.cs b/src/Lucene.Net.Core/Index/TermsHashPerField.cs
deleted file mode 100644
index 65fd5f5..0000000
--- a/src/Lucene.Net.Core/Index/TermsHashPerField.cs
+++ /dev/null
@@ -1,388 +0,0 @@
-using Lucene.Net.Analysis.TokenAttributes;
-using System;
-using System.Collections.Generic;
-using System.Diagnostics;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using ByteBlockPool = Lucene.Net.Util.ByteBlockPool;
-    using BytesRef = Lucene.Net.Util.BytesRef;
-    using BytesRefHash = Lucene.Net.Util.BytesRefHash;
-    using Counter = Lucene.Net.Util.Counter;
-    using Int32BlockPool = Lucene.Net.Util.Int32BlockPool;
-
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    internal sealed class TermsHashPerField : InvertedDocConsumerPerField
-    {
-        private const int HASH_INIT_SIZE = 4;
-
-        internal readonly TermsHashConsumerPerField consumer;
-
-        internal readonly TermsHash termsHash;
-
-        internal readonly TermsHashPerField nextPerField;
-        internal readonly DocumentsWriterPerThread.DocState docState;
-        internal readonly FieldInvertState fieldState;
-        internal ITermToBytesRefAttribute termAtt;
-        internal BytesRef termBytesRef;
-
-        // Copied from our perThread
-        internal readonly Int32BlockPool intPool;
-
-        internal readonly ByteBlockPool bytePool;
-        internal readonly ByteBlockPool termBytePool;
-
-        internal readonly int streamCount;
-        internal readonly int numPostingInt;
-
-        internal readonly FieldInfo fieldInfo;
-
-        internal readonly BytesRefHash bytesHash;
-
-        internal ParallelPostingsArray postingsArray;
-        private readonly Counter bytesUsed;
-
-        public TermsHashPerField(DocInverterPerField docInverterPerField, TermsHash termsHash, TermsHash nextTermsHash, FieldInfo fieldInfo)
-        {
-            intPool = termsHash.intPool;
-            bytePool = termsHash.bytePool;
-            termBytePool = termsHash.termBytePool;
-            docState = termsHash.docState;
-            this.termsHash = termsHash;
-            bytesUsed = termsHash.bytesUsed;
-            fieldState = docInverterPerField.fieldState;
-            this.consumer = termsHash.consumer.AddField(this, fieldInfo);
-            PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, bytesUsed);
-            bytesHash = new BytesRefHash(termBytePool, HASH_INIT_SIZE, byteStarts);
-            streamCount = consumer.StreamCount;
-            numPostingInt = 2 * streamCount;
-            this.fieldInfo = fieldInfo;
-            if (nextTermsHash != null)
-            {
-                nextPerField = (TermsHashPerField)nextTermsHash.AddField(docInverterPerField, fieldInfo);
-            }
-            else
-            {
-                nextPerField = null;
-            }
-        }
-
-        internal void ShrinkHash(int targetSize)
-        {
-            // Fully free the bytesHash on each flush but keep the pool untouched
-            // bytesHash.clear will clear the ByteStartArray and in turn the ParallelPostingsArray too
-            bytesHash.Clear(false);
-        }
-
-        public void Reset()
-        {
-            bytesHash.Clear(false);
-            if (nextPerField != null)
-            {
-                nextPerField.Reset();
-            }
-        }
-
-        public override void Abort()
-        {
-            Reset();
-            if (nextPerField != null)
-            {
-                nextPerField.Abort();
-            }
-        }
-
-        public void InitReader(ByteSliceReader reader, int termID, int stream)
-        {
-            Debug.Assert(stream < streamCount);
-            int intStart = postingsArray.intStarts[termID];
-            int[] ints = intPool.Buffers[intStart >> Int32BlockPool.INT32_BLOCK_SHIFT];
-            int upto = intStart & Int32BlockPool.INT32_BLOCK_MASK;
-            reader.Init(bytePool, postingsArray.byteStarts[termID] + stream * ByteBlockPool.FIRST_LEVEL_SIZE, ints[upto + stream]);
-        }
-
-        /// <summary>
-        /// Collapse the hash table & sort in-place. </summary>
-        public int[] SortPostings(IComparer<BytesRef> termComp)
-        {
-            return bytesHash.Sort(termComp);
-        }
-
-        private bool doCall;
-        private bool doNextCall;
-
-        internal override void Start(IIndexableField f)
-        {
-            termAtt = fieldState.AttributeSource.GetAttribute<ITermToBytesRefAttribute>();
-            termBytesRef = termAtt.BytesRef;
-            consumer.Start(f);
-            if (nextPerField != null)
-            {
-                nextPerField.Start(f);
-            }
-        }
-
-        internal override bool Start(IIndexableField[] fields, int count)
-        {
-            doCall = consumer.Start(fields, count);
-            bytesHash.Reinit();
-            if (nextPerField != null)
-            {
-                doNextCall = nextPerField.Start(fields, count);
-            }
-            return doCall || doNextCall;
-        }
-
-        // Secondary entry point (for 2nd & subsequent TermsHash),
-        // because token text has already been "interned" into
-        // textStart, so we hash by textStart
-        public void Add(int textStart)
-        {
-            int termID = bytesHash.AddByPoolOffset(textStart);
-            if (termID >= 0) // New posting
-            {
-                // First time we are seeing this token since we last
-                // flushed the hash.
-                // Init stream slices
-                if (numPostingInt + intPool.Int32Upto > Int32BlockPool.INT32_BLOCK_SIZE)
-                {
-                    intPool.NextBuffer();
-                }
-
-                if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.ByteUpto < numPostingInt * ByteBlockPool.FIRST_LEVEL_SIZE)
-                {
-                    bytePool.NextBuffer();
-                }
-
-                intUptos = intPool.Buffer;
-                intUptoStart = intPool.Int32Upto;
-                intPool.Int32Upto += streamCount;
-
-                postingsArray.intStarts[termID] = intUptoStart + intPool.Int32Offset;
-
-                for (int i = 0; i < streamCount; i++)
-                {
-                    int upto = bytePool.NewSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
-                    intUptos[intUptoStart + i] = upto + bytePool.ByteOffset;
-                }
-                postingsArray.byteStarts[termID] = intUptos[intUptoStart];
-
-                consumer.NewTerm(termID);
-            }
-            else
-            {
-                termID = (-termID) - 1;
-                int intStart = postingsArray.intStarts[termID];
-                intUptos = intPool.Buffers[intStart >> Int32BlockPool.INT32_BLOCK_SHIFT];
-                intUptoStart = intStart & Int32BlockPool.INT32_BLOCK_MASK;
-                consumer.AddTerm(termID);
-            }
-        }
-
-        // Primary entry point (for first TermsHash)
-        internal override void Add()
-        {
-            termAtt.FillBytesRef();
-
-            // We are first in the chain so we must "intern" the
-            // term text into textStart address
-            // Get the text & hash of this term.
-            int termID;
-            try
-            {
-                termID = bytesHash.Add(termBytesRef);
-            }
-            catch (BytesRefHash.MaxBytesLengthExceededException)
-            {
-                // Term is too large; record this here (can't throw an
-                // exc because DocInverterPerField will then abort the
-                // entire segment) and then throw an exc later in
-                // DocInverterPerField.java.  LengthFilter can always be
-                // used to prune the term before indexing:
-                if (docState.maxTermPrefix == null)
-                {
-                    int saved = termBytesRef.Length;
-                    try
-                    {
-                        termBytesRef.Length = Math.Min(30, DocumentsWriterPerThread.MAX_TERM_LENGTH_UTF8);
-                        docState.maxTermPrefix = termBytesRef.ToString();
-                    }
-                    finally
-                    {
-                        termBytesRef.Length = saved;
-                    }
-                }
-                consumer.SkippingLongTerm();
-                return;
-            }
-            if (termID >= 0) // New posting
-            {
-                bytesHash.ByteStart(termID);
-                // Init stream slices
-                if (numPostingInt + intPool.Int32Upto > Int32BlockPool.INT32_BLOCK_SIZE)
-                {
-                    intPool.NextBuffer();
-                }
-
-                if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.ByteUpto < numPostingInt * ByteBlockPool.FIRST_LEVEL_SIZE)
-                {
-                    bytePool.NextBuffer();
-                }
-
-                intUptos = intPool.Buffer;
-                intUptoStart = intPool.Int32Upto;
-                intPool.Int32Upto += streamCount;
-
-                postingsArray.intStarts[termID] = intUptoStart + intPool.Int32Offset;
-
-                for (int i = 0; i < streamCount; i++)
-                {
-                    int upto = bytePool.NewSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
-                    intUptos[intUptoStart + i] = upto + bytePool.ByteOffset;
-                }
-                postingsArray.byteStarts[termID] = intUptos[intUptoStart];
-
-                consumer.NewTerm(termID);
-            }
-            else
-            {
-                termID = (-termID) - 1;
-                int intStart = postingsArray.intStarts[termID];
-                intUptos = intPool.Buffers[intStart >> Int32BlockPool.INT32_BLOCK_SHIFT];
-                intUptoStart = intStart & Int32BlockPool.INT32_BLOCK_MASK;
-                consumer.AddTerm(termID);
-            }
-
-            if (doNextCall)
-            {
-                nextPerField.Add(postingsArray.textStarts[termID]);
-            }
-        }
-
-        internal int[] intUptos;
-        internal int intUptoStart;
-
-        internal void WriteByte(int stream, sbyte b)
-        {
-            WriteByte(stream, (byte)b);
-        }
-
-        internal void WriteByte(int stream, byte b)
-        {
-            int upto = intUptos[intUptoStart + stream];
-            var bytes = bytePool.Buffers[upto >> ByteBlockPool.BYTE_BLOCK_SHIFT];
-            Debug.Assert(bytes != null);
-            int offset = upto & ByteBlockPool.BYTE_BLOCK_MASK;
-            if (bytes[offset] != 0)
-            {
-                // End of slice; allocate a new one
-                offset = bytePool.AllocSlice(bytes, offset);
-                bytes = bytePool.Buffer;
-                intUptos[intUptoStart + stream] = offset + bytePool.ByteOffset;
-            }
-            bytes[offset] = b;
-            (intUptos[intUptoStart + stream])++;
-        }
-
-        public void WriteBytes(int stream, byte[] b, int offset, int len)
-        {
-            // TODO: optimize
-            int end = offset + len;
-            for (int i = offset; i < end; i++)
-            {
-                WriteByte(stream, b[i]);
-            }
-        }
-
-        /// <summary>
-        /// NOTE: This was writeVInt() in Lucene
-        /// </summary>
-        internal void WriteVInt32(int stream, int i)
-        {
-            Debug.Assert(stream < streamCount);
-            while ((i & ~0x7F) != 0)
-            {
-                WriteByte(stream, unchecked((sbyte)((i & 0x7f) | 0x80)));
-                i = (int)((uint)i >> 7);
-            }
-            WriteByte(stream, (sbyte)i);
-        }
-
-        internal override void Finish()
-        {
-            consumer.Finish();
-            if (nextPerField != null)
-            {
-                nextPerField.Finish();
-            }
-        }
-
-#if FEATURE_SERIALIZABLE
-        [Serializable]
-#endif
-        private sealed class PostingsBytesStartArray : BytesRefHash.BytesStartArray
-        {
-            private readonly TermsHashPerField perField;
-            private readonly Counter bytesUsed;
-
-            internal PostingsBytesStartArray(TermsHashPerField perField, Counter bytesUsed)
-            {
-                this.perField = perField;
-                this.bytesUsed = bytesUsed;
-            }
-
-            public override int[] Init()
-            {
-                if (perField.postingsArray == null)
-                {
-                    perField.postingsArray = perField.consumer.CreatePostingsArray(2);
-                    bytesUsed.AddAndGet(perField.postingsArray.size * perField.postingsArray.BytesPerPosting());
-                }
-                return perField.postingsArray.textStarts;
-            }
-
-            public override int[] Grow()
-            {
-                ParallelPostingsArray postingsArray = perField.postingsArray;
-                int oldSize = perField.postingsArray.size;
-                postingsArray = perField.postingsArray = postingsArray.Grow();
-                bytesUsed.AddAndGet((postingsArray.BytesPerPosting() * (postingsArray.size - oldSize)));
-                return postingsArray.textStarts;
-            }
-
-            public override int[] Clear()
-            {
-                if (perField.postingsArray != null)
-                {
-                    bytesUsed.AddAndGet(-(perField.postingsArray.size * perField.postingsArray.BytesPerPosting()));
-                    perField.postingsArray = null;
-                }
-                return null;
-            }
-
-            public override Counter BytesUsed()
-            {
-                return bytesUsed;
-            }
-        }
-    }
-}
\ No newline at end of file

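One detail worth calling out from the deleted TermsHashPerField: WriteVInt32 uses Lucene's variable-length integer encoding, seven payload bits per byte with the high bit marking that another byte follows. The sketch below (VInt32 is an illustrative helper, not part of this commit) shows the same loop together with a matching decoder:

using System;
using System.Collections.Generic;

// Illustrative VInt (variable-length int) codec matching the loop in
// TermsHashPerField.WriteVInt32: 7 data bits per byte, high bit = "more bytes follow".
static class VInt32
{
    public static List<byte> Encode(int i)
    {
        var bytes = new List<byte>();
        while ((i & ~0x7F) != 0)                  // more than 7 bits left
        {
            bytes.Add((byte)((i & 0x7F) | 0x80)); // low 7 bits, continuation flag set
            i = (int)((uint)i >> 7);              // unsigned shift, as in the original
        }
        bytes.Add((byte)i);                       // final byte, high bit clear
        return bytes;
    }

    public static int Decode(IReadOnlyList<byte> bytes)
    {
        int value = 0, shift = 0;
        foreach (byte b in bytes)
        {
            value |= (b & 0x7F) << shift;         // accumulate 7 bits at a time
            if ((b & 0x80) == 0) break;           // last byte reached
            shift += 7;
        }
        return value;
    }
}

class VIntDemo
{
    static void Main()
    {
        foreach (int n in new[] { 0, 127, 128, 16384, int.MaxValue })
        {
            var enc = VInt32.Encode(n);
            Console.WriteLine($"{n} -> {enc.Count} byte(s), round-trips: {VInt32.Decode(enc) == n}");
        }
    }
}
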
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/ThreadAffinityDocumentsWriterThreadPool.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/ThreadAffinityDocumentsWriterThreadPool.cs b/src/Lucene.Net.Core/Index/ThreadAffinityDocumentsWriterThreadPool.cs
deleted file mode 100644
index 9d0d5e3..0000000
--- a/src/Lucene.Net.Core/Index/ThreadAffinityDocumentsWriterThreadPool.cs
+++ /dev/null
@@ -1,100 +0,0 @@
-using System;
-using System.Collections.Concurrent;
-using System.Collections.Generic;
-using System.Diagnostics;
-using System.Threading;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements. See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License. You may obtain a copy of the License at
-     *
-     * http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using ThreadState = Lucene.Net.Index.DocumentsWriterPerThreadPool.ThreadState; //javadoc
-
-    /// <summary>
-    /// A <seealso cref="DocumentsWriterPerThreadPool"/> implementation that tries to assign an
-    /// indexing thread to the same <seealso cref="ThreadState"/> each time the thread tries to
-    /// obtain a <seealso cref="ThreadState"/>. Once a new <seealso cref="ThreadState"/> is created it is
-    /// associated with the creating thread. Subsequently, if the thread's associated
-    /// <seealso cref="ThreadState"/> is not in use, it will be associated with the requesting
-    /// thread. Otherwise, if the <seealso cref="ThreadState"/> is used by another thread,
-    /// <seealso cref="ThreadAffinityDocumentsWriterThreadPool"/> tries to find the currently
-    /// least contended <seealso cref="ThreadState"/>.
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    internal class ThreadAffinityDocumentsWriterThreadPool : DocumentsWriterPerThreadPool
-    {
-        private IDictionary<Thread, ThreadState> threadBindings = new ConcurrentDictionary<Thread, ThreadState>();
-
-        /// <summary>
-        /// Creates a new <seealso cref="ThreadAffinityDocumentsWriterThreadPool"/> with a given maximum of <seealso cref="ThreadState"/>s.
-        /// </summary>
-        public ThreadAffinityDocumentsWriterThreadPool(int maxNumPerThreads)
-            : base(maxNumPerThreads)
-        {
-            Debug.Assert(MaxThreadStates >= 1);
-        }
-
-        public override ThreadState GetAndLock(Thread requestingThread, DocumentsWriter documentsWriter)
-        {
-            ThreadState threadState;
-            threadBindings.TryGetValue(requestingThread, out threadState);
-            if (threadState != null && threadState.TryLock())
-            {
-                return threadState;
-            }
-            ThreadState minThreadState = null;
-
-            /* TODO -- another thread could lock the minThreadState we just got while
-             we should somehow prevent this. */
-            // Find the state that has minimum number of threads waiting
-            minThreadState = MinContendedThreadState();
-            if (minThreadState == null || minThreadState.HasQueuedThreads)
-            {
-                ThreadState newState = NewThreadState(); // state is already locked if non-null
-                if (newState != null)
-                {
-                    //Debug.Assert(newState.HeldByCurrentThread);
-                    threadBindings[requestingThread] = newState;
-                    return newState;
-                }
-                else if (minThreadState == null)
-                {
-                    /*
-                     * no new threadState available we just take the minContented one
-                     * this must return a valid thread state since we accessed the
-                     * synced context in newThreadState() above.
-                     */
-                    minThreadState = MinContendedThreadState();
-                }
-            }
-            Debug.Assert(minThreadState != null, "ThreadState is null");
-
-            minThreadState.@Lock();
-            return minThreadState;
-        }
-
-        public override object Clone()
-        {
-            ThreadAffinityDocumentsWriterThreadPool clone = (ThreadAffinityDocumentsWriterThreadPool)base.Clone();
-            clone.threadBindings = new ConcurrentDictionary<Thread, ThreadState>();
-            return clone;
-        }
-    }
-}
\ No newline at end of file

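The pool above tries to hand each indexing thread the ThreadState it used last and falls back to the least contended state when that binding is busy or missing. A stripped-down sketch of the same affinity idea, using hypothetical WorkerSlot/AffinityPool types and a simple waiter count instead of the real queued-thread bookkeeping, might look like this:

using System;
using System.Collections.Concurrent;
using System.Linq;
using System.Threading;

// Hypothetical stand-in for DocumentsWriterPerThreadPool.ThreadState: a lockable slot.
class WorkerSlot
{
    private readonly object gate = new object();
    public int Id { get; }
    public int Waiters;                               // rough contention signal
    public WorkerSlot(int id) => Id = id;
    public bool TryLock() => Monitor.TryEnter(gate);
    public void Lock()
    {
        Interlocked.Increment(ref Waiters);
        Monitor.Enter(gate);                          // may block if contended
        Interlocked.Decrement(ref Waiters);
    }
    public void Unlock() => Monitor.Exit(gate);
}

class AffinityPool
{
    private readonly WorkerSlot[] slots;
    private readonly ConcurrentDictionary<int, WorkerSlot> bindings =
        new ConcurrentDictionary<int, WorkerSlot>();  // managed thread id -> preferred slot

    public AffinityPool(int size) =>
        slots = Enumerable.Range(0, size).Select(i => new WorkerSlot(i)).ToArray();

    // Mirrors the GetAndLock idea: prefer the slot this thread used last;
    // otherwise take (and remember) the currently least contended slot.
    public WorkerSlot Acquire()
    {
        int tid = Environment.CurrentManagedThreadId;
        if (bindings.TryGetValue(tid, out var preferred) && preferred.TryLock())
            return preferred;

        var least = slots.OrderBy(s => s.Waiters).First();
        bindings[tid] = least;                        // rebind for next time
        least.Lock();
        return least;
    }

    public void Release(WorkerSlot slot) => slot.Unlock();
}

class AffinityDemo
{
    static void Main()
    {
        var pool = new AffinityPool(size: 2);
        var slot = pool.Acquire();
        Console.WriteLine($"thread {Environment.CurrentManagedThreadId} got slot {slot.Id}");
        pool.Release(slot);
    }
}

A caller would pair Acquire/Release in try/finally; the real pool also creates new ThreadStates on demand via NewThreadState(), which this sketch omits.
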
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/TieredMergePolicy.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/TieredMergePolicy.cs b/src/Lucene.Net.Core/Index/TieredMergePolicy.cs
deleted file mode 100644
index f41f6c6..0000000
--- a/src/Lucene.Net.Core/Index/TieredMergePolicy.cs
+++ /dev/null
@@ -1,779 +0,0 @@
-using Lucene.Net.Support;
-using System;
-using System.Collections.Generic;
-using System.Globalization;
-using System.Text;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    /// <summary>
-    /// Merges segments of approximately equal size, subject to
-    /// an allowed number of segments per tier.  This is similar
-    /// to <see cref="LogByteSizeMergePolicy"/>, except this merge
-    /// policy is able to merge non-adjacent segments, and
-    /// separates how many segments are merged at once (<see cref="MaxMergeAtOnce"/>) 
-    /// from how many segments are allowed
-    /// per tier (<see cref="SegmentsPerTier"/>).  This merge
-    /// policy also does not over-merge (i.e. cascade merges).
-    ///
-    /// <para/>For normal merging, this policy first computes a
-    /// "budget" of how many segments are allowed to be in the
-    /// index.  If the index is over-budget, then the policy
-    /// sorts segments by decreasing size (pro-rating by percent
-    /// deletes), and then finds the least-cost merge.  Merge
-    /// cost is measured by a combination of the "skew" of the
-    /// merge (size of largest segment divided by smallest segment),
-    /// total merge size and percent deletes reclaimed,
-    /// so that merges with lower skew, smaller size
-    /// and those reclaiming more deletes, are
-    /// favored.
-    ///
-    /// <para/>If a merge will produce a segment that's larger than
-    /// <see cref="MaxMergedSegmentMB"/>, then the policy will
-    /// merge fewer segments (down to 1 at once, if that one has
-    /// deletions) to keep the segment size under budget.
-    ///
-    /// <para/><b>NOTE</b>: This policy freely merges non-adjacent
-    /// segments; if this is a problem, use <see cref="LogMergePolicy"/>.
-    ///
-    /// <para/><b>NOTE</b>: This policy always merges by byte size
-    /// of the segments, always pro-rates by percent deletes,
-    /// and does not apply any maximum segment size during
-    /// forceMerge (unlike <see cref="LogByteSizeMergePolicy"/>).
-    ///
-    /// @lucene.experimental
-    /// </summary>
-
-    // TODO
-    //   - we could try to take into account whether a large
-    //     merge is already running (under CMS) and then bias
-    //     ourselves towards picking smaller merges if so (or,
-    //     maybe CMS should do so)
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public class TieredMergePolicy : MergePolicy
-    {
-        /// <summary>
-        /// Default noCFSRatio.  If a merge's size is >= 10% of
-        /// the index, then we disable compound file for it.
-        /// </summary>
-        /// <seealso cref="MergePolicy.NoCFSRatio"/>
-        public new static readonly double DEFAULT_NO_CFS_RATIO = 0.1;
-
-        private int maxMergeAtOnce = 10;
-        private long maxMergedSegmentBytes = 5 * 1024 * 1024 * 1024L;
-        private int maxMergeAtOnceExplicit = 30;
-
-        private long floorSegmentBytes = 2 * 1024 * 1024L;
-        private double segsPerTier = 10.0;
-        private double forceMergeDeletesPctAllowed = 10.0;
-        private double reclaimDeletesWeight = 2.0;
-
-        /// <summary>
-        /// Sole constructor, setting all settings to their
-        /// defaults.
-        /// </summary>
-        public TieredMergePolicy()
-            : base(DEFAULT_NO_CFS_RATIO, MergePolicy.DEFAULT_MAX_CFS_SEGMENT_SIZE)
-        {
-        }
-
-        /// <summary>
-        /// Gets or sets maximum number of segments to be merged at a time
-        /// during "normal" merging.  For explicit merging (eg,
-        /// <see cref="IndexWriter.ForceMerge(int)"/> or 
-        /// <see cref="IndexWriter.ForceMergeDeletes()"/> was called), see 
-        /// <see cref="MaxMergeAtOnceExplicit"/>.  Default is 10.
-        /// </summary>
-        public virtual int MaxMergeAtOnce
-        {
-            get
-            {
-                return maxMergeAtOnce;
-            }
-            set
-            {
-                if (value < 2)
-                {
-                    throw new System.ArgumentException("maxMergeAtOnce must be > 1 (got " + value + ")");
-                }
-                maxMergeAtOnce = value;
-            }
-        }
-
-        // TODO: should addIndexes do explicit merging, too?  And,
-        // if user calls IW.maybeMerge "explicitly"
-
-        /// <summary>
-        /// Gets or sets maximum number of segments to be merged at a time,
-        /// during <see cref="IndexWriter.ForceMerge(int)"/> or 
-        /// <see cref="IndexWriter.ForceMergeDeletes()"/>. Default is 30.
-        /// </summary>
-        public virtual int MaxMergeAtOnceExplicit
-        {
-            get
-            {
-                return maxMergeAtOnceExplicit;
-            }
-            set
-            {
-                if (value < 2)
-                {
-                    throw new System.ArgumentException("maxMergeAtOnceExplicit must be > 1 (got " + value + ")");
-                }
-                maxMergeAtOnceExplicit = value;
-            }
-        }
-
-        /// <summary>
-        /// Gets or sets maximum sized segment to produce during
-        /// normal merging.  This setting is approximate: the
-        /// estimate of the merged segment size is made by summing
-        /// sizes of to-be-merged segments (compensating for
-        /// percent deleted docs).  Default is 5 GB.
-        /// </summary>
-        public virtual double MaxMergedSegmentMB
-        {
-            get
-            {
-                return maxMergedSegmentBytes / 1024 / 1024.0;
-            }
-            set
-            {
-                if (value < 0.0)
-                {
-                    throw new System.ArgumentException("maxMergedSegmentMB must be >=0 (got " + value.ToString("0.0") + ")");
-                }
-                value *= 1024 * 1024;
-                maxMergedSegmentBytes = (value > long.MaxValue) ? long.MaxValue : (long)value;
-            }
-        }
-
-        /// <summary>
-        /// Controls how aggressively merges that reclaim more
-        /// deletions are favored.  Higher values will more
-        /// aggressively target merges that reclaim deletions, but
-        /// be careful not to go so high that way too much merging
-        /// takes place; a value of 3.0 is probably nearly too
-        /// high.  A value of 0.0 means deletions don't impact
-        /// merge selection.
-        /// </summary>
-        public virtual double ReclaimDeletesWeight
-        {
-            get
-            {
-                return reclaimDeletesWeight;
-            }
-            set
-            {
-                if (value < 0.0)
-                {
-                    throw new System.ArgumentException("reclaimDeletesWeight must be >= 0.0 (got " + value.ToString("0.0") + ")");
-                }
-                reclaimDeletesWeight = value;
-            }
-        }
-
-        /// <summary>
-        /// Segments smaller than this are "rounded up" to this
-        /// size, ie treated as equal (floor) size for merge
-        /// selection.  this is to prevent frequent flushing of
-        /// tiny segments from allowing a long tail in the index.
-        /// Default is 2 MB.
-        /// </summary>
-        public virtual double FloorSegmentMB
-        {
-            get
-            {
-                return floorSegmentBytes / (1024 * 1024.0);
-            }
-            set
-            {
-                if (value <= 0.0)
-                {
-                    throw new System.ArgumentException("floorSegmentMB must be >= 0.0 (got " + value.ToString("0.0") + ")");
-                }
-                value *= 1024 * 1024;
-                floorSegmentBytes = (value > long.MaxValue) ? long.MaxValue : (long)value;
-            }
-        }
-
-        /// <summary>
-        /// When forceMergeDeletes is called, we only merge away a
-        /// segment if its delete percentage is over this
-        /// threshold.  Default is 10%.
-        /// </summary>
-        public virtual double ForceMergeDeletesPctAllowed
-        {
-            get
-            {
-                return forceMergeDeletesPctAllowed;
-            }
-            set
-            {
-                if (value < 0.0 || value > 100.0)
-                {
-                    throw new System.ArgumentException("forceMergeDeletesPctAllowed must be between 0.0 and 100.0 inclusive (got " + value.ToString("0.0") + ")");
-                }
-                forceMergeDeletesPctAllowed = value;
-            }
-        }
-
-        /// <summary>
-        /// Gets or sets the allowed number of segments per tier.  Smaller
-        /// values mean more merging but fewer segments.
-        /// 
-        /// <para/><b>NOTE</b>: this value should be >= the 
-        /// <see cref="MaxMergeAtOnce"/> otherwise you'll force too much
-        /// merging to occur.
-        /// 
-        /// <para/>Default is 10.0.
-        /// </summary>
-        public virtual double SegmentsPerTier
-        {
-            get
-            {
-                return segsPerTier;
-            }
-            set
-            {
-                if (value < 2.0)
-                {
-                    throw new System.ArgumentException("segmentsPerTier must be >= 2.0 (got " + value.ToString("0.0") + ")");
-                }
-                segsPerTier = value;
-            }
-        }
-
-        private class SegmentByteSizeDescending : IComparer<SegmentCommitInfo>
-        {
-            private readonly TieredMergePolicy outerInstance;
-
-            public SegmentByteSizeDescending(TieredMergePolicy outerInstance)
-            {
-                this.outerInstance = outerInstance;
-            }
-
-            public virtual int Compare(SegmentCommitInfo o1, SegmentCommitInfo o2)
-            {
-                try
-                {
-                    long sz1 = outerInstance.Size(o1);
-                    long sz2 = outerInstance.Size(o2);
-                    if (sz1 > sz2)
-                    {
-                        return -1;
-                    }
-                    else if (sz2 > sz1)
-                    {
-                        return 1;
-                    }
-                    else
-                    {
-                        return o1.Info.Name.CompareToOrdinal(o2.Info.Name);
-                    }
-                }
-                catch (System.IO.IOException ioe)
-                {
-                    throw new Exception(ioe.ToString(), ioe);
-                }
-            }
-        }
-
-        /// <summary>
-        /// Holds score and explanation for a single candidate
-        /// merge.
-        /// </summary>
-        protected abstract class MergeScore
-        {
-            /// <summary>
-            /// Sole constructor. (For invocation by subclass
-            /// constructors, typically implicit.)
-            /// </summary>
-            protected MergeScore()
-            {
-            }
-
-            /// <summary>
-            /// Returns the score for this merge candidate; lower
-            /// scores are better.
-            /// </summary>
-            public abstract double Score { get; }
-
-            /// <summary>
-            /// Human readable explanation of how the merge got this
-            /// score.
-            /// </summary>
-            public abstract string Explanation { get; }
-        }
-
-        public override MergeSpecification FindMerges(MergeTrigger mergeTrigger, SegmentInfos infos)
-        {
-            if (Verbose())
-            {
-                Message("findMerges: " + infos.Count + " segments");
-            }
-            if (infos.Count == 0)
-            {
-                return null;
-            }
-            ICollection<SegmentCommitInfo> merging = m_writer.Get().MergingSegments;
-            ICollection<SegmentCommitInfo> toBeMerged = new HashSet<SegmentCommitInfo>();
-
-            List<SegmentCommitInfo> infosSorted = new List<SegmentCommitInfo>(infos.AsList());
-            infosSorted.Sort(new SegmentByteSizeDescending(this));
-
-            // Compute total index bytes & print details about the index
-            long totIndexBytes = 0;
-            long minSegmentBytes = long.MaxValue;
-            foreach (SegmentCommitInfo info in infosSorted)
-            {
-                long segBytes = Size(info);
-                if (Verbose())
-                {
-                    string extra = merging.Contains(info) ? " [merging]" : "";
-                    if (segBytes >= maxMergedSegmentBytes / 2.0)
-                    {
-                        extra += " [skip: too large]";
-                    }
-                    else if (segBytes < floorSegmentBytes)
-                    {
-                        extra += " [floored]";
-                    }
-                    Message("  seg=" + m_writer.Get().SegString(info) + " size=" + string.Format("{0:0.000}", segBytes / 1024 / 1024.0) + " MB" + extra);
-                }
-
-                minSegmentBytes = Math.Min(segBytes, minSegmentBytes);
-                // Accum total byte size
-                totIndexBytes += segBytes;
-            }
-
-            // If we have too-large segments, grace them out
-            // of the maxSegmentCount:
-            int tooBigCount = 0;
-            while (tooBigCount < infosSorted.Count && Size(infosSorted[tooBigCount]) >= maxMergedSegmentBytes / 2.0)
-            {
-                totIndexBytes -= Size(infosSorted[tooBigCount]);
-                tooBigCount++;
-            }
-
-            minSegmentBytes = FloorSize(minSegmentBytes);
-
-            // Compute max allowed segs in the index
-            long levelSize = minSegmentBytes;
-            long bytesLeft = totIndexBytes;
-            double allowedSegCount = 0;
-            while (true)
-            {
-                double segCountLevel = bytesLeft / (double)levelSize;
-                if (segCountLevel < segsPerTier)
-                {
-                    allowedSegCount += Math.Ceiling(segCountLevel);
-                    break;
-                }
-                allowedSegCount += segsPerTier;
-                bytesLeft -= (long)(segsPerTier * levelSize);
-                levelSize *= maxMergeAtOnce;
-            }
-            int allowedSegCountInt = (int)allowedSegCount;
-
-            MergeSpecification spec = null;
-
-            // Cycle to possibly select more than one merge:
-            while (true)
-            {
-                long mergingBytes = 0;
-
-                // Gather eligible segments for merging, ie segments
-                // not already being merged and not already picked (by
-                // prior iteration of this loop) for merging:
-                IList<SegmentCommitInfo> eligible = new List<SegmentCommitInfo>();
-                for (int idx = tooBigCount; idx < infosSorted.Count; idx++)
-                {
-                    SegmentCommitInfo info = infosSorted[idx];
-                    if (merging.Contains(info))
-                    {
-                        mergingBytes += info.GetSizeInBytes();
-                    }
-                    else if (!toBeMerged.Contains(info))
-                    {
-                        eligible.Add(info);
-                    }
-                }
-
-                bool maxMergeIsRunning = mergingBytes >= maxMergedSegmentBytes;
-
-                if (Verbose())
-                {
-                    Message("  allowedSegmentCount=" + allowedSegCountInt + " vs count=" + infosSorted.Count + " (eligible count=" + eligible.Count + ") tooBigCount=" + tooBigCount);
-                }
-
-                if (eligible.Count == 0)
-                {
-                    return spec;
-                }
-
-                if (eligible.Count >= allowedSegCountInt)
-                {
-                    // OK we are over budget -- find best merge!
-                    MergeScore bestScore = null;
-                    IList<SegmentCommitInfo> best = null;
-                    bool bestTooLarge = false;
-                    long bestMergeBytes = 0;
-
-                    // Consider all merge starts:
-                    for (int startIdx = 0; startIdx <= eligible.Count - maxMergeAtOnce; startIdx++)
-                    {
-                        long totAfterMergeBytes = 0;
-
-                        IList<SegmentCommitInfo> candidate = new List<SegmentCommitInfo>();
-                        bool hitTooLarge = false;
-                        for (int idx = startIdx; idx < eligible.Count && candidate.Count < maxMergeAtOnce; idx++)
-                        {
-                            SegmentCommitInfo info = eligible[idx];
-                            long segBytes = Size(info);
-
-                            if (totAfterMergeBytes + segBytes > maxMergedSegmentBytes)
-                            {
-                                hitTooLarge = true;
-                                // NOTE: we continue, so that we can try
-                                // "packing" smaller segments into this merge
-                                // to see if we can get closer to the max
-                                // size; this in general is not perfect since
-                                // this is really "bin packing" and we'd have
-                                // to try different permutations.
-                                continue;
-                            }
-                            candidate.Add(info);
-                            totAfterMergeBytes += segBytes;
-                        }
-
-                        MergeScore score = Score(candidate, hitTooLarge, mergingBytes);
-                        if (Verbose())
-                        {
-                            Message("  maybe=" + m_writer.Get().SegString(candidate) + " score=" + score.Score + " " + score.Explanation + " tooLarge=" + hitTooLarge + " size=" + string.Format("{0:0.000} MB", totAfterMergeBytes / 1024.0 / 1024.0));
-                        }
-
-                        // If we are already running a max sized merge
-                        // (maxMergeIsRunning), don't allow another max
-                        // sized merge to kick off:
-                        if ((bestScore == null || score.Score < bestScore.Score) && (!hitTooLarge || !maxMergeIsRunning))
-                        {
-                            best = candidate;
-                            bestScore = score;
-                            bestTooLarge = hitTooLarge;
-                            bestMergeBytes = totAfterMergeBytes;
-                        }
-                    }
-
-                    if (best != null)
-                    {
-                        if (spec == null)
-                        {
-                            spec = new MergeSpecification();
-                        }
-                        OneMerge merge = new OneMerge(best);
-                        spec.Add(merge);
-                        foreach (SegmentCommitInfo info in merge.Segments)
-                        {
-                            toBeMerged.Add(info);
-                        }
-
-                        if (Verbose())
-                        {
-                            Message("  add merge=" + m_writer.Get().SegString(merge.Segments) + " size=" + string.Format("{0:0.000} MB", bestMergeBytes / 1024.0 / 1024.0) + " score=" + string.Format("{0:0.000}", bestScore.Score) + " " + bestScore.Explanation + (bestTooLarge ? " [max merge]" : ""));
-                        }
-                    }
-                    else
-                    {
-                        return spec;
-                    }
-                }
-                else
-                {
-                    return spec;
-                }
-            }
-        }
-
-        /// <summary>
-        /// Expert: scores one merge; subclasses can override. </summary>
-        protected virtual MergeScore Score(IList<SegmentCommitInfo> candidate, bool hitTooLarge, long mergingBytes)
-        {
-            long totBeforeMergeBytes = 0;
-            long totAfterMergeBytes = 0;
-            long totAfterMergeBytesFloored = 0;
-            foreach (SegmentCommitInfo info in candidate)
-            {
-                long segBytes = Size(info);
-                totAfterMergeBytes += segBytes;
-                totAfterMergeBytesFloored += FloorSize(segBytes);
-                totBeforeMergeBytes += info.GetSizeInBytes();
-            }
-
-            // Roughly measure "skew" of the merge, i.e. how
-            // "balanced" the merge is (whether the segments are
-            // about the same size), which can range from
-            // 1.0/numSegsBeingMerged (good) to 1.0 (poor). Heavily
-            // lopsided merges (skew near 1.0) is no good; it means
-            // O(N^2) merge cost over time:
-            double skew;
-            if (hitTooLarge)
-            {
-                // Pretend the merge has perfect skew; skew doesn't
-                // matter in this case because this merge will not
-                // "cascade" and so it cannot lead to N^2 merge cost
-                // over time:
-                skew = 1.0 / maxMergeAtOnce;
-            }
-            else
-            {
-                skew = ((double)FloorSize(Size(candidate[0]))) / totAfterMergeBytesFloored;
-            }
-
-            // Strongly favor merges with less skew (smaller
-            // mergeScore is better):
-            double mergeScore = skew;
-
-            // Gently favor smaller merges over bigger ones.  We
-            // don't want to make this exponent too large else we
-            // can end up doing poor merges of small segments in
-            // order to avoid the large merges:
-            mergeScore *= Math.Pow(totAfterMergeBytes, 0.05);
-
-            // Strongly favor merges that reclaim deletes:
-            double nonDelRatio = ((double)totAfterMergeBytes) / totBeforeMergeBytes;
-            mergeScore *= Math.Pow(nonDelRatio, reclaimDeletesWeight);
-
-            double finalMergeScore = mergeScore;
-
-            return new MergeScoreAnonymousInnerClassHelper(this, skew, nonDelRatio, finalMergeScore);
-        }
-
-        private class MergeScoreAnonymousInnerClassHelper : MergeScore
-        {
-            private readonly TieredMergePolicy outerInstance;
-
-            private double skew;
-            private double nonDelRatio;
-            private double finalMergeScore;
-
-            public MergeScoreAnonymousInnerClassHelper(TieredMergePolicy outerInstance, double skew, double nonDelRatio, double finalMergeScore)
-            {
-                this.outerInstance = outerInstance;
-                this.skew = skew;
-                this.nonDelRatio = nonDelRatio;
-                this.finalMergeScore = finalMergeScore;
-            }
-
-            public override double Score
-            {
-                get
-                {
-                    return finalMergeScore;
-                }
-            }
-
-            public override string Explanation
-            {
-                get
-                {
-                    return "skew=" + string.Format(CultureInfo.InvariantCulture, "{0:F3}", skew) + " nonDelRatio=" + string.Format(CultureInfo.InvariantCulture, "{0:F3}", nonDelRatio);
-                }
-            }
-        }
-
-        public override MergeSpecification FindForcedMerges(SegmentInfos infos, int maxSegmentCount, IDictionary<SegmentCommitInfo, bool?> segmentsToMerge)
-        {
-            if (Verbose())
-            {
-                Message("FindForcedMerges maxSegmentCount=" + maxSegmentCount + " infos=" + m_writer.Get().SegString(infos.Segments) + " segmentsToMerge=" + Arrays.ToString(segmentsToMerge));
-            }
-
-            List<SegmentCommitInfo> eligible = new List<SegmentCommitInfo>();
-            bool forceMergeRunning = false;
-            ICollection<SegmentCommitInfo> merging = m_writer.Get().MergingSegments;
-            bool? segmentIsOriginal = false;
-            foreach (SegmentCommitInfo info in infos.Segments)
-            {
-                bool? isOriginal;
-                if (segmentsToMerge.TryGetValue(info, out isOriginal))
-                {
-                    segmentIsOriginal = isOriginal;
-                    if (!merging.Contains(info))
-                    {
-                        eligible.Add(info);
-                    }
-                    else
-                    {
-                        forceMergeRunning = true;
-                    }
-                }
-            }
-
-            if (eligible.Count == 0)
-            {
-                return null;
-            }
-
-            if ((maxSegmentCount > 1 && eligible.Count <= maxSegmentCount) || (maxSegmentCount == 1 && eligible.Count == 1 && (segmentIsOriginal == false || IsMerged(infos, eligible[0]))))
-            {
-                if (Verbose())
-                {
-                    Message("already merged");
-                }
-                return null;
-            }
-
-            eligible.Sort(new SegmentByteSizeDescending(this));
-
-            if (Verbose())
-            {
-                Message("eligible=" + Arrays.ToString(eligible));
-                Message("forceMergeRunning=" + forceMergeRunning);
-            }
-
-            int end = eligible.Count;
-
-            MergeSpecification spec = null;
-
-            // Do full merges, first, backwards:
-            while (end >= maxMergeAtOnceExplicit + maxSegmentCount - 1)
-            {
-                if (spec == null)
-                {
-                    spec = new MergeSpecification();
-                }
-                OneMerge merge = new OneMerge(eligible.SubList(end - maxMergeAtOnceExplicit, end));
-                if (Verbose())
-                {
-                    Message("add merge=" + m_writer.Get().SegString(merge.Segments));
-                }
-                spec.Add(merge);
-                end -= maxMergeAtOnceExplicit;
-            }
-
-            if (spec == null && !forceMergeRunning)
-            {
-                // Do final merge
-                int numToMerge = end - maxSegmentCount + 1;
-                OneMerge merge = new OneMerge(eligible.SubList(end - numToMerge, end));
-                if (Verbose())
-                {
-                    Message("add final merge=" + merge.SegString(m_writer.Get().Directory));
-                }
-                spec = new MergeSpecification();
-                spec.Add(merge);
-            }
-
-            return spec;
-        }
-
-        public override MergeSpecification FindForcedDeletesMerges(SegmentInfos infos)
-        {
-            if (Verbose())
-            {
-                Message("findForcedDeletesMerges infos=" + m_writer.Get().SegString(infos.Segments) + " forceMergeDeletesPctAllowed=" + forceMergeDeletesPctAllowed);
-            }
-            List<SegmentCommitInfo> eligible = new List<SegmentCommitInfo>();
-            ICollection<SegmentCommitInfo> merging = m_writer.Get().MergingSegments;
-            foreach (SegmentCommitInfo info in infos.Segments)
-            {
-                double pctDeletes = 100.0 * ((double)m_writer.Get().NumDeletedDocs(info)) / info.Info.DocCount;
-                if (pctDeletes > forceMergeDeletesPctAllowed && !merging.Contains(info))
-                {
-                    eligible.Add(info);
-                }
-            }
-
-            if (eligible.Count == 0)
-            {
-                return null;
-            }
-
-            eligible.Sort(new SegmentByteSizeDescending(this));
-
-            if (Verbose())
-            {
-                Message("eligible=" + Arrays.ToString(eligible));
-            }
-
-            int start = 0;
-            MergeSpecification spec = null;
-
-            while (start < eligible.Count)
-            {
-                // Don't enforce max merged size here: app is explicitly
-                // calling forceMergeDeletes, and knows this may take a
-                // long time / produce big segments (like forceMerge):
-                int end = Math.Min(start + maxMergeAtOnceExplicit, eligible.Count);
-                if (spec == null)
-                {
-                    spec = new MergeSpecification();
-                }
-
-                OneMerge merge = new OneMerge(eligible.SubList(start, end));
-                if (Verbose())
-                {
-                    Message("add merge=" + m_writer.Get().SegString(merge.Segments));
-                }
-                spec.Add(merge);
-                start = end;
-            }
-
-            return spec;
-        }
-
-        protected override void Dispose(bool disposing)
-        {
-        }
-
-        private long FloorSize(long bytes)
-        {
-            return Math.Max(floorSegmentBytes, bytes);
-        }
-
-        private bool Verbose()
-        {
-            IndexWriter w = m_writer.Get();
-            return w != null && w.infoStream.IsEnabled("TMP");
-        }
-
-        private void Message(string message)
-        {
-            m_writer.Get().infoStream.Message("TMP", message);
-        }
-
-        public override string ToString()
-        {
-            StringBuilder sb = new StringBuilder("[" + this.GetType().Name + ": ");
-            sb.Append("maxMergeAtOnce=").Append(maxMergeAtOnce).Append(", ");
-            sb.Append("maxMergeAtOnceExplicit=").Append(maxMergeAtOnceExplicit).Append(", ");
-            sb.Append("maxMergedSegmentMB=").Append(maxMergedSegmentBytes / 1024 / 1024.0).Append(", ");
-            sb.Append("floorSegmentMB=").Append(floorSegmentBytes / 1024 / 1024.0).Append(", ");
-            sb.Append("forceMergeDeletesPctAllowed=").Append(forceMergeDeletesPctAllowed).Append(", ");
-            sb.Append("segmentsPerTier=").Append(segsPerTier).Append(", ");
-            sb.Append("maxCFSSegmentSizeMB=").Append(MaxCFSSegmentSizeMB).Append(", ");
-            sb.Append("noCFSRatio=").Append(m_noCFSRatio);
-            return sb.ToString();
-        }
-    }
-}
\ No newline at end of file
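
For readers skimming this removal: the scoring above multiplies three factors (size skew, a mild penalty on total merge size, and a strong bonus for reclaiming deletes), and a small standalone C# sketch of the same arithmetic may be easier to follow than the diff. The class, method, and parameter names below are illustrative only and not part of the ported API; the 2 MB floor and 2.0 delete-reclaim weight are the Lucene 4.x defaults, both configurable on TieredMergePolicy.

using System;
using System.Linq;

// Illustrative only: mirrors the arithmetic of the scoring code removed above,
// using plain arrays of byte sizes instead of real SegmentCommitInfo objects.
static class MergeScoreSketch
{
    // Lucene 4.x defaults (both configurable on TieredMergePolicy).
    private const long FloorSegmentBytes = 2L * 1024 * 1024;   // floorSegmentMB = 2.0
    private const double ReclaimDeletesWeight = 2.0;

    private static long FloorSize(long bytes) => Math.Max(FloorSegmentBytes, bytes);

    // candidateBytes: per-segment sizes with deleted docs' bytes already subtracted,
    // sorted largest first; totBeforeMergeBytes: the same segments' raw sizes.
    public static double Score(long[] candidateBytes, long totBeforeMergeBytes)
    {
        long totAfterMergeBytes = candidateBytes.Sum();
        long totAfterMergeBytesFloored = candidateBytes.Sum(FloorSize);

        // Skew near 1.0 means one huge segment dominates the merge, which leads to
        // O(N^2) merge cost over time.  (The removed code also special-cases merges
        // capped by maxMergedSegmentBytes, pretending perfect skew for them.)
        double skew = (double)FloorSize(candidateBytes[0]) / totAfterMergeBytesFloored;

        double mergeScore = skew;                                   // smaller score is better
        mergeScore *= Math.Pow(totAfterMergeBytes, 0.05);           // gently favor smaller merges
        double nonDelRatio = (double)totAfterMergeBytes / totBeforeMergeBytes;
        mergeScore *= Math.Pow(nonDelRatio, ReclaimDeletesWeight);  // strongly favor reclaiming deletes
        return mergeScore;
    }
}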

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/TrackingIndexWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/TrackingIndexWriter.cs b/src/Lucene.Net.Core/Index/TrackingIndexWriter.cs
deleted file mode 100644
index 5789685..0000000
--- a/src/Lucene.Net.Core/Index/TrackingIndexWriter.cs
+++ /dev/null
@@ -1,276 +0,0 @@
-using Lucene.Net.Support;
-using System;
-using System.Collections.Generic;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using Analyzer = Lucene.Net.Analysis.Analyzer;
-    using Directory = Lucene.Net.Store.Directory;
-    using Query = Lucene.Net.Search.Query;
-
-    /// <summary>
-    /// Class that tracks changes to a delegated
-    ///  IndexWriter, used by {@link
-    ///  ControlledRealTimeReopenThread} to ensure specific
-    ///  changes are visible.   Create this class (passing your
-    ///  IndexWriter), and then pass this class to {@link
-    ///  ControlledRealTimeReopenThread}.
-    ///  Be sure to make all changes via the
-    ///  TrackingIndexWriter, otherwise {@link
-    ///  ControlledRealTimeReopenThread} won't know about the changes.
-    ///
-    /// @lucene.experimental
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public class TrackingIndexWriter
-    {
-        private readonly IndexWriter writer;
-        private readonly AtomicInt64 indexingGen = new AtomicInt64(1);
-
-        /// <summary>
-        /// Create a {@code TrackingIndexWriter} wrapping the
-        ///  provided <seealso cref="IndexWriter"/>.
-        /// </summary>
-        public TrackingIndexWriter(IndexWriter writer)
-        {
-            this.writer = writer;
-        }
-
-        /// <summary>
-        /// Calls {@link
-        ///  IndexWriter#updateDocument(Term,Iterable,Analyzer)}
-        ///  and returns the generation that reflects this change.
-        /// </summary>
-        public virtual long UpdateDocument(Term t, IEnumerable<IIndexableField> d, Analyzer a)
-        {
-            writer.UpdateDocument(t, d, a);
-            // Return gen as of when indexing finished:
-            return indexingGen.Get();
-        }
-
-        /// <summary>
-        /// Calls {@link
-        ///  IndexWriter#updateDocument(Term,Iterable)} and
-        ///  returns the generation that reflects this change.
-        /// </summary>
-        public virtual long UpdateDocument(Term t, IEnumerable<IIndexableField> d)
-        {
-            writer.UpdateDocument(t, d);
-            // Return gen as of when indexing finished:
-            return indexingGen.Get();
-        }
-
-        /// <summary>
-        /// Calls {@link
-        ///  IndexWriter#updateDocuments(Term,Iterable,Analyzer)}
-        ///  and returns the generation that reflects this change.
-        /// </summary>
-        public virtual long UpdateDocuments(Term t, IEnumerable<IEnumerable<IIndexableField>> docs, Analyzer a)
-        {
-            writer.UpdateDocuments(t, docs, a);
-            // Return gen as of when indexing finished:
-            return indexingGen.Get();
-        }
-
-        /// <summary>
-        /// Calls {@link
-        ///  IndexWriter#updateDocuments(Term,Iterable)} and returns
-        ///  the generation that reflects this change.
-        /// </summary>
-        public virtual long UpdateDocuments(Term t, IEnumerable<IEnumerable<IIndexableField>> docs)
-        {
-            writer.UpdateDocuments(t, docs);
-            // Return gen as of when indexing finished:
-            return indexingGen.Get();
-        }
-
-        /// <summary>
-        /// Calls <seealso cref="IndexWriter#deleteDocuments(Term)"/> and
-        ///  returns the generation that reflects this change.
-        /// </summary>
-        public virtual long DeleteDocuments(Term t)
-        {
-            writer.DeleteDocuments(t);
-            // Return gen as of when indexing finished:
-            return indexingGen.Get();
-        }
-
-        /// <summary>
-        /// Calls <seealso cref="IndexWriter#deleteDocuments(Term...)"/> and
-        ///  returns the generation that reflects this change.
-        /// </summary>
-        public virtual long DeleteDocuments(params Term[] terms)
-        {
-            writer.DeleteDocuments(terms);
-            // Return gen as of when indexing finished:
-            return indexingGen.Get();
-        }
-
-        /// <summary>
-        /// Calls <seealso cref="IndexWriter#deleteDocuments(Query)"/> and
-        ///  returns the generation that reflects this change.
-        /// </summary>
-        public virtual long DeleteDocuments(Query q)
-        {
-            writer.DeleteDocuments(q);
-            // Return gen as of when indexing finished:
-            return indexingGen.Get();
-        }
-
-        /// <summary>
-        /// Calls <seealso cref="IndexWriter#deleteDocuments(Query...)"/>
-        ///  and returns the generation that reflects this change.
-        /// </summary>
-        public virtual long DeleteDocuments(params Query[] queries)
-        {
-            writer.DeleteDocuments(queries);
-            // Return gen as of when indexing finished:
-            return indexingGen.Get();
-        }
-
-        /// <summary>
-        /// Calls <seealso cref="IndexWriter#deleteAll"/> and returns the
-        ///  generation that reflects this change.
-        /// </summary>
-        public virtual long DeleteAll()
-        {
-            writer.DeleteAll();
-            // Return gen as of when indexing finished:
-            return indexingGen.Get();
-        }
-
-        /// <summary>
-        /// Calls {@link
-        ///  IndexWriter#addDocument(Iterable,Analyzer)} and
-        ///  returns the generation that reflects this change.
-        /// </summary>
-        public virtual long AddDocument(IEnumerable<IIndexableField> d, Analyzer a)
-        {
-            writer.AddDocument(d, a);
-            // Return gen as of when indexing finished:
-            return indexingGen.Get();
-        }
-
-        /// <summary>
-        /// Calls {@link
-        ///  IndexWriter#addDocuments(Iterable,Analyzer)} and
-        ///  returns the generation that reflects this change.
-        /// </summary>
-        public virtual long AddDocuments(IEnumerable<IEnumerable<IIndexableField>> docs, Analyzer a)
-        {
-            writer.AddDocuments(docs, a);
-            // Return gen as of when indexing finished:
-            return indexingGen.Get();
-        }
-
-        /// <summary>
-        /// Calls <seealso cref="IndexWriter#addDocument(Iterable)"/>
-        ///  and returns the generation that reflects this change.
-        /// </summary>
-        public virtual long AddDocument(IEnumerable<IIndexableField> d)
-        {
-            writer.AddDocument(d);
-            // Return gen as of when indexing finished:
-            return indexingGen.Get();
-        }
-
-        /// <summary>
-        /// Calls <seealso cref="IndexWriter#addDocuments(Iterable)"/> and
-        ///  returns the generation that reflects this change.
-        /// </summary>
-        public virtual long AddDocuments(IEnumerable<IEnumerable<IIndexableField>> docs)
-        {
-            writer.AddDocuments(docs);
-            // Return gen as of when indexing finished:
-            return indexingGen.Get();
-        }
-
-        /// <summary>
-        /// Calls <seealso cref="IndexWriter#addIndexes(Directory...)"/> and
-        ///  returns the generation that reflects this change.
-        /// </summary>
-        public virtual long AddIndexes(params Directory[] dirs)
-        {
-            writer.AddIndexes(dirs);
-            // Return gen as of when indexing finished:
-            return indexingGen.Get();
-        }
-
-        /// <summary>
-        /// Calls <seealso cref="IndexWriter#addIndexes(IndexReader...)"/>
-        ///  and returns the generation that reflects this change.
-        /// </summary>
-        public virtual long AddIndexes(params IndexReader[] readers)
-        {
-            writer.AddIndexes(readers);
-            // Return gen as of when indexing finished:
-            return indexingGen.Get();
-        }
-
-        /// <summary>
-        /// Return the current generation being indexed. </summary>
-        public virtual long Generation
-        {
-            get
-            {
-                return indexingGen.Get();
-            }
-        }
-
-        /// <summary>
-        /// Return the wrapped <seealso cref="IndexWriter"/>. </summary>
-        public virtual IndexWriter IndexWriter
-        {
-            get
-            {
-                return writer;
-            }
-        }
-
-        /// <summary>
-        /// Return and increment current gen.
-        ///
-        /// @lucene.internal
-        /// </summary>
-        public virtual long GetAndIncrementGeneration()
-        {
-            return indexingGen.GetAndIncrement();
-        }
-
-        /// <summary>
-        /// Calls {@link
-        ///  IndexWriter#tryDeleteDocument(IndexReader,int)} and
-        ///  returns the generation that reflects this change.
-        /// </summary>
-        public virtual long TryDeleteDocument(IndexReader reader, int docID)
-        {
-            if (writer.TryDeleteDocument(reader, docID))
-            {
-                return indexingGen.Get();
-            }
-            else
-            {
-                return -1;
-            }
-        }
-    }
-}
\ No newline at end of file
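
As a quick illustration of how the class removed above is meant to be used, here is a hedged near-real-time sketch wiring a TrackingIndexWriter into a reopen loop. The SearcherManager and ControlledRealTimeReopenThread signatures are assumed from the Lucene 4.8 Java API that this port mirrors; verify them against the Lucene.NET build you are using.

using Lucene.Net.Analysis.Standard;
using Lucene.Net.Documents;
using Lucene.Net.Index;
using Lucene.Net.Search;
using Lucene.Net.Store;
using Lucene.Net.Util;

// Hedged sketch only, not from this commit.
var dir = new RAMDirectory();
var analyzer = new StandardAnalyzer(LuceneVersion.LUCENE_48);
var writer = new IndexWriter(dir, new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer));

// Route every change through the tracking wrapper so generations are recorded.
var trackingWriter = new TrackingIndexWriter(writer);
var searcherManager = new SearcherManager(writer, true, null); // applyAllDeletes, SearcherFactory
var reopenThread = new ControlledRealTimeReopenThread<IndexSearcher>(
    trackingWriter, searcherManager, 5.0, 0.1); // targetMaxStaleSec, targetMinStaleSec
reopenThread.Start();

var doc = new Document();
doc.Add(new TextField("body", "hello world", Field.Store.YES));
long gen = trackingWriter.UpdateDocument(new Term("id", "1"), doc);

// Block until a searcher reflecting this generation is available, then search.
reopenThread.WaitForGeneration(gen);
IndexSearcher searcher = searcherManager.Acquire();
try { /* run queries against the up-to-date searcher */ }
finally { searcherManager.Release(searcher); }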

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/TwoPhaseCommit.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/TwoPhaseCommit.cs b/src/Lucene.Net.Core/Index/TwoPhaseCommit.cs
deleted file mode 100644
index 0fef4a6..0000000
--- a/src/Lucene.Net.Core/Index/TwoPhaseCommit.cs
+++ /dev/null
@@ -1,53 +0,0 @@
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    /// <summary>
-    /// An interface for implementations that support 2-phase commit. You can use
-    /// <seealso cref="TwoPhaseCommitTool"/> to execute a 2-phase commit algorithm over several
-    /// <seealso cref="ITwoPhaseCommit"/>s.
-    ///
-    /// @lucene.experimental
-    /// </summary>
-    public interface ITwoPhaseCommit
-    {
-        /// <summary>
-        /// The first stage of a 2-phase commit. Implementations should do as much work
-        /// as possible in this method, but avoid actually committing changes. If the
-        /// 2-phase commit fails, <seealso cref="#rollback()"/> is called to discard all changes
-        /// since last successful commit.
-        /// </summary>
-        void PrepareCommit();
-
-        /// <summary>
-        /// The second phase of a 2-phase commit. Implementations should ideally do
-        /// very little work in this method (following <seealso cref="#prepareCommit()"/>), and
-        /// after it returns, the caller can assume that the changes were successfully
-        /// committed to the underlying storage.
-        /// </summary>
-        void Commit();
-
-        /// <summary>
-        /// Discards any changes that have occurred since the last commit. In a 2-phase
-        /// commit algorithm, where one of the objects failed to <seealso cref="#commit()"/> or
-        /// <seealso cref="#prepareCommit()"/>, this method is used to roll all other objects
-        /// back to their previous state.
-        /// </summary>
-        void Rollback();
-    }
-}
\ No newline at end of file
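
To make the interface contract above concrete, here is a small hypothetical ITwoPhaseCommit implementation that stages its work in PrepareCommit and only publishes it in Commit. The FileBackedCommit class and its file handling are illustrative and not part of Lucene.NET.

using System.IO;
using Lucene.Net.Index;

// Hypothetical participant in a 2-phase commit: it writes to a temp file during
// PrepareCommit and atomically publishes that file during Commit.
public sealed class FileBackedCommit : ITwoPhaseCommit
{
    private readonly string path;
    private readonly string pendingPath;

    public string PendingContent { get; set; } = string.Empty;

    public FileBackedCommit(string path)
    {
        this.path = path;
        this.pendingPath = path + ".pending";
    }

    // Phase 1: do all the expensive, fallible work, but don't make it visible yet.
    public void PrepareCommit() => File.WriteAllText(pendingPath, PendingContent);

    // Phase 2: cheap, and assumed to succeed once PrepareCommit has.
    public void Commit()
    {
        File.Delete(path);
        File.Move(pendingPath, path);
    }

    // Discard anything staged since the last successful commit.
    public void Rollback()
    {
        if (File.Exists(pendingPath)) File.Delete(pendingPath);
    }
}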

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/TwoPhaseCommitTool.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/TwoPhaseCommitTool.cs b/src/Lucene.Net.Core/Index/TwoPhaseCommitTool.cs
deleted file mode 100644
index 2b60ce6..0000000
--- a/src/Lucene.Net.Core/Index/TwoPhaseCommitTool.cs
+++ /dev/null
@@ -1,201 +0,0 @@
-using System;
-#if FEATURE_SERIALIZABLE
-using System.Runtime.Serialization;
-#endif
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    /// <summary>
-    /// A utility for executing 2-phase commit on several objects.
-    /// </summary>
-    /// <seealso cref= ITwoPhaseCommit
-    /// @lucene.experimental </seealso>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public sealed class TwoPhaseCommitTool
-    {
-        /// <summary>
-        /// No instance </summary>
-        private TwoPhaseCommitTool()
-        {
-        }
-
-        /// <summary>
-        /// Thrown by <seealso cref="TwoPhaseCommitTool#execute(TwoPhaseCommit...)"/> when an
-        /// object fails to prepareCommit().
-        /// </summary>
-        // LUCENENET: All exception classes should be marked serializable
-#if FEATURE_SERIALIZABLE
-        [Serializable]
-#endif
-        public class PrepareCommitFailException
-            : System.IO.IOException
-        {
-            /// <summary>
-            /// Sole constructor. </summary>
-            public PrepareCommitFailException(Exception cause, ITwoPhaseCommit obj)
-                : base("prepareCommit() failed on " + obj, cause)
-            {
-            }
-
-            // For testing purposes
-            internal PrepareCommitFailException(string message)
-                : base(message)
-            {
-            }
-
-#if FEATURE_SERIALIZABLE
-            /// <summary>
-            /// Initializes a new instance of this class with serialized data.
-            /// </summary>
-            /// <param name="info">The <see cref="SerializationInfo"/> that holds the serialized object data about the exception being thrown.</param>
-            /// <param name="context">The <see cref="StreamingContext"/> that contains contextual information about the source or destination.</param>
-            public PrepareCommitFailException(SerializationInfo info, StreamingContext context)
-                : base(info, context)
-            {
-            }
-#endif
-        }
-
-        /// <summary>
-        /// Thrown by <seealso cref="TwoPhaseCommitTool#execute(TwoPhaseCommit...)"/> when an
-        /// object fails to commit().
-        /// </summary>
-        // LUCENENET: All exception classes should be marked serializable
-#if FEATURE_SERIALIZABLE
-        [Serializable]
-#endif
-        public class CommitFailException : System.IO.IOException
-        {
-            /// <summary>
-            /// Sole constructor. </summary>
-            public CommitFailException(Exception cause, ITwoPhaseCommit obj)
-                : base("commit() failed on " + obj, cause)
-            {
-            }
-
-            // For testing purposes
-            internal CommitFailException(string message)
-                : base(message)
-            {
-            }
-
-#if FEATURE_SERIALIZABLE
-            /// <summary>
-            /// Initializes a new instance of this class with serialized data.
-            /// </summary>
-            /// <param name="info">The <see cref="SerializationInfo"/> that holds the serialized object data about the exception being thrown.</param>
-            /// <param name="context">The <see cref="StreamingContext"/> that contains contextual information about the source or destination.</param>
-            public CommitFailException(SerializationInfo info, StreamingContext context)
-                : base(info, context)
-            {
-            }
-#endif
-        }
-
-        /// <summary>
-        /// Rolls back all objects, discarding any exceptions that occur. </summary>
-        private static void Rollback(params ITwoPhaseCommit[] objects)
-        {
-            foreach (ITwoPhaseCommit tpc in objects)
-            {
-                // ignore any exception that occurs during rollback - we want to ensure
-                // all objects are rolled-back.
-                if (tpc != null)
-                {
-                    try
-                    {
-                        tpc.Rollback();
-                    }
-#pragma warning disable 168
-                    catch (Exception t)
-#pragma warning restore 168
-                    {
-                    }
-                }
-            }
-        }
-
-        /// <summary>
-        /// Executes a 2-phase commit algorithm by first
-        /// <seealso cref="ITwoPhaseCommit#prepareCommit()"/> all objects and only if all succeed,
-        /// it proceeds with <seealso cref="ITwoPhaseCommit#commit()"/>. If any of the objects
-        /// fail on either the preparation or actual commit, it terminates and
-        /// <seealso cref="ITwoPhaseCommit#rollback()"/> all of them.
-        /// <p>
-        /// <b>NOTE:</b> it may happen that an object fails to commit after a few have
-        /// already successfully committed. This tool will still issue a rollback
-        /// instruction on them as well, but depending on the implementation, it may
-        /// not have any effect.
-        /// <p>
-        /// <b>NOTE:</b> if any of the objects are {@code null}, this method simply
-        /// skips over them.
-        /// </summary>
-        /// <exception cref="PrepareCommitFailException">
-        ///           if any of the objects fail to
-        ///           <seealso cref="ITwoPhaseCommit#prepareCommit()"/> </exception>
-        /// <exception cref="CommitFailException">
-        ///           if any of the objects fail to <seealso cref="ITwoPhaseCommit#commit()"/> </exception>
-        public static void Execute(params ITwoPhaseCommit[] objects)
-        {
-            ITwoPhaseCommit tpc = null;
-            try
-            {
-                // first, all should successfully prepareCommit()
-                for (int i = 0; i < objects.Length; i++)
-                {
-                    tpc = objects[i];
-                    if (tpc != null)
-                    {
-                        tpc.PrepareCommit();
-                    }
-                }
-            }
-            catch (Exception t)
-            {
-                // The first object that fails results in rolling back all of them and
-                // throwing an exception.
-                Rollback(objects);
-                throw new PrepareCommitFailException(t, tpc);
-            }
-
-            // If all successfully prepareCommit(), attempt the actual commit()
-            try
-            {
-                for (int i = 0; i < objects.Length; i++)
-                {
-                    tpc = objects[i];
-                    if (tpc != null)
-                    {
-                        tpc.Commit();
-                    }
-                }
-            }
-            catch (Exception t)
-            {
-                // The first object that fails results in rolling back all of them and
-                // throwing an exception.
-                Rollback(objects);
-                throw new CommitFailException(t, tpc);
-            }
-        }
-    }
-}
\ No newline at end of file
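
A brief, hedged usage sketch for the tool removed above: IndexWriter implements TwoPhaseCommit in the Java original (and is expected to implement ITwoPhaseCommit in this port), so a typical call site coordinates the writer with some other transactional resource. FileBackedCommit here refers to the hypothetical class sketched earlier; the example is illustrative, not part of the library.

using System;
using Lucene.Net.Index;

// Hedged sketch: commit an IndexWriter and a side-car resource atomically (or roll
// both back).  Execute() simply skips any null entries it is handed.
static class TwoPhaseCommitExample
{
    public static void CommitBoth(IndexWriter writer, FileBackedCommit sideCar)
    {
        try
        {
            TwoPhaseCommitTool.Execute(writer, sideCar);
        }
        catch (TwoPhaseCommitTool.PrepareCommitFailException e)
        {
            // A participant failed in phase 1; all participants were rolled back.
            Console.Error.WriteLine("prepareCommit() failed: " + e.Message);
        }
        catch (TwoPhaseCommitTool.CommitFailException e)
        {
            // A participant failed in phase 2; rollback was attempted, but earlier
            // commits may already be durable (see the NOTE in the doc comment above).
            Console.Error.WriteLine("commit() failed: " + e.Message);
        }
    }
}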

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/TwoStoredFieldsConsumers.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/TwoStoredFieldsConsumers.cs b/src/Lucene.Net.Core/Index/TwoStoredFieldsConsumers.cs
deleted file mode 100644
index 5f4e59c..0000000
--- a/src/Lucene.Net.Core/Index/TwoStoredFieldsConsumers.cs
+++ /dev/null
@@ -1,80 +0,0 @@
-using System;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    /// <summary>
-    /// Just switches between two <seealso cref="DocFieldConsumer"/>s. </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    internal class TwoStoredFieldsConsumers : StoredFieldsConsumer
-    {
-        private readonly StoredFieldsConsumer first;
-        private readonly StoredFieldsConsumer second;
-
-        public TwoStoredFieldsConsumers(StoredFieldsConsumer first, StoredFieldsConsumer second)
-        {
-            this.first = first;
-            this.second = second;
-        }
-
-        public override void AddField(int docID, IIndexableField field, FieldInfo fieldInfo)
-        {
-            first.AddField(docID, field, fieldInfo);
-            second.AddField(docID, field, fieldInfo);
-        }
-
-        public override void Flush(SegmentWriteState state) // LUCENENET NOTE: original was internal, but other implementations require public
-        {
-            first.Flush(state);
-            second.Flush(state);
-        }
-
-        public override void Abort() // LUCENENET NOTE: original was internal, but other implementations require public
-        {
-            try
-            {
-                first.Abort();
-            }
-            catch (Exception)
-            {
-            }
-            try
-            {
-                second.Abort();
-            }
-            catch (Exception)
-            {
-            }
-        }
-
-        public override void StartDocument() // LUCENENET NOTE: original was internal, but other implementations require public
-        {
-            first.StartDocument();
-            second.StartDocument();
-        }
-
-        internal override void FinishDocument()
-        {
-            first.FinishDocument();
-            second.FinishDocument();
-        }
-    }
-}
\ No newline at end of file

