lucenenet-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From nightowl...@apache.org
Subject [20/62] [abbrv] [partial] lucenenet git commit: Renamed Lucene.Net.Core folder Lucene.Net because the dotnet.exe pack command doesn't allow creating a NuGet package with a different name than its folder. Working around it with the script was much more co
Date Tue, 04 Apr 2017 17:19:26 GMT
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/DocValuesFieldUpdates.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DocValuesFieldUpdates.cs b/src/Lucene.Net.Core/Index/DocValuesFieldUpdates.cs
deleted file mode 100644
index 81e5965..0000000
--- a/src/Lucene.Net.Core/Index/DocValuesFieldUpdates.cs
+++ /dev/null
@@ -1,192 +0,0 @@
-using Lucene.Net.Support;
-using System.Collections.Generic;
-using System;
-using System.Diagnostics;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    /// <summary>
-    /// Holds updates of a single <see cref="DocValues"/> field, for a set of documents.
-    /// <para/>
-    /// @lucene.experimental
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    internal abstract class DocValuesFieldUpdates
-    {
-        // LUCENENET specific: de-nested Type enum and renamed DocValuesFieldUpdatesType
-
-        /// <summary>
-        /// An iterator over documents and their updated values. Only documents with
-        /// updates are returned by this iterator, and the documents are returned in
-        /// increasing order.
-        /// </summary>
-#if FEATURE_SERIALIZABLE
-        [Serializable]
-#endif
-        public abstract class Iterator
-        {
-            /// <summary>
-            /// Returns the next document which has an update, or
-            /// <see cref="Search.DocIdSetIterator.NO_MORE_DOCS"/> if there are no more documents to
-            /// return.
-            /// </summary>
-            public abstract int NextDoc();
-
-            /// <summary>
-            /// Returns the current document this iterator is on. </summary>
-            public abstract int Doc { get; }
-
-            /// <summary>
-            /// Returns the value of the document returned from <see cref="NextDoc()"/>. A
-            /// <c>null</c> value means that it was unset for this document.
-            /// </summary>
-            public abstract object Value { get; }
-
-            /// <summary>
-            /// Reset the iterator's state. Should be called before <see cref="NextDoc()"/>
-            /// and <seealso cref="#value()"/>.
-            /// </summary>
-            public abstract void Reset();
-        }
-
-#if FEATURE_SERIALIZABLE
-        [Serializable]
-#endif
-        public class Container
-        {
-            internal readonly IDictionary<string, NumericDocValuesFieldUpdates> numericDVUpdates = new Dictionary<string, NumericDocValuesFieldUpdates>();
-            internal readonly IDictionary<string, BinaryDocValuesFieldUpdates> binaryDVUpdates = new Dictionary<string, BinaryDocValuesFieldUpdates>();
-
-            internal virtual bool Any()
-            {
-                foreach (NumericDocValuesFieldUpdates updates in numericDVUpdates.Values)
-                {
-                    if (updates.Any())
-                    {
-                        return true;
-                    }
-                }
-                foreach (BinaryDocValuesFieldUpdates updates in binaryDVUpdates.Values)
-                {
-                    if (updates.Any())
-                    {
-                        return true;
-                    }
-                }
-                return false;
-            }
-
-            internal virtual int Count // LUCENENET NOTE: This was size() in Lucene.
-            {
-                get { return numericDVUpdates.Count + binaryDVUpdates.Count; }
-            }
-
-            internal virtual DocValuesFieldUpdates GetUpdates(string field, DocValuesFieldUpdatesType type)
-            {
-                switch (type)
-                {
-                    case DocValuesFieldUpdatesType.NUMERIC:
-                        NumericDocValuesFieldUpdates num;
-                        numericDVUpdates.TryGetValue(field, out num);
-                        return num;
-
-                    case DocValuesFieldUpdatesType.BINARY:
-                        BinaryDocValuesFieldUpdates bin;
-                        binaryDVUpdates.TryGetValue(field, out bin);
-                        return bin;
-
-                    default:
-                        throw new System.ArgumentException("unsupported type: " + type);
-                }
-            }
-
-            internal virtual DocValuesFieldUpdates NewUpdates(string field, DocValuesFieldUpdatesType type, int maxDoc)
-            {
-                switch (type)
-                {
-                    case DocValuesFieldUpdatesType.NUMERIC:
-                        NumericDocValuesFieldUpdates numericUpdates;
-                        Debug.Assert(!numericDVUpdates.TryGetValue(field, out numericUpdates));
-                        numericUpdates = new NumericDocValuesFieldUpdates(field, maxDoc);
-                        numericDVUpdates[field] = numericUpdates;
-                        return numericUpdates;
-
-                    case DocValuesFieldUpdatesType.BINARY:
-                        BinaryDocValuesFieldUpdates binaryUpdates;
-                        Debug.Assert(!binaryDVUpdates.TryGetValue(field, out binaryUpdates));
-                        binaryUpdates = new BinaryDocValuesFieldUpdates(field, maxDoc);
-                        binaryDVUpdates[field] = binaryUpdates;
-                        return binaryUpdates;
-
-                    default:
-                        throw new System.ArgumentException("unsupported type: " + type);
-                }
-            }
-
-            public override string ToString()
-            {
-                return "numericDVUpdates=" + Arrays.ToString(numericDVUpdates) + " binaryDVUpdates=" + Arrays.ToString(binaryDVUpdates);
-            }
-        }
-
-        internal readonly string field;
-        internal readonly DocValuesFieldUpdatesType type;
-
-        protected internal DocValuesFieldUpdates(string field, DocValuesFieldUpdatesType type)
-        {
-            this.field = field;
-            this.type = type;
-        }
-
-        /// <summary>
-        /// Add an update to a document. For unsetting a value you should pass
-        /// <c>null</c>.
-        /// </summary>
-        public abstract void Add(int doc, object value);
-
-        /// <summary>
-        /// Returns an <see cref="Iterator"/> over the updated documents and their
-        /// values.
-        /// </summary>
-        public abstract Iterator GetIterator();
-
-        /// <summary>
-        /// Merge with another <see cref="DocValuesFieldUpdates"/>. this is called for a
-        /// segment which received updates while it was being merged. The given updates
-        /// should override whatever updates are in that instance.
-        /// </summary>
-        public abstract void Merge(DocValuesFieldUpdates other);
-
-        /// <summary>
-        /// Returns true if this instance contains any updates. </summary>
-        /// <returns> TODO </returns>
-        public abstract bool Any();
-    }
-
-    // LUCENENET specific - de-nested Type enumeration and renamed DocValuesFieldUpdatesType
-    // primarily so it doesn't conflict with System.Type.
-    public enum DocValuesFieldUpdatesType
-    {
-        NUMERIC,
-        BINARY
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/DocValuesProcessor.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DocValuesProcessor.cs b/src/Lucene.Net.Core/Index/DocValuesProcessor.cs
deleted file mode 100644
index 3f6da3a..0000000
--- a/src/Lucene.Net.Core/Index/DocValuesProcessor.cs
+++ /dev/null
@@ -1,241 +0,0 @@
-using System;
-using System.Collections.Generic;
-using System.Diagnostics;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using BytesRef = Lucene.Net.Util.BytesRef;
-    using Counter = Lucene.Net.Util.Counter;
-    using DocValuesConsumer = Lucene.Net.Codecs.DocValuesConsumer;
-    using DocValuesFormat = Lucene.Net.Codecs.DocValuesFormat;
-    using IOUtils = Lucene.Net.Util.IOUtils;
-
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    internal sealed class DocValuesProcessor : StoredFieldsConsumer
-    {
-        // TODO: somewhat wasteful we also keep a map here; would
-        // be more efficient if we could "reuse" the map/hash
-        // lookup DocFieldProcessor already did "above"
-        private readonly IDictionary<string, DocValuesWriter> writers = new Dictionary<string, DocValuesWriter>();
-
-        private readonly Counter bytesUsed;
-
-        public DocValuesProcessor(Counter bytesUsed)
-        {
-            this.bytesUsed = bytesUsed;
-        }
-
-        public override void StartDocument()
-        {
-        }
-
-        internal override void FinishDocument()
-        {
-        }
-
-        public override void AddField(int docID, IIndexableField field, FieldInfo fieldInfo)
-        {
-            DocValuesType dvType = field.FieldType.DocValueType;
-            if (dvType != DocValuesType.NONE)
-            {
-                fieldInfo.DocValuesType = dvType;
-                if (dvType == DocValuesType.BINARY)
-                {
-                    AddBinaryField(fieldInfo, docID, field.GetBinaryValue());
-                }
-                else if (dvType == DocValuesType.SORTED)
-                {
-                    AddSortedField(fieldInfo, docID, field.GetBinaryValue());
-                }
-                else if (dvType == DocValuesType.SORTED_SET)
-                {
-                    AddSortedSetField(fieldInfo, docID, field.GetBinaryValue());
-                }
-                else if (dvType == DocValuesType.NUMERIC)
-                {
-                    if (!(field.GetNumericValue() is long?))
-                    {
-                        throw new System.ArgumentException("illegal type " + field.GetNumericValue().GetType() + ": DocValues types must be Long");
-                    }
-                    AddNumericField(fieldInfo, docID, (long)field.GetNumericValue());
-                }
-                else
-                {
-                    Debug.Assert(false, "unrecognized DocValues.Type: " + dvType);
-                }
-            }
-        }
-
-        public override void Flush(SegmentWriteState state)
-        {
-            if (writers.Count > 0)
-            {
-                DocValuesFormat fmt = state.SegmentInfo.Codec.DocValuesFormat;
-                DocValuesConsumer dvConsumer = fmt.FieldsConsumer(state);
-                bool success = false;
-                try
-                {
-                    foreach (DocValuesWriter writer in writers.Values)
-                    {
-                        writer.Finish(state.SegmentInfo.DocCount);
-                        writer.Flush(state, dvConsumer);
-                    }
-                    // TODO: catch missing DV fields here?  else we have
-                    // null/"" depending on how docs landed in segments?
-                    // but we can't detect all cases, and we should leave
-                    // this behavior undefined. dv is not "schemaless": its column-stride.
-                    writers.Clear();
-                    success = true;
-                }
-                finally
-                {
-                    if (success)
-                    {
-                        IOUtils.Close(dvConsumer);
-                    }
-                    else
-                    {
-                        IOUtils.CloseWhileHandlingException(dvConsumer);
-                    }
-                }
-            }
-        }
-
-        internal void AddBinaryField(FieldInfo fieldInfo, int docID, BytesRef value)
-        {
-            DocValuesWriter writer;
-            writers.TryGetValue(fieldInfo.Name, out writer);
-            BinaryDocValuesWriter binaryWriter;
-            if (writer == null)
-            {
-                binaryWriter = new BinaryDocValuesWriter(fieldInfo, bytesUsed);
-                writers[fieldInfo.Name] = binaryWriter;
-            }
-            else if (!(writer is BinaryDocValuesWriter))
-            {
-                throw new System.ArgumentException("Incompatible DocValues type: field \"" + fieldInfo.Name + "\" changed from " + GetTypeDesc(writer) + " to binary");
-            }
-            else
-            {
-                binaryWriter = (BinaryDocValuesWriter)writer;
-            }
-            binaryWriter.AddValue(docID, value);
-        }
-
-        internal void AddSortedField(FieldInfo fieldInfo, int docID, BytesRef value)
-        {
-            DocValuesWriter writer;
-            writers.TryGetValue(fieldInfo.Name, out writer);
-            SortedDocValuesWriter sortedWriter;
-            if (writer == null)
-            {
-                sortedWriter = new SortedDocValuesWriter(fieldInfo, bytesUsed);
-                writers[fieldInfo.Name] = sortedWriter;
-            }
-            else if (!(writer is SortedDocValuesWriter))
-            {
-                throw new System.ArgumentException("Incompatible DocValues type: field \"" + fieldInfo.Name + "\" changed from " + GetTypeDesc(writer) + " to sorted");
-            }
-            else
-            {
-                sortedWriter = (SortedDocValuesWriter)writer;
-            }
-            sortedWriter.AddValue(docID, value);
-        }
-
-        internal void AddSortedSetField(FieldInfo fieldInfo, int docID, BytesRef value)
-        {
-            DocValuesWriter writer;
-            writers.TryGetValue(fieldInfo.Name, out writer);
-            SortedSetDocValuesWriter sortedSetWriter;
-            if (writer == null)
-            {
-                sortedSetWriter = new SortedSetDocValuesWriter(fieldInfo, bytesUsed);
-                writers[fieldInfo.Name] = sortedSetWriter;
-            }
-            else if (!(writer is SortedSetDocValuesWriter))
-            {
-                throw new System.ArgumentException("Incompatible DocValues type: field \"" + fieldInfo.Name + "\" changed from " + GetTypeDesc(writer) + " to sorted");
-            }
-            else
-            {
-                sortedSetWriter = (SortedSetDocValuesWriter)writer;
-            }
-            sortedSetWriter.AddValue(docID, value);
-        }
-
-        internal void AddNumericField(FieldInfo fieldInfo, int docID, long value)
-        {
-            DocValuesWriter writer;
-            writers.TryGetValue(fieldInfo.Name, out writer);
-            NumericDocValuesWriter numericWriter;
-            if (writer == null)
-            {
-                numericWriter = new NumericDocValuesWriter(fieldInfo, bytesUsed, true);
-                writers[fieldInfo.Name] = numericWriter;
-            }
-            else if (!(writer is NumericDocValuesWriter))
-            {
-                throw new System.ArgumentException("Incompatible DocValues type: field \"" + fieldInfo.Name + "\" changed from " + GetTypeDesc(writer) + " to numeric");
-            }
-            else
-            {
-                numericWriter = (NumericDocValuesWriter)writer;
-            }
-            numericWriter.AddValue(docID, value);
-        }
-
-        private string GetTypeDesc(DocValuesWriter obj)
-        {
-            if (obj is BinaryDocValuesWriter)
-            {
-                return "binary";
-            }
-            else if (obj is NumericDocValuesWriter)
-            {
-                return "numeric";
-            }
-            else
-            {
-                Debug.Assert(obj is SortedDocValuesWriter);
-                return "sorted";
-            }
-        }
-
-        public override void Abort()
-        {
-            foreach (DocValuesWriter writer in writers.Values)
-            {
-                try
-                {
-                    writer.Abort();
-                }
-#pragma warning disable 168
-                catch (Exception t)
-#pragma warning restore 168
-                {
-                }
-            }
-            writers.Clear();
-        }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/DocValuesUpdate.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DocValuesUpdate.cs b/src/Lucene.Net.Core/Index/DocValuesUpdate.cs
deleted file mode 100644
index c6f562b..0000000
--- a/src/Lucene.Net.Core/Index/DocValuesUpdate.cs
+++ /dev/null
@@ -1,124 +0,0 @@
-using Lucene.Net.Documents;
-using System;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using BytesRef = Lucene.Net.Util.BytesRef;
-    using NumericDocValuesField = NumericDocValuesField;
-    using RamUsageEstimator = Lucene.Net.Util.RamUsageEstimator;
-
-    /// <summary>
-    /// An in-place update to a <see cref="DocValues"/> field. </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public abstract class DocValuesUpdate
-    {
-        /* Rough logic: OBJ_HEADER + 3*PTR + INT
-         * Term: OBJ_HEADER + 2*PTR
-         *   Term.field: 2*OBJ_HEADER + 4*INT + PTR + string.length*CHAR
-         *   Term.bytes: 2*OBJ_HEADER + 2*INT + PTR + bytes.length
-         * String: 2*OBJ_HEADER + 4*INT + PTR + string.length*CHAR
-         * T: OBJ_HEADER
-         */
-        private static readonly int RAW_SIZE_IN_BYTES = 8 * RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 8 * RamUsageEstimator.NUM_BYTES_OBJECT_REF + 8 * RamUsageEstimator.NUM_BYTES_INT32;
-
-        internal readonly DocValuesFieldUpdatesType type;
-        internal readonly Term term;
-        internal readonly string field;
-        internal readonly object value;
-        internal int docIDUpto = -1; // unassigned until applied, and confusing that it's here, when it's just used in BufferedDeletes...
-
-        /// <summary>
-        /// Constructor.
-        /// </summary>
-        /// <param name="type"> the <see cref="DocValuesFieldUpdatesType"/> </param>
-        /// <param name="term"> the <see cref="Term"/> which determines the documents that will be updated </param>
-        /// <param name="field"> the <see cref="NumericDocValuesField"/> to update </param>
-        /// <param name="value"> the updated value </param>
-        protected DocValuesUpdate(DocValuesFieldUpdatesType type, Term term, string field, object value)
-        {
-            this.type = type;
-            this.term = term;
-            this.field = field;
-            this.value = value;
-        }
-
-        internal abstract long GetValueSizeInBytes();
-
-        internal int GetSizeInBytes()
-        {
-            int sizeInBytes = RAW_SIZE_IN_BYTES;
-            sizeInBytes += term.Field.Length * RamUsageEstimator.NUM_BYTES_CHAR;
-            sizeInBytes += term.Bytes.Bytes.Length;
-            sizeInBytes += field.Length * RamUsageEstimator.NUM_BYTES_CHAR;
-            sizeInBytes += (int)GetValueSizeInBytes();
-            return sizeInBytes;
-        }
-
-        public override string ToString()
-        {
-            return "term=" + term + ",field=" + field + ",value=" + value;
-        }
-
-        /// <summary>
-        /// An in-place update to a binary <see cref="DocValues"/> field </summary>
-#if FEATURE_SERIALIZABLE
-        [Serializable]
-#endif
-        public sealed class BinaryDocValuesUpdate : DocValuesUpdate
-        {
-            /* Size of BytesRef: 2*INT + ARRAY_HEADER + PTR */
-            private static readonly long RAW_VALUE_SIZE_IN_BYTES = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + 2 * RamUsageEstimator.NUM_BYTES_INT32 + RamUsageEstimator.NUM_BYTES_OBJECT_REF;
-
-            internal static readonly BytesRef MISSING = new BytesRef();
-
-            internal BinaryDocValuesUpdate(Term term, string field, BytesRef value)
-                : base(DocValuesFieldUpdatesType.BINARY, term, field, value == null ? MISSING : value)
-            {
-            }
-
-            internal override long GetValueSizeInBytes()
-            {
-                return RAW_VALUE_SIZE_IN_BYTES + ((BytesRef)value).Bytes.Length;
-            }
-        }
-
-        /// <summary>
-        /// An in-place update to a numeric <see cref="DocValues"/> field </summary>
-#if FEATURE_SERIALIZABLE
-        [Serializable]
-#endif
-        public sealed class NumericDocValuesUpdate : DocValuesUpdate // LUCENENET NOTE: Made public rather than internal because it is on a public API
-        {
-            internal static readonly long? MISSING = new long?(0);
-
-            public NumericDocValuesUpdate(Term term, string field, long? value)
-                : base(DocValuesFieldUpdatesType.NUMERIC, term, field, value == null ? MISSING : value)
-            {
-            }
-
-            internal override long GetValueSizeInBytes()
-            {
-                return RamUsageEstimator.NUM_BYTES_INT64;
-            }
-        }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/DocValuesWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DocValuesWriter.cs b/src/Lucene.Net.Core/Index/DocValuesWriter.cs
deleted file mode 100644
index 85e9b0f..0000000
--- a/src/Lucene.Net.Core/Index/DocValuesWriter.cs
+++ /dev/null
@@ -1,35 +0,0 @@
-using System;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using DocValuesConsumer = Lucene.Net.Codecs.DocValuesConsumer;
-
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    internal abstract class DocValuesWriter
-    {
-        public abstract void Abort();
-
-        public abstract void Finish(int numDoc);
-
-        public abstract void Flush(SegmentWriteState state, DocValuesConsumer consumer);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/DocsAndPositionsEnum.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DocsAndPositionsEnum.cs b/src/Lucene.Net.Core/Index/DocsAndPositionsEnum.cs
deleted file mode 100644
index 17fb5e4..0000000
--- a/src/Lucene.Net.Core/Index/DocsAndPositionsEnum.cs
+++ /dev/null
@@ -1,97 +0,0 @@
-using System;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    // javadocs
-    using BytesRef = Lucene.Net.Util.BytesRef;
-
-    // LUCENENET specific - converted constants from DocsAndPositionsEnum
-    // into a flags enum.
-    [Flags]
-    public enum DocsAndPositionsFlags
-    {
-        /// <summary>
-        /// Flag to pass to <see cref="TermsEnum.DocsAndPositions(Util.IBits, DocsAndPositionsEnum, DocsAndPositionsFlags)"/> 
-        /// if you require that no offsets and payloads will be returned.
-        /// </summary>
-        NONE = 0x0,
-
-        /// <summary>
-        /// Flag to pass to <see cref="TermsEnum.DocsAndPositions(Util.IBits, DocsAndPositionsEnum, DocsAndPositionsFlags)"/>
-        /// if you require offsets in the returned enum.
-        /// </summary>
-        OFFSETS = 0x1, // LUCENENET specific - renamed from FLAG_OFFSETS since FLAG_ makes it redundant
-
-        /// <summary>
-        /// Flag to pass to  <see cref="TermsEnum.DocsAndPositions(Util.IBits, DocsAndPositionsEnum, DocsAndPositionsFlags)"/>
-        /// if you require payloads in the returned enum.
-        /// </summary>
-        PAYLOADS = 0x2 // LUCENENET specific - renamed from FLAG_PAYLOADS since FLAG_ makes it redundant
-    }
-
-
-    /// <summary>
-    /// Also iterates through positions. </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public abstract class DocsAndPositionsEnum : DocsEnum
-    {
-        // LUCENENET specific - made flags into their own [Flags] enum named DocsAndPositionsFlags and de-nested from this type
-
-        /// <summary>
-        /// Sole constructor. (For invocation by subclass
-        /// constructors, typically implicit.)
-        /// </summary>
-        protected DocsAndPositionsEnum()
-        {
-        }
-
-        /// <summary>
-        /// Returns the next position.  You should only call this
-        /// up to <see cref="DocsEnum.Freq"/> times else
-        /// the behavior is not defined.  If positions were not
-        /// indexed this will return -1; this only happens if
-        /// offsets were indexed and you passed needsOffset=true
-        /// when pulling the enum.
-        /// </summary>
-        public abstract int NextPosition();
-
-        /// <summary>
-        /// Returns start offset for the current position, or -1
-        /// if offsets were not indexed.
-        /// </summary>
-        public abstract int StartOffset { get; }
-
-        /// <summary>
-        /// Returns end offset for the current position, or -1 if
-        /// offsets were not indexed.
-        /// </summary>
-        public abstract int EndOffset { get; }
-
-        /// <summary>
-        /// Returns the payload at this position, or <c>null</c> if no
-        /// payload was indexed. You should not modify anything
-        /// (neither members of the returned <see cref="BytesRef"/> nor bytes
-        /// in the <see cref="T:byte[]"/>).
-        /// </summary>
-        public abstract BytesRef GetPayload();
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/DocsEnum.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DocsEnum.cs b/src/Lucene.Net.Core/Index/DocsEnum.cs
deleted file mode 100644
index 32b022e..0000000
--- a/src/Lucene.Net.Core/Index/DocsEnum.cs
+++ /dev/null
@@ -1,91 +0,0 @@
-using System;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using AttributeSource = Lucene.Net.Util.AttributeSource;
-    using DocIdSetIterator = Lucene.Net.Search.DocIdSetIterator;
-
-    // LUCENENET specific - converted constants from DocsEnum
-    // into a flags enum.
-    [Flags]
-    public enum DocsFlags
-    {
-        /// <summary>
-        /// Flag to pass to <see cref="TermsEnum.Docs(Util.IBits, DocsEnum, DocsFlags)"/> if you don't
-        /// require term frequencies in the returned enum.
-        /// </summary>
-        NONE = 0x0,
-
-        /// <summary>
-        /// Flag to pass to <see cref="TermsEnum.Docs(Util.IBits, DocsEnum, DocsFlags)"/>
-        /// if you require term frequencies in the returned enum.
-        /// </summary>
-        FREQS = 0x1
-    }
-
-    /// <summary>
-    /// Iterates through the documents and term freqs.
-    /// NOTE: you must first call <see cref="DocIdSetIterator.NextDoc()"/> before using
-    /// any of the per-doc methods.
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public abstract class DocsEnum : DocIdSetIterator
-    {
-        // LUCENENET specific - made flags into their own [Flags] enum named DocsFlags and de-nested from this type
-
-        private AttributeSource atts = null;
-
-        /// <summary>
-        /// Sole constructor. (For invocation by subclass
-        /// constructors, typically implicit.)
-        /// </summary>
-        protected DocsEnum()
-        {
-        }
-
-        /// <summary>
-        /// Returns term frequency in the current document, or 1 if the field was
-        /// indexed with <see cref="IndexOptions.DOCS_ONLY"/>. Do not call this before
-        /// <see cref="DocIdSetIterator.NextDoc()"/> is first called, nor after <see cref="DocIdSetIterator.NextDoc()"/> returns
-        /// <see cref="DocIdSetIterator.NO_MORE_DOCS"/>.
-        ///
-        /// <para/>
-        /// <b>NOTE:</b> if the <see cref="DocsEnum"/> was obtained with <see cref="DocsFlags.NONE"/>,
-        /// the result of this method is undefined.
-        /// </summary>
-        public abstract int Freq { get; }
-
-        /// <summary>
-        /// Returns the related attributes. </summary>
-        public virtual AttributeSource Attributes
-        {
-            get
-            {
-                if (atts == null)
-                {
-                    atts = new AttributeSource();
-                }
-                return atts;
-            }
-        }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/DocumentsWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DocumentsWriter.cs b/src/Lucene.Net.Core/Index/DocumentsWriter.cs
deleted file mode 100644
index 3577487..0000000
--- a/src/Lucene.Net.Core/Index/DocumentsWriter.cs
+++ /dev/null
@@ -1,935 +0,0 @@
-using Lucene.Net.Support;
-using System;
-using System.Collections.Concurrent;
-using System.Collections.Generic;
-using System.Diagnostics;
-using System.Threading;
-using System.Reflection;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using Analyzer = Lucene.Net.Analysis.Analyzer;
-    using BinaryDocValuesUpdate = Lucene.Net.Index.DocValuesUpdate.BinaryDocValuesUpdate;
-    using BytesRef = Lucene.Net.Util.BytesRef;
-    using Directory = Lucene.Net.Store.Directory;
-    using IEvent = Lucene.Net.Index.IndexWriter.IEvent;
-    using FlushedSegment = Lucene.Net.Index.DocumentsWriterPerThread.FlushedSegment;
-    using InfoStream = Lucene.Net.Util.InfoStream;
-    using NumericDocValuesUpdate = Lucene.Net.Index.DocValuesUpdate.NumericDocValuesUpdate;
-    using Query = Lucene.Net.Search.Query;
-    using SegmentFlushTicket = Lucene.Net.Index.DocumentsWriterFlushQueue.SegmentFlushTicket;
-    using ThreadState = Lucene.Net.Index.DocumentsWriterPerThreadPool.ThreadState;
-
-    /// <summary>
-    /// This class accepts multiple added documents and directly
-    /// writes segment files.
-    /// <para/>
-    /// Each added document is passed to the <see cref="DocConsumer"/>,
-    /// which in turn processes the document and interacts with
-    /// other consumers in the indexing chain.  Certain
-    /// consumers, like <see cref="StoredFieldsConsumer"/> and 
-    /// <see cref="TermVectorsConsumer"/>, digest a document and
-    /// immediately write bytes to the "doc store" files (ie,
-    /// they do not consume RAM per document, except while they
-    /// are processing the document).
-    /// <para/>
-    /// Other consumers, eg <see cref="FreqProxTermsWriter"/> and
-    /// <see cref="NormsConsumer"/>, buffer bytes in RAM and flush only
-    /// when a new segment is produced.
-    /// <para/>
-    /// Once we have used our allowed RAM buffer, or the number
-    /// of added docs is large enough (in the case we are
-    /// flushing by doc count instead of RAM usage), we create a
-    /// real segment and flush it to the Directory.
-    /// <para/>
-    /// Threads:
-    /// <para/>
-    /// Multiple threads are allowed into AddDocument at once.
-    /// There is an initial synchronized call to <see cref="DocumentsWriterPerThreadPool.GetThreadState(int)"/>
-    /// which allocates a <see cref="ThreadState"/> for this thread.  The same
-    /// thread will get the same <see cref="ThreadState"/> over time (thread
-    /// affinity) so that if there are consistent patterns (for
-    /// example each thread is indexing a different content
-    /// source) then we make better use of RAM.  Then
-    /// ProcessDocument is called on that <see cref="ThreadState"/> without
-    /// synchronization (most of the "heavy lifting" is in this
-    /// call).  Finally the synchronized "finishDocument" is
-    /// called to flush changes to the directory.
-    /// <para/>
-    /// When flush is called by <see cref="IndexWriter"/> we forcefully idle
-    /// all threads and flush only once they are all idle.  this
-    /// means you can call flush with a given thread even while
-    /// other threads are actively adding/deleting documents.
-    /// <para/>
-    ///
-    /// Exceptions:
-    /// <para/>
-    /// Because this class directly updates in-memory posting
-    /// lists, and flushes stored fields and term vectors
-    /// directly to files in the directory, there are certain
-    /// limited times when an exception can corrupt this state.
-    /// For example, a disk full while flushing stored fields
-    /// leaves this file in a corrupt state.  Or, an OOM
-    /// exception while appending to the in-memory posting lists
-    /// can corrupt that posting list.  We call such exceptions
-    /// "aborting exceptions".  In these cases we must call
-    /// <see cref="Abort(IndexWriter)"/> to discard all docs added since the last flush.
-    /// <para/>
-    /// All other exceptions ("non-aborting exceptions") can
-    /// still partially update the index structures.  These
-    /// updates are consistent, but, they represent only a part
-    /// of the document seen up until the exception was hit.
-    /// When this happens, we immediately mark the document as
-    /// deleted so that the document is always atomically ("all
-    /// or none") added to the index.
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    internal sealed class DocumentsWriter : IDisposable
-    {
-        private readonly Directory directory;
-
-        private volatile bool closed;
-
-        private readonly InfoStream infoStream;
-
-        private readonly LiveIndexWriterConfig config;
-
-        private readonly AtomicInt32 numDocsInRAM = new AtomicInt32(0);
-
-        // TODO: cut over to BytesRefHash in BufferedDeletes
-        internal volatile DocumentsWriterDeleteQueue deleteQueue = new DocumentsWriterDeleteQueue();
-
-        private readonly DocumentsWriterFlushQueue ticketQueue = new DocumentsWriterFlushQueue();
-
-        /// <summary>
-        /// we preserve changes during a full flush since IW might not checkout before
-        /// we release all changes. NRT Readers otherwise suddenly return true from
-        /// IsCurrent() while there are actually changes currently committed. See also
-        /// <see cref="AnyChanges()"/> &amp; <see cref="FlushAllThreads(IndexWriter)"/>
-        /// </summary>
-        private volatile bool pendingChangesInCurrentFullFlush;
-
-        internal readonly DocumentsWriterPerThreadPool perThreadPool;
-        internal readonly FlushPolicy flushPolicy;
-        internal readonly DocumentsWriterFlushControl flushControl;
-        private readonly IndexWriter writer;
-        private readonly ConcurrentQueue<IEvent> events;
-
-        internal DocumentsWriter(IndexWriter writer, LiveIndexWriterConfig config, Directory directory)
-        {
-            this.directory = directory;
-            this.config = config;
-            this.infoStream = config.InfoStream;
-            this.perThreadPool = config.IndexerThreadPool;
-            flushPolicy = config.FlushPolicy;
-            this.writer = writer;
-            this.events = new ConcurrentQueue<IEvent>();
-            flushControl = new DocumentsWriterFlushControl(this, config, writer.bufferedUpdatesStream);
-        }
-
-        internal bool DeleteQueries(params Query[] queries)
-        {
-            lock (this)
-            {
-                // TODO why is this synchronized?
-                DocumentsWriterDeleteQueue deleteQueue = this.deleteQueue;
-                deleteQueue.AddDelete(queries);
-                flushControl.DoOnDelete();
-                return ApplyAllDeletes(deleteQueue);
-            }
-        }
-
-        // TODO: we could check w/ FreqProxTermsWriter: if the
-        // term doesn't exist, don't bother buffering into the
-        // per-DWPT map (but still must go into the global map)
-        internal bool DeleteTerms(params Term[] terms)
-        {
-            lock (this)
-            {
-                // TODO why is this synchronized?
-                DocumentsWriterDeleteQueue deleteQueue = this.deleteQueue;
-                deleteQueue.AddDelete(terms);
-                flushControl.DoOnDelete();
-                return ApplyAllDeletes(deleteQueue);
-            }
-        }
-
-        internal bool UpdateNumericDocValue(Term term, string field, long? value)
-        {
-            lock (this)
-            {
-                DocumentsWriterDeleteQueue deleteQueue = this.deleteQueue;
-                deleteQueue.AddNumericUpdate(new NumericDocValuesUpdate(term, field, value));
-                flushControl.DoOnDelete();
-                return ApplyAllDeletes(deleteQueue);
-            }
-        }
-
-        internal bool UpdateBinaryDocValue(Term term, string field, BytesRef value)
-        {
-            lock (this)
-            {
-                DocumentsWriterDeleteQueue deleteQueue = this.deleteQueue;
-                deleteQueue.AddBinaryUpdate(new BinaryDocValuesUpdate(term, field, value));
-                flushControl.DoOnDelete();
-                return ApplyAllDeletes(deleteQueue);
-            }
-        }
-
-        internal DocumentsWriterDeleteQueue CurrentDeleteSession
-        {
-            get { return deleteQueue; }
-        }
-
-        private bool ApplyAllDeletes(DocumentsWriterDeleteQueue deleteQueue)
-        {
-            if (flushControl.GetAndResetApplyAllDeletes())
-            {
-                if (deleteQueue != null && !flushControl.IsFullFlush)
-                {
-                    ticketQueue.AddDeletes(deleteQueue);
-                }
-                PutEvent(ApplyDeletesEvent.INSTANCE); // apply deletes event forces a purge
-                return true;
-            }
-            return false;
-        }
-
-        internal int PurgeBuffer(IndexWriter writer, bool forced)
-        {
-            if (forced)
-            {
-                return ticketQueue.ForcePurge(writer);
-            }
-            else
-            {
-                return ticketQueue.TryPurge(writer);
-            }
-        }
-
-        /// <summary>
-        /// Returns how many docs are currently buffered in RAM. </summary>
-        internal int NumDocs
-        {
-            get
-            {
-                return numDocsInRAM.Get();
-            }
-        }
-
-        private void EnsureOpen()
-        {
-            if (closed)
-            {
-                throw new ObjectDisposedException(this.GetType().GetTypeInfo().FullName, "this IndexWriter is closed");
-            }
-        }
-
-        /// <summary>
-        /// Called if we hit an exception at a bad time (when
-        ///  updating the index files) and must discard all
-        ///  currently buffered docs.  this resets our state,
-        ///  discarding any docs added since last flush.
-        /// </summary>
-        internal void Abort(IndexWriter writer)
-        {
-            lock (this)
-            {
-                //Debug.Assert(!Thread.HoldsLock(writer), "IndexWriter lock should never be held when aborting");
-                bool success = false;
-                HashSet<string> newFilesSet = new HashSet<string>();
-                try
-                {
-                    deleteQueue.Clear();
-                    if (infoStream.IsEnabled("DW"))
-                    {
-                        infoStream.Message("DW", "abort");
-                    }
-                    int limit = perThreadPool.NumThreadStatesActive;
-                    for (int i = 0; i < limit; i++)
-                    {
-                        ThreadState perThread = perThreadPool.GetThreadState(i);
-                        perThread.@Lock();
-                        try
-                        {
-                            AbortThreadState(perThread, newFilesSet);
-                        }
-                        finally
-                        {
-                            perThread.Unlock();
-                        }
-                    }
-                    flushControl.AbortPendingFlushes(newFilesSet);
-                    PutEvent(new DeleteNewFilesEvent(newFilesSet));
-                    flushControl.WaitForFlush();
-                    success = true;
-                }
-                finally
-                {
-                    if (infoStream.IsEnabled("DW"))
-                    {
-                        infoStream.Message("DW", "done abort; abortedFiles=" + Arrays.ToString(newFilesSet) + " success=" + success);
-                    }
-                }
-            }
-        }
-
-        internal void LockAndAbortAll(IndexWriter indexWriter)
-        {
-            lock (this)
-            {
-                //Debug.Assert(indexWriter.HoldsFullFlushLock());
-                if (infoStream.IsEnabled("DW"))
-                {
-                    infoStream.Message("DW", "lockAndAbortAll");
-                }
-                bool success = false;
-                try
-                {
-                    deleteQueue.Clear();
-                    int limit = perThreadPool.MaxThreadStates;
-                    HashSet<string> newFilesSet = new HashSet<string>();
-                    for (int i = 0; i < limit; i++)
-                    {
-                        ThreadState perThread = perThreadPool.GetThreadState(i);
-                        perThread.@Lock();
-                        AbortThreadState(perThread, newFilesSet);
-                    }
-                    deleteQueue.Clear();
-                    flushControl.AbortPendingFlushes(newFilesSet);
-                    PutEvent(new DeleteNewFilesEvent(newFilesSet));
-                    flushControl.WaitForFlush();
-                    success = true;
-                }
-                finally
-                {
-                    if (infoStream.IsEnabled("DW"))
-                    {
-                        infoStream.Message("DW", "finished lockAndAbortAll success=" + success);
-                    }
-                    if (!success)
-                    {
-                        // if something happens here we unlock all states again
-                        UnlockAllAfterAbortAll(indexWriter);
-                    }
-                }
-            }
-        }
-
-        private void AbortThreadState(ThreadState perThread, ISet<string> newFiles)
-        {
-            //Debug.Assert(perThread.HeldByCurrentThread);
-            if (perThread.IsActive) // we might be closed
-            {
-                if (perThread.IsInitialized)
-                {
-                    try
-                    {
-                        SubtractFlushedNumDocs(perThread.dwpt.NumDocsInRAM);
-                        perThread.dwpt.Abort(newFiles);
-                    }
-                    finally
-                    {
-                        perThread.dwpt.CheckAndResetHasAborted();
-                        flushControl.DoOnAbort(perThread);
-                    }
-                }
-                else
-                {
-                    flushControl.DoOnAbort(perThread);
-                }
-            }
-            else
-            {
-                Debug.Assert(closed);
-            }
-        }
-
-        internal void UnlockAllAfterAbortAll(IndexWriter indexWriter)
-        {
-            lock (this)
-            {
-                //Debug.Assert(indexWriter.HoldsFullFlushLock());
-                if (infoStream.IsEnabled("DW"))
-                {
-                    infoStream.Message("DW", "unlockAll");
-                }
-                int limit = perThreadPool.MaxThreadStates;
-                for (int i = 0; i < limit; i++)
-                {
-                    try
-                    {
-                        ThreadState perThread = perThreadPool.GetThreadState(i);
-                        //if (perThread.HeldByCurrentThread)
-                        //{
-                        perThread.Unlock();
-                        //}
-                    }
-                    catch (Exception e)
-                    {
-                        if (infoStream.IsEnabled("DW"))
-                        {
-                            infoStream.Message("DW", "unlockAll: could not unlock state: " + i + " msg:" + e.Message);
-                        }
-                        // ignore & keep on unlocking
-                    }
-                }
-            }
-        }
-
-        internal bool AnyChanges()
-        {
-            if (infoStream.IsEnabled("DW"))
-            {
-                infoStream.Message("DW", "anyChanges? numDocsInRam=" + numDocsInRAM.Get() + " deletes=" + AnyDeletions() + " hasTickets:" + ticketQueue.HasTickets + " pendingChangesInFullFlush: " + pendingChangesInCurrentFullFlush);
-            }
-            /*
-             * changes are either in a DWPT or in the deleteQueue.
-             * yet if we currently flush deletes and / or dwpt there
-             * could be a window where all changes are in the ticket queue
-             * before they are published to the IW. ie we need to check if the
-             * ticket queue has any tickets.
-             */
-            return numDocsInRAM.Get() != 0 || AnyDeletions() || ticketQueue.HasTickets || pendingChangesInCurrentFullFlush;
-        }
-
-        public int BufferedDeleteTermsSize
-        {
-            get
-            {
-                return deleteQueue.BufferedUpdatesTermsSize;
-            }
-        }
-
-        //for testing
-        public int NumBufferedDeleteTerms
-        {
-            get
-            {
-                return deleteQueue.NumGlobalTermDeletes;
-            }
-        }
-
-        public bool AnyDeletions()
-        {
-            return deleteQueue.AnyChanges();
-        }
-
-        public void Dispose()
-        {
-            closed = true;
-            flushControl.SetClosed();
-        }
-
-        private bool PreUpdate()
-        {
-            EnsureOpen();
-            bool hasEvents = false;
-            if (flushControl.AnyStalledThreads() || flushControl.NumQueuedFlushes > 0)
-            {
-                // Help out flushing any queued DWPTs so we can un-stall:
-                if (infoStream.IsEnabled("DW"))
-                {
-                    infoStream.Message("DW", "DocumentsWriter has queued dwpt; will hijack this thread to flush pending segment(s)");
-                }
-                do
-                {
-                    // Try pick up pending threads here if possible
-                    DocumentsWriterPerThread flushingDWPT;
-                    while ((flushingDWPT = flushControl.NextPendingFlush()) != null)
-                    {
-                        // Don't push the delete here since the update could fail!
-                        hasEvents |= DoFlush(flushingDWPT);
-                    }
-
-                    if (infoStream.IsEnabled("DW"))
-                    {
-                        if (flushControl.AnyStalledThreads())
-                        {
-                            infoStream.Message("DW", "WARNING DocumentsWriter has stalled threads; waiting");
-                        }
-                    }
-
-                    flushControl.WaitIfStalled(); // block if stalled
-                } while (flushControl.NumQueuedFlushes != 0); // still queued DWPTs try help flushing
-
-                if (infoStream.IsEnabled("DW"))
-                {
-                    infoStream.Message("DW", "continue indexing after helping out flushing DocumentsWriter is healthy");
-                }
-            }
-            return hasEvents;
-        }
-
-        private bool PostUpdate(DocumentsWriterPerThread flushingDWPT, bool hasEvents)
-        {
-            hasEvents |= ApplyAllDeletes(deleteQueue);
-            if (flushingDWPT != null)
-            {
-                hasEvents |= DoFlush(flushingDWPT);
-            }
-            else
-            {
-                DocumentsWriterPerThread nextPendingFlush = flushControl.NextPendingFlush();
-                if (nextPendingFlush != null)
-                {
-                    hasEvents |= DoFlush(nextPendingFlush);
-                }
-            }
-
-            return hasEvents;
-        }
-
-        private void EnsureInitialized(ThreadState state)
-        {
-            if (state.IsActive && state.dwpt == null)
-            {
-                FieldInfos.Builder infos = new FieldInfos.Builder(writer.globalFieldNumberMap);
-                state.dwpt = new DocumentsWriterPerThread(writer.NewSegmentName(), directory, config, infoStream, deleteQueue, infos);
-            }
-        }
-
-        internal bool UpdateDocuments(IEnumerable<IEnumerable<IIndexableField>> docs, Analyzer analyzer, Term delTerm)
-        {
-            bool hasEvents = PreUpdate();
-
-            ThreadState perThread = flushControl.ObtainAndLock();
-            DocumentsWriterPerThread flushingDWPT;
-
-            try
-            {
-                if (!perThread.IsActive)
-                {
-                    EnsureOpen();
-                    Debug.Assert(false, "perThread is not active but we are still open");
-                }
-                EnsureInitialized(perThread);
-                Debug.Assert(perThread.IsInitialized);
-                DocumentsWriterPerThread dwpt = perThread.dwpt;
-                int dwptNumDocs = dwpt.NumDocsInRAM;
-                try
-                {
-                    int docCount = dwpt.UpdateDocuments(docs, analyzer, delTerm);
-                    numDocsInRAM.AddAndGet(docCount);
-                }
-                finally
-                {
-                    if (dwpt.CheckAndResetHasAborted())
-                    {
-                        if (dwpt.PendingFilesToDelete.Count > 0)
-                        {
-                            PutEvent(new DeleteNewFilesEvent(dwpt.PendingFilesToDelete));
-                        }
-                        SubtractFlushedNumDocs(dwptNumDocs);
-                        flushControl.DoOnAbort(perThread);
-                    }
-                }
-                bool isUpdate = delTerm != null;
-                flushingDWPT = flushControl.DoAfterDocument(perThread, isUpdate);
-            }
-            finally
-            {
-                perThread.Unlock();
-            }
-
-            return PostUpdate(flushingDWPT, hasEvents);
-        }
-
-        internal bool UpdateDocument(IEnumerable<IIndexableField> doc, Analyzer analyzer, Term delTerm)
-        {
-            bool hasEvents = PreUpdate();
-
-            ThreadState perThread = flushControl.ObtainAndLock();
-
-            DocumentsWriterPerThread flushingDWPT;
-            try
-            {
-                if (!perThread.IsActive)
-                {
-                    EnsureOpen();
-                    Debug.Assert(false, "perThread is not active but we are still open");
-                }
-                EnsureInitialized(perThread);
-                Debug.Assert(perThread.IsInitialized);
-                DocumentsWriterPerThread dwpt = perThread.dwpt;
-                int dwptNumDocs = dwpt.NumDocsInRAM;
-                try
-                {
-                    dwpt.UpdateDocument(doc, analyzer, delTerm);
-                    numDocsInRAM.IncrementAndGet();
-                }
-                finally
-                {
-                    if (dwpt.CheckAndResetHasAborted())
-                    {
-                        if (dwpt.PendingFilesToDelete.Count > 0)
-                        {
-                            PutEvent(new DeleteNewFilesEvent(dwpt.PendingFilesToDelete));
-                        }
-                        SubtractFlushedNumDocs(dwptNumDocs);
-                        flushControl.DoOnAbort(perThread);
-                    }
-                }
-                bool isUpdate = delTerm != null;
-                flushingDWPT = flushControl.DoAfterDocument(perThread, isUpdate);
-            }
-            finally
-            {
-                perThread.Unlock();
-            }
-
-            return PostUpdate(flushingDWPT, hasEvents);
-        }
-
-        private bool DoFlush(DocumentsWriterPerThread flushingDWPT)
-        {
-            bool hasEvents = false;
-            while (flushingDWPT != null)
-            {
-                hasEvents = true;
-                bool success = false;
-                SegmentFlushTicket ticket = null;
-                try
-                {
-                    Debug.Assert(currentFullFlushDelQueue == null || flushingDWPT.deleteQueue == currentFullFlushDelQueue, "expected: " + currentFullFlushDelQueue + "but was: " + flushingDWPT.deleteQueue + " " + flushControl.IsFullFlush);
-                    /*
-                     * Since with DWPT the flush process is concurrent and several DWPT
-                     * could flush at the same time we must maintain the order of the
-                     * flushes before we can apply the flushed segment and the frozen global
-                     * deletes it is buffering. The reason for this is that the global
-                     * deletes mark a certain point in time where we took a DWPT out of
-                     * rotation and freeze the global deletes.
-                     *
-                     * Example: A flush 'A' starts and freezes the global deletes, then
-                     * flush 'B' starts and freezes all deletes that occurred since 'A'
-                     * started. If 'B' finishes before 'A' we need to wait until 'A' is done
-                     * otherwise the deletes frozen by 'B' are not applied to 'A' and we
-                     * might fail to delete documents in 'A'.
-                     */
-                    try
-                    {
-                        // Each flush is assigned a ticket in the order they acquire the ticketQueue lock
-                        ticket = ticketQueue.AddFlushTicket(flushingDWPT);
-
-                        int flushingDocsInRam = flushingDWPT.NumDocsInRAM;
-                        bool dwptSuccess = false;
-                        try
-                        {
-                            // flush concurrently without locking
-                            FlushedSegment newSegment = flushingDWPT.Flush();
-                            ticketQueue.AddSegment(ticket, newSegment);
-                            dwptSuccess = true;
-                        }
-                        finally
-                        {
-                            SubtractFlushedNumDocs(flushingDocsInRam);
-                            if (flushingDWPT.PendingFilesToDelete.Count > 0)
-                            {
-                                PutEvent(new DeleteNewFilesEvent(flushingDWPT.PendingFilesToDelete));
-                                hasEvents = true;
-                            }
-                            if (!dwptSuccess)
-                            {
-                                PutEvent(new FlushFailedEvent(flushingDWPT.SegmentInfo));
-                                hasEvents = true;
-                            }
-                        }
-                        // flush was successful once we reached this point - new seg. has been assigned to the ticket!
-                        success = true;
-                    }
-                    finally
-                    {
-                        if (!success && ticket != null)
-                        {
-                            // In the case of a failure make sure we are making progress and
-                            // apply all the deletes since the segment flush failed since the flush
-                            // ticket could hold global deletes see FlushTicket#canPublish()
-                            ticketQueue.MarkTicketFailed(ticket);
-                        }
-                    }
-                    /*
-                     * Now we are done and try to flush the ticket queue if the head of the
-                     * queue has already finished the flush.
-                     */
-                    if (ticketQueue.TicketCount >= perThreadPool.NumThreadStatesActive)
-                    {
-                        // this means there is a backlog: the one
-                        // thread in innerPurge can't keep up with all
-                        // other threads flushing segments.  In this case
-                        // we forcefully stall the producers.
-                        PutEvent(ForcedPurgeEvent.INSTANCE);
-                        break;
-                    }
-                }
-                finally
-                {
-                    flushControl.DoAfterFlush(flushingDWPT);
-                    flushingDWPT.CheckAndResetHasAborted();
-                }
-
-                flushingDWPT = flushControl.NextPendingFlush();
-            }
-            if (hasEvents)
-            {
-                PutEvent(MergePendingEvent.INSTANCE);
-            }
-            // If deletes alone are consuming > 1/2 our RAM
-            // buffer, force them all to apply now. this is to
-            // prevent too-frequent flushing of a long tail of
-            // tiny segments:
-            double ramBufferSizeMB = config.RAMBufferSizeMB;
-            if (ramBufferSizeMB != Index.IndexWriterConfig.DISABLE_AUTO_FLUSH && flushControl.DeleteBytesUsed > (1024 * 1024 * ramBufferSizeMB / 2))
-            {
-                if (infoStream.IsEnabled("DW"))
-                {
-                    infoStream.Message("DW", "force apply deletes bytesUsed=" + flushControl.DeleteBytesUsed + " vs ramBuffer=" + (1024 * 1024 * ramBufferSizeMB));
-                }
-                hasEvents = true;
-                if (!this.ApplyAllDeletes(deleteQueue))
-                {
-                    PutEvent(ApplyDeletesEvent.INSTANCE);
-                }
-            }
-
-            return hasEvents;
-        }
-
-        internal void SubtractFlushedNumDocs(int numFlushed)
-        {
-            int oldValue = numDocsInRAM.Get();
-            while (!numDocsInRAM.CompareAndSet(oldValue, oldValue - numFlushed))
-            {
-                oldValue = numDocsInRAM.Get();
-            }
-        }
-
-        // for asserts
-        private volatile DocumentsWriterDeleteQueue currentFullFlushDelQueue = null;
-
-        // for asserts
-        private bool SetFlushingDeleteQueue(DocumentsWriterDeleteQueue session)
-        {
-            lock (this)
-            {
-                currentFullFlushDelQueue = session;
-                return true;
-            }
-        }
-
-        /*
-         * FlushAllThreads is synced by IW fullFlushLock. Flushing all threads is a
-         * two stage operation; the caller must ensure (in try/finally) that finishFlush
-         * is called after this method, to release the flush lock in DWFlushControl
-         */
-
-        internal bool FlushAllThreads(IndexWriter indexWriter)
-        {
-            DocumentsWriterDeleteQueue flushingDeleteQueue;
-            if (infoStream.IsEnabled("DW"))
-            {
-                infoStream.Message("DW", "startFullFlush");
-            }
-
-            lock (this)
-            {
-                pendingChangesInCurrentFullFlush = AnyChanges();
-                flushingDeleteQueue = deleteQueue;
-                /* Cutover to a new delete queue.  this must be synced on the flush control
-                 * otherwise a new DWPT could sneak into the loop with an already flushing
-                 * delete queue */
-                flushControl.MarkForFullFlush(); // swaps the delQueue synced on FlushControl
-                Debug.Assert(SetFlushingDeleteQueue(flushingDeleteQueue));
-            }
-            Debug.Assert(currentFullFlushDelQueue != null);
-            Debug.Assert(currentFullFlushDelQueue != deleteQueue);
-
-            bool anythingFlushed = false;
-            try
-            {
-                DocumentsWriterPerThread flushingDWPT;
-                // Help out with flushing:
-                while ((flushingDWPT = flushControl.NextPendingFlush()) != null)
-                {
-                    anythingFlushed |= DoFlush(flushingDWPT);
-                }
-                // If a concurrent flush is still in flight wait for it
-                flushControl.WaitForFlush();
-                if (!anythingFlushed && flushingDeleteQueue.AnyChanges()) // apply deletes if we did not flush any document
-                {
-                    if (infoStream.IsEnabled("DW"))
-                    {
-                        infoStream.Message("DW", Thread.CurrentThread.Name + ": flush naked frozen global deletes");
-                    }
-                    ticketQueue.AddDeletes(flushingDeleteQueue);
-                }
-                ticketQueue.ForcePurge(indexWriter);
-                Debug.Assert(!flushingDeleteQueue.AnyChanges() && !ticketQueue.HasTickets);
-            }
-            finally
-            {
-                Debug.Assert(flushingDeleteQueue == currentFullFlushDelQueue);
-            }
-            return anythingFlushed;
-        }
-
-        internal void FinishFullFlush(bool success)
-        {
-            try
-            {
-                if (infoStream.IsEnabled("DW"))
-                {
-                    infoStream.Message("DW", Thread.CurrentThread.Name + " finishFullFlush success=" + success);
-                }
-                Debug.Assert(SetFlushingDeleteQueue(null));
-                if (success)
-                {
-                    // Release the flush lock
-                    flushControl.FinishFullFlush();
-                }
-                else
-                {
-                    HashSet<string> newFilesSet = new HashSet<string>();
-                    flushControl.AbortFullFlushes(newFilesSet);
-                    PutEvent(new DeleteNewFilesEvent(newFilesSet));
-                }
-            }
-            finally
-            {
-                pendingChangesInCurrentFullFlush = false;
-            }
-        }
-
-        public LiveIndexWriterConfig IndexWriterConfig
-        {
-            get
-            {
-                return config;
-            }
-        }
-
-        private void PutEvent(IEvent @event)
-        {
-            events.Enqueue(@event);
-        }
-
-#if FEATURE_SERIALIZABLE
-        [Serializable]
-#endif
-        internal sealed class ApplyDeletesEvent : IEvent
-        {
-            internal static readonly IEvent INSTANCE = new ApplyDeletesEvent();
-            private int instCount = 0; // LUCENENET TODO: What is this for? It will always be zero when initialized and 1 after the constructor is called. Should it be static?
-
-            internal ApplyDeletesEvent()
-            {
-                Debug.Assert(instCount == 0);
-                instCount++;
-            }
-
-            public void Process(IndexWriter writer, bool triggerMerge, bool forcePurge)
-            {
-                writer.ApplyDeletesAndPurge(true); // we always purge!
-            }
-        }
-
-#if FEATURE_SERIALIZABLE
-        [Serializable]
-#endif
-        internal sealed class MergePendingEvent : IEvent
-        {
-            internal static readonly IEvent INSTANCE = new MergePendingEvent();
-            private int instCount = 0; // LUCENENET TODO: What is this for? It will always be zero when initialized and 1 after the constructor is called. Should it be static?
-
-            internal MergePendingEvent()
-            {
-                Debug.Assert(instCount == 0);
-                instCount++;
-            }
-
-            public void Process(IndexWriter writer, bool triggerMerge, bool forcePurge)
-            {
-                writer.DoAfterSegmentFlushed(triggerMerge, forcePurge);
-            }
-        }
-
-#if FEATURE_SERIALIZABLE
-        [Serializable]
-#endif
-        internal sealed class ForcedPurgeEvent : IEvent
-        {
-            internal static readonly IEvent INSTANCE = new ForcedPurgeEvent();
-            private int instCount = 0; // LUCENENET TODO: What is this for? It will always be zero when initialized and 1 after the constructor is called. Should it be static?
-
-            internal ForcedPurgeEvent()
-            {
-                Debug.Assert(instCount == 0);
-                instCount++;
-            }
-
-            public void Process(IndexWriter writer, bool triggerMerge, bool forcePurge)
-            {
-                writer.Purge(true);
-            }
-        }
-
-#if FEATURE_SERIALIZABLE
-        [Serializable]
-#endif
-        internal class FlushFailedEvent : IEvent
-        {
-            private readonly SegmentInfo info;
-
-            public FlushFailedEvent(SegmentInfo info)
-            {
-                this.info = info;
-            }
-
-            public void Process(IndexWriter writer, bool triggerMerge, bool forcePurge)
-            {
-                writer.FlushFailed(info);
-            }
-        }
-
-#if FEATURE_SERIALIZABLE
-        [Serializable]
-#endif
-        internal class DeleteNewFilesEvent : IEvent
-        {
-            private readonly ICollection<string> files;
-
-            public DeleteNewFilesEvent(ICollection<string> files)
-            {
-                this.files = files;
-            }
-
-            public void Process(IndexWriter writer, bool triggerMerge, bool forcePurge)
-            {
-                writer.DeleteNewFiles(files);
-            }
-        }
-
-        public ConcurrentQueue<IEvent> EventQueue
-        {
-            get { return events; }
-        }
-    }
-}
\ No newline at end of file


Mime
View raw message