lucenenet-commits mailing list archives

From nightowl...@apache.org
Subject [44/62] [abbrv] lucenenet git commit: Lucene.Net.Core.Analysis: Deleted obsolete Analysis files that have mostly been moved to Lucene.Net.Analysis.Common
Date Sat, 01 Apr 2017 01:09:37 GMT
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/8a97bfcf/src/Lucene.Net.Core/Analysis/BaseCharFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/BaseCharFilter.cs b/src/Lucene.Net.Core/Analysis/BaseCharFilter.cs
deleted file mode 100644
index 7c91e1c..0000000
--- a/src/Lucene.Net.Core/Analysis/BaseCharFilter.cs
+++ /dev/null
@@ -1,105 +0,0 @@
-/* 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-using System;
-using Lucene.Net.Support;
-using Lucene.Net.Util;
-
-namespace Lucene.Net.Analysis
-{
-
-    /// <summary>
-    /// Base utility class for implementing a <see cref="CharFilter" />.
-    /// You subclass this, and then record mappings by calling
-    /// <see cref="AddOffCorrectMap" />, and then invoke <see cref="Correct" />
-    /// to correct an offset.
-    /// </summary>
-    public abstract class BaseCharFilter : CharFilter
-    {
-
-        private int[] offsets;
-        private int[] diffs;
-        private int size = 0;
-
-        protected BaseCharFilter(CharStream @in) : base(@in)
-        {
-        }
-
-        /// <summary>Retrieves the corrected offset.</summary>
-        protected internal override int Correct(int currentOff)
-        {
-            if (offsets == null || currentOff < offsets[0])
-            {
-                return currentOff;
-            }
-
-            int hi = size - 1;
-            if (currentOff >= offsets[hi])
-                return currentOff + diffs[hi];
-
-            int lo = 0;
-            int mid = -1;
-
-            while (hi >= lo)
-            {
-                mid = Number.URShift(lo + hi, 1);
-                if (currentOff < offsets[mid])
-                    hi = mid - 1;
-                else if (currentOff > offsets[mid])
-                    lo = mid + 1;
-                else
-                    return currentOff + diffs[mid];
-            }
-
-            if (currentOff < offsets[mid])
-                return mid == 0 ? currentOff : currentOff + diffs[mid - 1];
-            return currentOff + diffs[mid];
-        }
-
-        protected int LastCumulativeDiff
-        {
-            get
-            {
-                return offsets == null ? 0 : diffs[size - 1];
-            }
-        }
-
-        [Obsolete("Use LastCumulativeDiff property instead")]
-        protected int GetLastCumulativeDiff()
-        {
-            return LastCumulativeDiff;
-        }
-
-        protected void AddOffCorrectMap(int off, int cumulativeDiff)
-        {
-            if (offsets == null)
-            {
-                offsets = new int[64];
-                diffs = new int[64];
-            }
-            else if (size == offsets.Length)
-            {
-                offsets = ArrayUtil.Grow(offsets);
-                diffs = ArrayUtil.Grow(diffs);
-            }
-
-            offsets[size] = off;
-            diffs[size++] = cumulativeDiff;
-        }
-    }
-}
\ No newline at end of file

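For reference, the offset-correction scheme this deleted file implemented can be sketched standalone: parallel (offset, cumulativeDiff) arrays, plus a binary search for the last recorded offset at or below the query. The OffsetCorrector name below is illustrative, not part of the Lucene.NET API.

    using System;

    // Illustrative sketch (not Lucene.NET API) of the cumulative offset map
    // behind BaseCharFilter.Correct.
    public class OffsetCorrector
    {
        private int[] offsets = new int[0];
        private int[] diffs = new int[0];
        private int size;

        // Record that output offsets at or beyond 'off' trail the input
        // by 'cumulativeDiff' characters.
        public void AddOffCorrectMap(int off, int cumulativeDiff)
        {
            if (size == offsets.Length)
            {
                Array.Resize(ref offsets, Math.Max(8, offsets.Length * 2));
                Array.Resize(ref diffs, offsets.Length);
            }
            offsets[size] = off;
            diffs[size++] = cumulativeDiff;
        }

        // Map an output offset back to the corresponding input offset by
        // finding the last recorded offset <= currentOff.
        public int Correct(int currentOff)
        {
            int lo = 0, hi = size - 1, best = -1;
            while (lo <= hi)
            {
                int mid = lo + (hi - lo) / 2;
                if (offsets[mid] <= currentOff) { best = mid; lo = mid + 1; }
                else hi = mid - 1;
            }
            return best < 0 ? currentOff : currentOff + diffs[best];
        }
    }

For example, if a filter collapses the five input characters "&amp;" starting at offset 10 down to a single "&", it records AddOffCorrectMap(11, 4); Correct(11) then returns 15, shifting every later output offset back to its input position.
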
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/8a97bfcf/src/Lucene.Net.Core/Analysis/CharArraySet.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/CharArraySet.cs b/src/Lucene.Net.Core/Analysis/CharArraySet.cs
deleted file mode 100644
index 5564f74..0000000
--- a/src/Lucene.Net.Core/Analysis/CharArraySet.cs
+++ /dev/null
@@ -1,517 +0,0 @@
-/* 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-using System;
-using System.Collections;
-using System.Linq;
-using System.Collections.Generic;
-
-namespace Lucene.Net.Analysis
-{
-    /// <summary> A simple class that stores Strings as char[]'s in a
-    /// hash table.  Note that this is not a general purpose
-    /// class.  For example, it cannot remove items from the
-    /// set, nor does it resize its hash table to be smaller,
-    /// etc.  It is designed to be quick to test if a char[]
-    /// is in the set without the necessity of converting it
-    /// to a String first.
-    /// <p/>
-    /// <em>Please note:</em> This class implements <see cref="System.Collections.Generic.ISet{T}"/> but
-    /// does not behave as that interface implies in all cases. Although the
-    /// generic type argument is <see cref="string"/>, you can add any object
-    /// that has a string representation: the add methods call
-    /// <see cref="object.ToString()"/> and store the result in a <see cref="char"/>
-    /// buffer, and the <see cref="Contains(object)"/> method behaves the same way.
-    /// The <see cref="GetEnumerator"/> method enumerates <see cref="string"/> values.
-    /// For more efficient access, <see cref="StringEnumerator"/> is also provided.
-    /// </summary>
-    // TODO: java uses wildcards, .net doesn't have this, easiest way is to 
-    //       make the entire class generic.  Ultimately, though, since this
-    //       works with strings, I can't think of a reason not to just declare
-    //       this as an ISet<string>.
-    public class CharArraySet : ISet<string>
-    {
-        bool _ReadOnly = false;
-        const int INIT_SIZE = 8;
-        char[][] _Entries;
-        int _Count;
-        bool _IgnoreCase;
-        public static CharArraySet EMPTY_SET = UnmodifiableSet(new CharArraySet(0, false));
-
-        private void Init(int startSize, bool ignoreCase)
-        {
-            this._IgnoreCase = ignoreCase;
-            int size = INIT_SIZE;
-            while (startSize + (startSize >> 2) > size)
-                size <<= 1;
-            _Entries = new char[size][];
-        }
-
-        /// <summary>Create set with enough capacity to hold startSize
-        /// terms 
-        /// </summary>
-        public CharArraySet(int startSize, bool ignoreCase)
-        {
-            Init(startSize, ignoreCase);
-        }
-
-        public CharArraySet(IEnumerable<string> c, bool ignoreCase)
-        {
-            Init(c.Count(), ignoreCase);
-            AddItems(c);
-        }
-
-        /// <summary>Create set from a Collection of char[] or String </summary>
-        public CharArraySet(IEnumerable<object> c, bool ignoreCase)
-        {
-            Init(c.Count(), ignoreCase);
-            AddItems(c);
-        }
-
-        private void AddItems<T>(IEnumerable<T> items)
-        {
-            foreach(var item in items)
-            {
-                Add(item.ToString());
-            }
-        }
-
-        /// <summary>Create set from entries </summary>
-        private CharArraySet(char[][] entries, bool ignoreCase, int count)
-        {
-            this._Entries = entries;
-            this._IgnoreCase = ignoreCase;
-            this._Count = count;
-        }
-
-        /// <summary>true if the <c>len</c> chars of <c>text</c> starting at <c>off</c>
-        /// are in the set 
-        /// </summary>
-        public virtual bool Contains(char[] text, int off, int len)
-        {
-            return _Entries[GetSlot(text, off, len)] != null;
-        }
-
-        public virtual bool Contains(string text)
-        {
-            return _Entries[GetSlot(text)] != null;
-        }
-
-
-        private int GetSlot(char[] text, int off, int len)
-        {
-            int code = GetHashCode(text, off, len);
-            int pos = code & (_Entries.Length - 1);
-            char[] text2 = _Entries[pos];
-            if (text2 != null && !Equals(text, off, len, text2))
-            {
-                int inc = ((code >> 8) + code) | 1;
-                do
-                {
-                    code += inc;
-                    pos = code & (_Entries.Length - 1);
-                    text2 = _Entries[pos];
-                }
-                while (text2 != null && !Equals(text, off, len, text2));
-            }
-            return pos;
-        }
-
-        /// <summary>Returns the hash slot for the given String.</summary>
-        private int GetSlot(string text)
-        {
-            int code = GetHashCode(text);
-            int pos = code & (_Entries.Length - 1);
-            char[] text2 = _Entries[pos];
-            if (text2 != null && !Equals(text, text2))
-            {
-                int inc = ((code >> 8) + code) | 1;
-                do
-                {
-                    code += inc;
-                    pos = code & (_Entries.Length - 1);
-                    text2 = _Entries[pos];
-                }
-                while (text2 != null && !Equals(text, text2));
-            }
-            return pos;
-        }
-
-        public bool Add(string text)
-        {
-            if (_ReadOnly) throw new NotSupportedException();
-            return Add(text.ToCharArray());
-        }
-
-        /// <summary>Add this char[] directly to the set.
-        /// If ignoreCase is true for this Set, the text array will be directly modified.
-        /// The user should never modify this text array after calling this method.
-        /// </summary>
-        public bool Add(char[] text)
-        {
-            if (_ReadOnly) throw new NotSupportedException();
-
-            if (_IgnoreCase)
-                for (int i = 0; i < text.Length; i++)
-                    text[i] = Char.ToLower(text[i]);
-            int slot = GetSlot(text, 0, text.Length);
-            if (_Entries[slot] != null)
-                return false;
-            _Entries[slot] = text;
-            _Count++;
-
-            if (_Count + (_Count >> 2) > _Entries.Length)
-            {
-                Rehash();
-            }
-
-            return true;
-        }
-
-        private bool Equals(char[] text1, int off, int len, char[] text2)
-        {
-            if (len != text2.Length)
-                return false;
-            if (_IgnoreCase)
-            {
-                for (int i = 0; i < len; i++)
-                {
-                    if (char.ToLower(text1[off + i]) != text2[i])
-                        return false;
-                }
-            }
-            else
-            {
-                for (int i = 0; i < len; i++)
-                {
-                    if (text1[off + i] != text2[i])
-                        return false;
-                }
-            }
-            return true;
-        }
-
-        private bool Equals(string text1, char[] text2)
-        {
-            int len = text1.Length;
-            if (len != text2.Length)
-                return false;
-            if (_IgnoreCase)
-            {
-                for (int i = 0; i < len; i++)
-                {
-                    if (char.ToLower(text1[i]) != text2[i])
-                        return false;
-                }
-            }
-            else
-            {
-                for (int i = 0; i < len; i++)
-                {
-                    if (text1[i] != text2[i])
-                        return false;
-                }
-            }
-            return true;
-        }
-
-        private void Rehash()
-        {
-            int newSize = 2 * _Entries.Length;
-            char[][] oldEntries = _Entries;
-            _Entries = new char[newSize][];
-
-            for (int i = 0; i < oldEntries.Length; i++)
-            {
-                char[] text = oldEntries[i];
-                if (text != null)
-                {
-                    // todo: could be faster... no need to compare strings on collision
-                    _Entries[GetSlot(text, 0, text.Length)] = text;
-                }
-            }
-        }
-
-        private int GetHashCode(char[] text, int offset, int len)
-        {
-            int code = 0;
-            int stop = offset + len;
-            if (_IgnoreCase)
-            {
-                for (int i = offset; i < stop; i++)
-                {
-                    code = code * 31 + char.ToLower(text[i]);
-                }
-            }
-            else
-            {
-                for (int i = offset; i < stop; i++)
-                {
-                    code = code * 31 + text[i];
-                }
-            }
-            return code;
-        }
-
-        private int GetHashCode(string text)
-        {
-            int code = 0;
-            int len = text.Length;
-            if (_IgnoreCase)
-            {
-                for (int i = 0; i < len; i++)
-                {
-                    code = code * 31 + char.ToLower(text[i]);
-                }
-            }
-            else
-            {
-                for (int i = 0; i < len; i++)
-                {
-                    code = code * 31 + text[i];
-                }
-            }
-            return code;
-        }
-
-        public int Count
-        {
-            get { return _Count; }
-        }
-
-        public bool IsEmpty
-        {
-            get { return _Count == 0; }
-        }
-
-        public bool Contains(object item)
-        {
-            var text = item as char[];
-            return text != null ? Contains(text, 0, text.Length) : Contains(item.ToString());
-        }
-
-        public bool Add(object item)
-        {
-            return Add(item.ToString());
-        }
-
-        void ICollection<string>.Add(string item)
-        {
-            this.Add(item);
-        }
-
-        /// <summary>
-        /// Returns an unmodifiable <see cref="CharArraySet"/>. This allows providing
-        /// unmodifiable views of internal sets for "read-only" use.
-        /// </summary>
-        /// <param name="set">The set for which to return an unmodifiable view.</param>
-        /// <returns>A new unmodifiable <see cref="CharArraySet"/></returns>
-        /// <throws>ArgumentNullException if the given set is <c>null</c></throws>
-        public static CharArraySet UnmodifiableSet(CharArraySet set)
-        {
-            if (set == null)
-                throw new ArgumentNullException("set", "Given set is null!");
-            if (set == EMPTY_SET)
-                return EMPTY_SET;
-            if (set._ReadOnly)
-                return set;
-
-            var newSet = new CharArraySet(set._Entries, set._IgnoreCase, set.Count) {IsReadOnly = true};
-            return newSet;
-        }
-
-        /// <summary>
-        /// returns a copy of the given set as a <see cref="CharArraySet"/>.  If the given set
-        /// is a <see cref="CharArraySet"/> the ignoreCase property will be preserved.
-        /// </summary>
-        /// <param name="set">A set to copy</param>
-        /// <returns>a copy of the given set as a <see cref="CharArraySet"/>.  If the given set
-        /// is a <see cref="CharArraySet"/> the ignoreCase property will be preserved.</returns>
-        public static CharArraySet Copy<T>(ISet<T> set)
-        {
-            if (set == null)
-                throw new ArgumentNullException("set", "Given set is null!");
-            if (set == EMPTY_SET)
-                return EMPTY_SET;
-            bool ignoreCase = set is CharArraySet && ((CharArraySet)set)._IgnoreCase;
-            var arrSet = new CharArraySet(set.Count, ignoreCase);
-            arrSet.AddItems(set);
-            return arrSet;
-        }
-
-        public void Clear()
-        {
-            throw new NotSupportedException("Remove not supported!");
-        }
-
-        public bool IsReadOnly
-        {
-            get { return _ReadOnly; }
-            private set { _ReadOnly = value; }
-        }
-
-        /// <summary>Adds all of the elements in the specified collection to this collection </summary>
-        public void UnionWith(IEnumerable<string> other)
-        {
-            if (_ReadOnly) throw new NotSupportedException();
-
-            foreach (string s in other)
-            {
-                Add(s.ToCharArray());
-            }
-        }
-
-        /// <summary>Wrapper that calls UnionWith</summary>
-        public void AddAll(IEnumerable<string> coll)
-        {
-            UnionWith(coll);
-        }
-
-        #region Unneeded methods
-        public void RemoveAll(ICollection<string> c)
-        {
-            throw new NotSupportedException();
-        }
-
-        public void RetainAll(ICollection<string> c)
-        {
-            throw new NotSupportedException();
-        }
-
-        void ICollection<string>.CopyTo(string[] array, int arrayIndex)
-        {
-            throw new NotSupportedException();
-        }
-
-        void ISet<string>.IntersectWith(IEnumerable<string> other)
-        {
-            throw new NotSupportedException();
-        }
-
-        void ISet<string>.ExceptWith(IEnumerable<string> other)
-        {
-            throw new NotSupportedException();
-        }
-
-        void ISet<string>.SymmetricExceptWith(IEnumerable<string> other)
-        {
-            throw new NotSupportedException();
-        }
-
-        bool ISet<string>.IsSubsetOf(IEnumerable<string> other)
-        {
-            throw new NotSupportedException();
-        }
-
-        bool ISet<string>.IsSupersetOf(IEnumerable<string> other)
-        {
-            throw new NotSupportedException();
-        }
-
-        bool ISet<string>.IsProperSupersetOf(IEnumerable<string> other)
-        {
-            throw new NotSupportedException();
-        }
-
-        bool ISet<string>.IsProperSubsetOf(IEnumerable<string> other)
-        {
-            throw new NotSupportedException();
-        }
-
-        bool ISet<string>.Overlaps(IEnumerable<string> other)
-        {
-            throw new NotSupportedException();
-        }
-
-        bool ISet<string>.SetEquals(IEnumerable<string> other)
-        {
-            throw new NotSupportedException();
-        }
-
-        bool ICollection<string>.Remove(string item)
-        {
-            throw new NotSupportedException();
-        }
-        #endregion
-
-        /// <summary>
-        /// The IEnumerator&lt;string&gt; for this set.  Strings are constructed on the fly,
-        /// so use <see cref="NextCharArray"/> for more efficient access.
-        /// </summary>
-        public class CharArraySetEnumerator : IEnumerator<string>
-        {
-            readonly CharArraySet _Creator;
-            int pos = -1;
-            char[] cur;
-
-            protected internal CharArraySetEnumerator(CharArraySet creator)
-            {
-                _Creator = creator;
-            }
-
-            public bool MoveNext()
-            {
-                cur = null;
-                pos++;
-                while (pos < _Creator._Entries.Length && (cur = _Creator._Entries[pos]) == null)
-                    pos++;
-                return cur != null;
-            }
-
-            /// <summary>do not modify the returned char[] </summary>
-            public char[] NextCharArray()
-            {
-                return cur;
-            }
-
-            public string Current
-            {
-                get { return new string(NextCharArray()); }
-            }
-
-            public void Dispose()
-            {
-            }
-
-            object IEnumerator.Current
-            {
-                get { return new string(NextCharArray()); }
-            }
-
-            public void Reset()
-            {
-                throw new NotImplementedException();
-            }
-        }
-
-        public IEnumerator<string> StringEnumerator()
-        {
-            return new CharArraySetEnumerator(this);
-        }
-
-        public IEnumerator<string> GetEnumerator()
-        {
-            return new CharArraySetEnumerator(this);
-        }
-
-        IEnumerator IEnumerable.GetEnumerator()
-        {
-            return GetEnumerator();
-        }
-    }
-
-}
\ No newline at end of file

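The lookup strategy in the deleted GetSlot methods above is open addressing in a power-of-two table with a hash-derived probe increment; forcing the increment odd guarantees the probe sequence eventually visits every slot. A self-contained sketch of that collision strategy (names and the string[] table are illustrative, not the Lucene.NET API):

    // Illustrative double-hashing probe, mirroring GetSlot above.
    static int FindSlot(string[] entries, string key)
    {
        int code = key.GetHashCode();
        int pos = code & (entries.Length - 1);      // mask works because length is a power of two
        if (entries[pos] == null || entries[pos] == key) return pos;
        int inc = ((code >> 8) + code) | 1;         // odd increment => coprime with table size
        do
        {
            code = unchecked(code + inc);           // wraparound is intentional
            pos = code & (entries.Length - 1);
        } while (entries[pos] != null && entries[pos] != key);
        return pos;
    }

Because the increment is odd and the table size is a power of two, the probe sequence is a full cycle over the table, so an empty slot is always found as long as the table is kept under-full (the class rehashes at 80% load).
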
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/8a97bfcf/src/Lucene.Net.Core/Analysis/CharReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/CharReader.cs b/src/Lucene.Net.Core/Analysis/CharReader.cs
deleted file mode 100644
index 7dc9f50..0000000
--- a/src/Lucene.Net.Core/Analysis/CharReader.cs
+++ /dev/null
@@ -1,94 +0,0 @@
-/* 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-namespace Lucene.Net.Analysis
-{
-    
-    /// <summary> CharReader is a Reader wrapper. It reads chars from
-    /// Reader and outputs <see cref="CharStream" />, defining an
-    /// identity <see cref="CorrectOffset" /> method that
-    /// simply returns the provided offset.
-    /// </summary>
-    public sealed class CharReader:CharStream
-    {
-        private long currentPosition = -1;
-
-        private bool isDisposed;
-
-        internal System.IO.StreamReader input;
-        
-        public static CharStream Get(System.IO.TextReader input)
-        {
-            var charStream = input as CharStream;
-            if (charStream != null)
-                return charStream;
-            
-            // {{Aroush-2.9}} isn't there a better (faster) way to do this?
-            var theString = new System.IO.MemoryStream(System.Text.Encoding.UTF8.GetBytes(input.ReadToEnd()));
-            return new CharReader(new System.IO.StreamReader(theString));
-            //return input is CharStream?(CharStream) input:new CharReader(input);
-        }
-        
-        private CharReader(System.IO.StreamReader in_Renamed) : base(in_Renamed)
-        {
-            input = in_Renamed;
-        }
-        
-        public override int CorrectOffset(int currentOff)
-        {
-            return currentOff;
-        }
-
-        protected override void Dispose(bool disposing)
-        {
-            if (isDisposed) return;
-
-            if (disposing)
-            {
-                if (input != null)
-                {
-                    input.Close();
-                }
-            }
-
-            input = null;
-            isDisposed = true;
-            base.Dispose(disposing);
-        }
-        
-        public  override int Read(System.Char[] cbuf, int off, int len)
-        {
-            return input.Read(cbuf, off, len);
-        }
-        
-        public bool MarkSupported()
-        {
-            return input.BaseStream.CanSeek;
-        }
-        
-        public void  Mark(int readAheadLimit)
-        {
-            currentPosition = input.BaseStream.Position;
-            input.BaseStream.Position = readAheadLimit;
-        }
-        
-        public void  Reset()
-        {
-            input.BaseStream.Position = currentPosition;
-        }
-    }
-}
\ No newline at end of file

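Hypothetical usage under the 3.x-style API being deleted here: CharReader.Get adapts any TextReader into a CharStream, returning the argument unchanged when it already is one, and its CorrectOffset is the identity.

    // Sketch against the deleted API above, not current Lucene.NET.
    System.IO.TextReader reader = new System.IO.StringReader("plain text");
    CharStream stream = CharReader.Get(reader);  // wraps, or passes through
    int off = stream.CorrectOffset(7);           // identity: returns 7
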
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/8a97bfcf/src/Lucene.Net.Core/Analysis/CharStream.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/CharStream.cs b/src/Lucene.Net.Core/Analysis/CharStream.cs
deleted file mode 100644
index 22aaaae..0000000
--- a/src/Lucene.Net.Core/Analysis/CharStream.cs
+++ /dev/null
@@ -1,45 +0,0 @@
-/* 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-namespace Lucene.Net.Analysis
-{
-    
-    /// <summary> CharStream adds <see cref="CorrectOffset" />
-    /// functionality over <see cref="System.IO.TextReader" />.  All Tokenizers accept a
-    /// CharStream instead of <see cref="System.IO.TextReader" /> as input, which enables
-    /// arbitrary character-based filtering before tokenization.
-    /// The <see cref="CorrectOffset" /> method fixes offsets to account for
-    /// removal or insertion of characters, so that the offsets
-    /// reported in the tokens match the character offsets of the
-    /// original Reader.
-    /// </summary>
-    public abstract class CharStream : System.IO.StreamReader
-    {
-        protected CharStream(System.IO.StreamReader reader) : base(reader.BaseStream)
-        {
-        }
-        
-        /// <summary> Called by CharFilter(s) and Tokenizer to correct token offset.
-        /// 
-        /// </summary>
-        /// <param name="currentOff">offset as seen in the output
-        /// </param>
-        /// <returns> corrected offset based on the input
-        /// </returns>
-        public abstract int CorrectOffset(int currentOff);
-    }
-}
\ No newline at end of file

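To make the CorrectOffset contract concrete: if a filter deleted the first three input characters, every output offset maps back three positions later in the original reader. A hypothetical subclass (not from the codebase above) expressing exactly that:

    // Hypothetical CharStream subclass illustrating the contract.
    public sealed class SkipThreeCharStream : CharStream
    {
        public SkipThreeCharStream(System.IO.StreamReader reader) : base(reader)
        {
        }

        // Output offset N corresponds to input offset N + 3.
        public override int CorrectOffset(int currentOff)
        {
            return currentOff + 3;
        }
    }
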
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/8a97bfcf/src/Lucene.Net.Core/Analysis/CharTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/CharTokenizer.cs b/src/Lucene.Net.Core/Analysis/CharTokenizer.cs
deleted file mode 100644
index 3c34664..0000000
--- a/src/Lucene.Net.Core/Analysis/CharTokenizer.cs
+++ /dev/null
@@ -1,135 +0,0 @@
-/* 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-using Lucene.Net.Analysis.Tokenattributes;
-using AttributeSource = Lucene.Net.Util.AttributeSource;
-
-namespace Lucene.Net.Analysis
-{
-    
-    /// <summary>An abstract base class for simple, character-oriented tokenizers.</summary>
-    public abstract class CharTokenizer:Tokenizer
-    {
-        protected CharTokenizer(System.IO.TextReader input):base(input)
-        {
-            offsetAtt = AddAttribute<IOffsetAttribute>();
-            termAtt = AddAttribute<ITermAttribute>();
-        }
-
-        protected CharTokenizer(AttributeSource source, System.IO.TextReader input):base(source, input)
-        {
-            offsetAtt = AddAttribute<IOffsetAttribute>();
-            termAtt = AddAttribute<ITermAttribute>();
-        }
-
-        protected CharTokenizer(AttributeFactory factory, System.IO.TextReader input):base(factory, input)
-        {
-            offsetAtt = AddAttribute<IOffsetAttribute>();
-            termAtt = AddAttribute<ITermAttribute>();
-        }
-        
-        private int offset = 0, bufferIndex = 0, dataLen = 0;
-        private const int MAX_WORD_LEN = 255;
-        private const int IO_BUFFER_SIZE = 4096;
-        private readonly char[] ioBuffer = new char[IO_BUFFER_SIZE];
-        
-        private readonly ITermAttribute termAtt;
-        private readonly IOffsetAttribute offsetAtt;
-        
-        /// <summary>Returns true iff a character should be included in a token.  This
-        /// tokenizer generates as tokens adjacent sequences of characters which
-        /// satisfy this predicate.  Characters for which this is false are used to
-        /// define token boundaries and are not included in tokens. 
-        /// </summary>
-        protected internal abstract bool IsTokenChar(char c);
-        
-        /// <summary>Called on each token character to normalize it before it is added to the
-        /// token.  The default implementation does nothing.  Subclasses may use this
-        /// to, e.g., lowercase tokens. 
-        /// </summary>
-        protected internal virtual char Normalize(char c)
-        {
-            return c;
-        }
-        
-        public override bool IncrementToken()
-        {
-            ClearAttributes();
-            int length = 0;
-            int start = bufferIndex;
-            char[] buffer = termAtt.TermBuffer();
-            while (true)
-            {
-                
-                if (bufferIndex >= dataLen)
-                {
-                    offset += dataLen;
-                    dataLen = input.Read(ioBuffer, 0, ioBuffer.Length);
-                    if (dataLen <= 0)
-                    {
-                        dataLen = 0; // so next offset += dataLen won't decrement offset
-                        if (length > 0)
-                            break;
-                        return false;
-                    }
-                    bufferIndex = 0;
-                }
-                
-                char c = ioBuffer[bufferIndex++];
-                
-                if (IsTokenChar(c))
-                {
-                    // if it's a token char
-                    
-                    if (length == 0)
-                    // start of token
-                        start = offset + bufferIndex - 1;
-                    else if (length == buffer.Length)
-                        buffer = termAtt.ResizeTermBuffer(1 + length);
-                    
-                    buffer[length++] = Normalize(c); // buffer it, normalized
-                    
-                    if (length == MAX_WORD_LEN)
-                    // buffer overflow!
-                        break;
-                }
-                else if (length > 0)
-                // at non-Letter w/ chars
-                    break; // return 'em
-            }
-            
-            termAtt.SetTermLength(length);
-            offsetAtt.SetOffset(CorrectOffset(start), CorrectOffset(start + length));
-            return true;
-        }
-        
-        public override void  End()
-        {
-            // set final offset
-            int finalOffset = CorrectOffset(offset);
-            offsetAtt.SetOffset(finalOffset, finalOffset);
-        }
-        
-        public override void  Reset(System.IO.TextReader input)
-        {
-            base.Reset(input);
-            bufferIndex = 0;
-            offset = 0;
-            dataLen = 0;
-        }
-    }
-}
\ No newline at end of file

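The two hooks above are all a CharTokenizer subclass needs: IsTokenChar decides token boundaries and Normalize optionally rewrites each buffered character. A hypothetical lowercasing letter tokenizer under this API (it mirrors Lucene's own LowerCaseTokenizer):

    // Hypothetical CharTokenizer subclass; a sketch, not from this commit.
    public sealed class LowerCaseLetterTokenizer : CharTokenizer
    {
        public LowerCaseLetterTokenizer(System.IO.TextReader input) : base(input)
        {
        }

        // Letters are token characters; anything else ends the token.
        protected internal override bool IsTokenChar(char c)
        {
            return char.IsLetter(c);
        }

        // Lowercase each character as it is buffered.
        protected internal override char Normalize(char c)
        {
            return char.ToLower(c);
        }
    }
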
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/8a97bfcf/src/Lucene.Net.Core/Analysis/ISOLatin1AccentFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/ISOLatin1AccentFilter.cs b/src/Lucene.Net.Core/Analysis/ISOLatin1AccentFilter.cs
deleted file mode 100644
index a6fde44..0000000
--- a/src/Lucene.Net.Core/Analysis/ISOLatin1AccentFilter.cs
+++ /dev/null
@@ -1,344 +0,0 @@
-/* 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-using System;
-using Lucene.Net.Analysis.Tokenattributes;
-
-namespace Lucene.Net.Analysis
-{
-    
-    /// <summary> A filter that replaces accented characters in the ISO Latin 1 character set 
-    /// (ISO-8859-1) by their unaccented equivalent. The case will not be altered.
-    /// <p/>
-    /// For instance, 'À' will be replaced by 'A'.
-    /// <p/>
-    /// 
-    /// </summary>
-    /// <deprecated> If you build a new index, use <see cref="ASCIIFoldingFilter"/>
-    /// which covers a superset of Latin 1.
-    /// This class is included for use with existing indexes and will be removed
-    /// in a future release (possibly Lucene 4.0)
-    /// </deprecated>
-    [Obsolete("If you build a new index, use ASCIIFoldingFilter which covers a superset of Latin 1.  This class is included for use with existing indexes and will be removed in a future release (possibly Lucene 4.0).")]
-    public class ISOLatin1AccentFilter : TokenFilter
-    {
-        public ISOLatin1AccentFilter(TokenStream input):base(input)
-        {
-            termAtt = AddAttribute<ITermAttribute>();
-        }
-        
-        private char[] output = new char[256];
-        private int outputPos;
-        private readonly ITermAttribute termAtt;
-        
-        public override bool IncrementToken()
-        {
-            if (input.IncrementToken())
-            {
-                char[] buffer = termAtt.TermBuffer();
-                int length = termAtt.TermLength();
-                // If no characters actually require rewriting then we
-                // just return token as-is:
-                for (int i = 0; i < length; i++)
-                {
-                    char c = buffer[i];
-                    if (c >= '\u00c0' && c <= '\uFB06')
-                    {
-                        RemoveAccents(buffer, length);
-                        termAtt.SetTermBuffer(output, 0, outputPos);
-                        break;
-                    }
-                }
-                return true;
-            }
-            return false;
-        }
-
-        /// <summary> Replaces accented characters in the buffer with their unaccented equivalents.</summary>
-        public void  RemoveAccents(char[] input, int length)
-        {
-            
-            // Worst-case length required:
-            int maxSizeNeeded = 2 * length;
-            
-            int size = output.Length;
-            while (size < maxSizeNeeded)
-                size *= 2;
-            
-            if (size != output.Length)
-                output = new char[size];
-            
-            outputPos = 0;
-            
-            int pos = 0;
-            
-            for (int i = 0; i < length; i++, pos++)
-            {
-                char c = input[pos];
-                
-                // Quick test: if it's not in range then just keep
-                // current character
-                if (c < '\u00c0' || c > '\uFB06')
-                    output[outputPos++] = c;
-                else
-                {
-                    switch (c)
-                    {
-                        
-                        case '\u00C0': 
-                        // À
-                        case '\u00C1': 
-                        // Á
-                        case '\u00C2': 
-                        // Â
-                        case '\u00C3': 
-                        // Ã
-                        case '\u00C4': 
-                        // Ä
-                        case '\u00C5':  // Å
-                            output[outputPos++] = 'A';
-                            break;
-                        
-                        case '\u00C6':  // Æ
-                            output[outputPos++] = 'A';
-                            output[outputPos++] = 'E';
-                            break;
-                        
-                        case '\u00C7':  // Ç
-                            output[outputPos++] = 'C';
-                            break;
-                        
-                        case '\u00C8': 
-                        // È
-                        case '\u00C9': 
-                        // É
-                        case '\u00CA': 
-                        // Ê
-                        case '\u00CB':  // Ë
-                            output[outputPos++] = 'E';
-                            break;
-                        
-                        case '\u00CC': 
-                        // Ì
-                        case '\u00CD': 
-                        // Í
-                        case '\u00CE': 
-                        // Î
-                        case '\u00CF':  // Ï
-                            output[outputPos++] = 'I';
-                            break;
-                        
-                        case '\u0132':  // IJ
-                            output[outputPos++] = 'I';
-                            output[outputPos++] = 'J';
-                            break;
-                        
-                        case '\u00D0':  // Ð
-                            output[outputPos++] = 'D';
-                            break;
-                        
-                        case '\u00D1':  // Ñ
-                            output[outputPos++] = 'N';
-                            break;
-                        
-                        case '\u00D2': 
-                        // Ò
-                        case '\u00D3': 
-                        // Ó
-                        case '\u00D4': 
-                        // Ô
-                        case '\u00D5': 
-                        // Õ
-                        case '\u00D6': 
-                        // Ö
-                        case '\u00D8':  // Ø
-                            output[outputPos++] = 'O';
-                            break;
-                        
-                        case '\u0152':  // Œ
-                            output[outputPos++] = 'O';
-                            output[outputPos++] = 'E';
-                            break;
-                        
-                        case '\u00DE':  // Þ
-                            output[outputPos++] = 'T';
-                            output[outputPos++] = 'H';
-                            break;
-                        
-                        case '\u00D9': 
-                        // Ù
-                        case '\u00DA': 
-                        // Ú
-                        case '\u00DB': 
-                        // Û
-                        case '\u00DC':  // Ü
-                            output[outputPos++] = 'U';
-                            break;
-                        
-                        case '\u00DD': 
-                        // Ý
-                        case '\u0178':  // Ÿ
-                            output[outputPos++] = 'Y';
-                            break;
-                        
-                        case '\u00E0': 
-                        // à
-                        case '\u00E1': 
-                        // á
-                        case '\u00E2': 
-                        // â
-                        case '\u00E3': 
-                        // ã
-                        case '\u00E4': 
-                        // ä
-                        case '\u00E5':  // å
-                            output[outputPos++] = 'a';
-                            break;
-                        
-                        case '\u00E6':  // æ
-                            output[outputPos++] = 'a';
-                            output[outputPos++] = 'e';
-                            break;
-                        
-                        case '\u00E7':  // ç
-                            output[outputPos++] = 'c';
-                            break;
-                        
-                        case '\u00E8': 
-                        // è
-                        case '\u00E9': 
-                        // é
-                        case '\u00EA': 
-                        // ê
-                        case '\u00EB':  // ë
-                            output[outputPos++] = 'e';
-                            break;
-                        
-                        case '\u00EC': 
-                        // ì
-                        case '\u00ED': 
-                        // í
-                        case '\u00EE': 
-                        // î
-                        case '\u00EF':  // ï
-                            output[outputPos++] = 'i';
-                            break;
-                        
-                        case '\u0133':  // ij
-                            output[outputPos++] = 'i';
-                            output[outputPos++] = 'j';
-                            break;
-                        
-                        case '\u00F0':  // ð
-                            output[outputPos++] = 'd';
-                            break;
-                        
-                        case '\u00F1':  // ñ
-                            output[outputPos++] = 'n';
-                            break;
-                        
-                        case '\u00F2': 
-                        // ò
-                        case '\u00F3': 
-                        // ó
-                        case '\u00F4': 
-                        // ô
-                        case '\u00F5': 
-                        // õ
-                        case '\u00F6': 
-                        // ö
-                        case '\u00F8':  // ø
-                            output[outputPos++] = 'o';
-                            break;
-                        
-                        case '\u0153':  // œ
-                            output[outputPos++] = 'o';
-                            output[outputPos++] = 'e';
-                            break;
-                        
-                        case '\u00DF':  // ß
-                            output[outputPos++] = 's';
-                            output[outputPos++] = 's';
-                            break;
-                        
-                        case '\u00FE':  // þ
-                            output[outputPos++] = 't';
-                            output[outputPos++] = 'h';
-                            break;
-                        
-                        case '\u00F9': 
-                        // ù
-                        case '\u00FA': 
-                        // ú
-                        case '\u00FB': 
-                        // û
-                        case '\u00FC':  // ü
-                            output[outputPos++] = 'u';
-                            break;
-                        
-                        case '\u00FD': 
-                        // ý
-                        case '\u00FF':  // ÿ
-                            output[outputPos++] = 'y';
-                            break;
-                        
-                        case '\uFB00':  // ff
-                            output[outputPos++] = 'f';
-                            output[outputPos++] = 'f';
-                            break;
-                        
-                        case '\uFB01':  // ﬁ
-                            output[outputPos++] = 'f';
-                            output[outputPos++] = 'i';
-                            break;
-                        
-                        case '\uFB02':  // fl
-                            output[outputPos++] = 'f';
-                            output[outputPos++] = 'l';
-                            break;
-                            // following 2 are commented as they can break the maxSizeNeeded (and doing *3 could be expensive)
-                            //        case '\uFB03': // ffi
-                            //            output[outputPos++] = 'f';
-                            //            output[outputPos++] = 'f';
-                            //            output[outputPos++] = 'i';
-                            //            break;
-                            //        case '\uFB04': // ffl
-                            //            output[outputPos++] = 'f';
-                            //            output[outputPos++] = 'f';
-                            //            output[outputPos++] = 'l';
-                            //            break;
-                        
-                        case '\uFB05':  // ſt
-                            output[outputPos++] = 'f';
-                            output[outputPos++] = 't';
-                            break;
-                        
-                        case '\uFB06':  // st
-                            output[outputPos++] = 's';
-                            output[outputPos++] = 't';
-                            break;
-                        
-                        default: 
-                            output[outputPos++] = c;
-                            break;
-                        
-                    }
-                }
-            }
-        }
-    }
-}
\ No newline at end of file

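Note the sizing invariant in RemoveAccents above: every mapped character expands to at most two output characters (Æ → AE, ß → ss), so doubling the input length bounds the output; the ffi/ffl cases stay commented out precisely because they would need a 3x bound. Hypothetical usage of the deprecated filter under the 3.x API shown above:

    // Sketch against the deleted API above, not current Lucene.NET.
    TokenStream ts = new ISOLatin1AccentFilter(
        new LetterTokenizer(new System.IO.StringReader("déjà vu")));
    while (ts.IncrementToken())
    {
        // The term attribute now holds "deja", then "vu":
        // accents folded, case preserved.
    }
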
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/8a97bfcf/src/Lucene.Net.Core/Analysis/KeywordAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/KeywordAnalyzer.cs b/src/Lucene.Net.Core/Analysis/KeywordAnalyzer.cs
deleted file mode 100644
index 9083816..0000000
--- a/src/Lucene.Net.Core/Analysis/KeywordAnalyzer.cs
+++ /dev/null
@@ -1,54 +0,0 @@
-/* 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-namespace Lucene.Net.Analysis
-{
-    
-    /// <summary> "Tokenizes" the entire stream as a single token. This is useful
-    /// for data like zip codes, ids, and some product names.
-    /// </summary>
-    public class KeywordAnalyzer:Analyzer
-    {
-        public KeywordAnalyzer()
-        {
-            SetOverridesTokenStreamMethod<KeywordAnalyzer>();
-        }
-        public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
-        {
-            return new KeywordTokenizer(reader);
-        }
-        public override TokenStream ReusableTokenStream(System.String fieldName, System.IO.TextReader reader)
-        {
-            if (overridesTokenStreamMethod)
-            {
-                // LUCENE-1678: force fallback to tokenStream() if we
-                // have been subclassed and that subclass overrides
-                // tokenStream but not reusableTokenStream
-                return TokenStream(fieldName, reader);
-            }
-            var tokenizer = (Tokenizer) PreviousTokenStream;
-            if (tokenizer == null)
-            {
-                tokenizer = new KeywordTokenizer(reader);
-                PreviousTokenStream = tokenizer;
-            }
-            else
-                tokenizer.Reset(reader);
-            return tokenizer;
-        }
-    }
-}
\ No newline at end of file

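Hypothetical usage of the analyzer above: because it emits the whole field value as one token, exact-match fields such as ids survive analysis untouched.

    // Sketch against the deleted 3.x API above.
    var analyzer = new KeywordAnalyzer();
    TokenStream ts = analyzer.TokenStream("id",
        new System.IO.StringReader("AB-1234"));
    // ts yields exactly one token, "AB-1234"; no splitting, no lowercasing.
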
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/8a97bfcf/src/Lucene.Net.Core/Analysis/KeywordTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/KeywordTokenizer.cs b/src/Lucene.Net.Core/Analysis/KeywordTokenizer.cs
deleted file mode 100644
index 38f6f8a..0000000
--- a/src/Lucene.Net.Core/Analysis/KeywordTokenizer.cs
+++ /dev/null
@@ -1,99 +0,0 @@
-/* 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-using Lucene.Net.Analysis.Tokenattributes;
-using AttributeSource = Lucene.Net.Util.AttributeSource;
-
-namespace Lucene.Net.Analysis
-{
-    
-    /// <summary> Emits the entire input as a single token.</summary>
-    public sealed class KeywordTokenizer:Tokenizer
-    {
-        
-        private const int DEFAULT_BUFFER_SIZE = 256;
-        
-        private bool done;
-        private int finalOffset;
-        private ITermAttribute termAtt;
-        private IOffsetAttribute offsetAtt;
-        
-        public KeywordTokenizer(System.IO.TextReader input):this(input, DEFAULT_BUFFER_SIZE)
-        {
-        }
-        
-        public KeywordTokenizer(System.IO.TextReader input, int bufferSize):base(input)
-        {
-            Init(bufferSize);
-        }
-        
-        public KeywordTokenizer(AttributeSource source, System.IO.TextReader input, int bufferSize):base(source, input)
-        {
-            Init(bufferSize);
-        }
-        
-        public KeywordTokenizer(AttributeFactory factory, System.IO.TextReader input, int bufferSize):base(factory, input)
-        {
-            Init(bufferSize);
-        }
-        
-        private void  Init(int bufferSize)
-        {
-            this.done = false;
-            termAtt = AddAttribute<ITermAttribute>();
-            offsetAtt = AddAttribute<IOffsetAttribute>();
-            termAtt.ResizeTermBuffer(bufferSize);
-        }
-        
-        public override bool IncrementToken()
-        {
-            if (!done)
-            {
-                ClearAttributes();
-                done = true;
-                int upto = 0;
-                char[] buffer = termAtt.TermBuffer();
-                while (true)
-                {
-                    int length = input.Read(buffer, upto, buffer.Length - upto);
-                    if (length == 0)
-                        break;
-                    upto += length;
-                    if (upto == buffer.Length)
-                        buffer = termAtt.ResizeTermBuffer(1 + buffer.Length);
-                }
-                termAtt.SetTermLength(upto);
-                finalOffset = CorrectOffset(upto);
-                offsetAtt.SetOffset(CorrectOffset(0), finalOffset);
-                return true;
-            }
-            return false;
-        }
-        
-        public override void  End()
-        {
-            // set final offset 
-            offsetAtt.SetOffset(finalOffset, finalOffset);
-        }
-        
-        public override void  Reset(System.IO.TextReader input)
-        {
-            base.Reset(input);
-            this.done = false;
-        }
-    }
-}
\ No newline at end of file

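The IncrementToken loop above is the standard read-to-EOF pattern: fill the buffer, and grow it whenever it fills before the reader is exhausted. A self-contained version of that loop (illustrative, not Lucene.NET API):

    using System;

    static char[] ReadAll(System.IO.TextReader input)
    {
        char[] buffer = new char[256];
        int upto = 0;
        while (true)
        {
            int n = input.Read(buffer, upto, buffer.Length - upto);
            if (n <= 0)
                break;                 // TextReader.Read returns 0 at EOF
            upto += n;
            if (upto == buffer.Length) // full: grow before the next read
                Array.Resize(ref buffer, buffer.Length * 2);
        }
        Array.Resize(ref buffer, upto);
        return buffer;
    }
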
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/8a97bfcf/src/Lucene.Net.Core/Analysis/LengthFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/LengthFilter.cs b/src/Lucene.Net.Core/Analysis/LengthFilter.cs
deleted file mode 100644
index 1a9899f..0000000
--- a/src/Lucene.Net.Core/Analysis/LengthFilter.cs
+++ /dev/null
@@ -1,60 +0,0 @@
-/* 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-using Lucene.Net.Analysis.Tokenattributes;
-
-namespace Lucene.Net.Analysis
-{
-    
-    /// <summary>Removes words that are too long or too short from the stream.</summary>
-    public sealed class LengthFilter:TokenFilter
-    {
-        
-        internal int min;
-        internal int max;
-        
-        private readonly ITermAttribute termAtt;
-        
-        /// <summary> Build a filter that removes words that are too long or too
-        /// short from the text.
-        /// </summary>
-        public LengthFilter(TokenStream in_Renamed, int min, int max)
-            : base(in_Renamed)
-        {
-            this.min = min;
-            this.max = max;
-            termAtt = AddAttribute<ITermAttribute>();
-        }
-        
-        /// <summary> Returns the next input Token whose term is within the configured length range.</summary>
-        public override bool IncrementToken()
-        {
-            // return the first non-stop word found
-            while (input.IncrementToken())
-            {
-                var len = termAtt.TermLength();
-                if (len >= min && len <= max)
-                {
-                    return true;
-                }
-                // note: else we ignore it but should we index each part of it?
-            }
-            // reached EOS -- return false
-            return false;
-        }
-    }
-}
\ No newline at end of file

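Hypothetical usage of the filter above: keep only terms between 3 and 10 characters long.

    // Sketch against the deleted 3.x API above.
    TokenStream ts = new LengthFilter(
        new LetterTokenizer(new System.IO.StringReader(
            "a midsize extraordinarily word")),
        min: 3, max: 10);
    // Yields "midsize" and "word"; "a" (too short) and
    // "extraordinarily" (too long) are dropped.
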
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/8a97bfcf/src/Lucene.Net.Core/Analysis/LetterTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/LetterTokenizer.cs b/src/Lucene.Net.Core/Analysis/LetterTokenizer.cs
deleted file mode 100644
index ecd0cae..0000000
--- a/src/Lucene.Net.Core/Analysis/LetterTokenizer.cs
+++ /dev/null
@@ -1,57 +0,0 @@
-/* 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-using AttributeSource = Lucene.Net.Util.AttributeSource;
-
-namespace Lucene.Net.Analysis
-{
-    
-    /// <summary>A LetterTokenizer is a tokenizer that divides text at non-letters.  That's
-    /// to say, it defines tokens as maximal strings of adjacent letters, as defined
-    /// by the <see cref="char.IsLetter(char)" /> predicate.
-    /// Note: this does a decent job for most European languages, but does a terrible
-    /// job for some Asian languages, where words are not separated by spaces. 
-    /// </summary>
-    
-    public class LetterTokenizer:CharTokenizer
-    {
-        /// <summary>Construct a new LetterTokenizer. </summary>
-        public LetterTokenizer(System.IO.TextReader @in):base(@in)
-        {
-        }
-        
-        /// <summary>Construct a new LetterTokenizer using a given <see cref="AttributeSource" />. </summary>
-        public LetterTokenizer(AttributeSource source, System.IO.TextReader @in)
-            : base(source, @in)
-        {
-        }
-        
-        /// <summary>Construct a new LetterTokenizer using a given <see cref="Lucene.Net.Util.AttributeSource.AttributeFactory" />. </summary>
-        public LetterTokenizer(AttributeFactory factory, System.IO.TextReader @in)
-            : base(factory, @in)
-        {
-        }
-        
-        /// <summary>Collects only characters which satisfy
-        /// <see cref="char.IsLetter(char)" />.
-        /// </summary>
-        protected internal override bool IsTokenChar(char c)
-        {
-            return System.Char.IsLetter(c);
-        }
-    }
-}
\ No newline at end of file
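
The IsTokenChar override above is the whole extension surface of CharTokenizer. A hypothetical subclass for illustration (not part of the deleted sources), tokenizing maximal runs of digits instead of letters:

    public class DigitTokenizer : CharTokenizer
    {
        public DigitTokenizer(System.IO.TextReader @in) : base(@in) { }

        // Collects only characters which satisfy char.IsDigit.
        protected internal override bool IsTokenChar(char c)
        {
            return System.Char.IsDigit(c);
        }
    }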

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/8a97bfcf/src/Lucene.Net.Core/Analysis/LowerCaseFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/LowerCaseFilter.cs b/src/Lucene.Net.Core/Analysis/LowerCaseFilter.cs
deleted file mode 100644
index b6dcca6..0000000
--- a/src/Lucene.Net.Core/Analysis/LowerCaseFilter.cs
+++ /dev/null
@@ -1,49 +0,0 @@
-/* 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-using Lucene.Net.Analysis.Tokenattributes;
-
-namespace Lucene.Net.Analysis
-{
-    
-    /// <summary>Normalizes token text to lower case.</summary>
-    public sealed class LowerCaseFilter:TokenFilter
-    {
-        public LowerCaseFilter(TokenStream @in)
-            : base(@in)
-        {
-            termAtt = AddAttribute<ITermAttribute>();
-        }
-        
-        private readonly ITermAttribute termAtt;
-        
-        public override bool IncrementToken()
-        {
-            if (input.IncrementToken())
-            {
-                
-                char[] buffer = termAtt.TermBuffer();
-                int length = termAtt.TermLength();
-                for (int i = 0; i < length; i++)
-                    buffer[i] = System.Char.ToLower(buffer[i]);
-                
-                return true;
-            }
-            return false;
-        }
-    }
-}
\ No newline at end of file
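
Usage sketch for the deleted LowerCaseFilter, chained behind LetterTokenizer (input text illustrative):

    TokenStream ts = new LetterTokenizer(new System.IO.StringReader("Foo BAR"));
    ts = new LowerCaseFilter(ts);   // emits "foo", "bar"

Because the loop above calls System.Char.ToLower(char), lowercasing follows the current culture, so results can differ by locale (the classic Turkish dotless-I case).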

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/8a97bfcf/src/Lucene.Net.Core/Analysis/LowerCaseTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/LowerCaseTokenizer.cs b/src/Lucene.Net.Core/Analysis/LowerCaseTokenizer.cs
deleted file mode 100644
index 530b37c..0000000
--- a/src/Lucene.Net.Core/Analysis/LowerCaseTokenizer.cs
+++ /dev/null
@@ -1,60 +0,0 @@
-/* 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-using AttributeSource = Lucene.Net.Util.AttributeSource;
-
-namespace Lucene.Net.Analysis
-{
-    
-    /// <summary> LowerCaseTokenizer performs the function of LetterTokenizer
-    /// and LowerCaseFilter together.  It divides text at non-letters and converts
-    /// the letters to lower case.  While it is functionally equivalent to the combination
-    /// of LetterTokenizer and LowerCaseFilter, there is a performance advantage
-    /// to doing the two tasks at once, hence this (redundant) implementation.
-    /// <p/>
-    /// Note: this does a decent job for most European languages, but does a terrible
-    /// job for some Asian languages, where words are not separated by spaces.
-    /// </summary>
-    public sealed class LowerCaseTokenizer:LetterTokenizer
-    {
-        /// <summary>Construct a new LowerCaseTokenizer. </summary>
-        public LowerCaseTokenizer(System.IO.TextReader @in)
-            : base(@in)
-        {
-        }
-        
-        /// <summary>Construct a new LowerCaseTokenizer using a given <see cref="AttributeSource" />. </summary>
-        public LowerCaseTokenizer(AttributeSource source, System.IO.TextReader @in)
-            : base(source, @in)
-        {
-        }
-        
-        /// <summary>Construct a new LowerCaseTokenizer using a given <see cref="Lucene.Net.Util.AttributeSource.AttributeFactory" />. </summary>
-        public LowerCaseTokenizer(AttributeFactory factory, System.IO.TextReader @in)
-            : base(factory, @in)
-        {
-        }
-        
-        /// <summary>Converts the char to lower case using
-        /// <see cref="char.ToLower(char)" />.
-        /// </summary>
-        protected internal override char Normalize(char c)
-        {
-            return System.Char.ToLower(c);
-        }
-    }
-}
\ No newline at end of file
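
The equivalence claimed in the summary, as a sketch (reader1 and reader2 are hypothetical TextReaders over the same text):

    TokenStream a = new LowerCaseFilter(new LetterTokenizer(reader1));
    TokenStream b = new LowerCaseTokenizer(reader2);   // same tokens, one pass

The combined tokenizer lowercases each character in Normalize() as it is consumed, saving the separate pass over the term buffer that the filter makes.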

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/8a97bfcf/src/Lucene.Net.Core/Analysis/MappingCharFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/MappingCharFilter.cs b/src/Lucene.Net.Core/Analysis/MappingCharFilter.cs
deleted file mode 100644
index 9dd1c6d..0000000
--- a/src/Lucene.Net.Core/Analysis/MappingCharFilter.cs
+++ /dev/null
@@ -1,166 +0,0 @@
-/* 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-using System.Collections.Generic;
-
-namespace Lucene.Net.Analysis
-{
-    
-    /// <summary> Simplistic <see cref="CharFilter" /> that applies the mappings
-    /// contained in a <see cref="NormalizeCharMap" /> to the character
-    /// stream, correcting the resulting changes to the
-    /// offsets.
-    /// </summary>
-    public class MappingCharFilter : BaseCharFilter
-    {
-        private readonly NormalizeCharMap normMap;
-        private LinkedList<char> buffer;
-        private System.String replacement;
-        private int charPointer;
-        private int nextCharCounter;
-        
-        /// Constructor that takes a <see cref="CharStream" />.
-        public MappingCharFilter(NormalizeCharMap normMap, CharStream @in)
-            : base(@in)
-        {
-            this.normMap = normMap;
-        }
-        
-        /// Convenience constructor that takes a <see cref="System.IO.TextReader" />.
-        public MappingCharFilter(NormalizeCharMap normMap, System.IO.TextReader @in)
-            : base(CharReader.Get(@in))
-        {
-            this.normMap = normMap;
-        }
-        
-        public  override int Read()
-        {
-            while (true)
-            {
-                if (replacement != null && charPointer < replacement.Length)
-                {
-                    return replacement[charPointer++];
-                }
-                
-                int firstChar = NextChar();
-                if (firstChar == - 1)
-                    return - 1;
-                NormalizeCharMap nm = normMap.submap != null
-                                          ? normMap.submap[(char) firstChar]
-                                          : null;
-                if (nm == null)
-                    return firstChar;
-                NormalizeCharMap result = Match(nm);
-                if (result == null)
-                    return firstChar;
-                replacement = result.normStr;
-                charPointer = 0;
-                if (result.diff != 0)
-                {
-                    int prevCumulativeDiff = LastCumulativeDiff;
-                    if (result.diff < 0)
-                    {
-                        for (int i = 0; i < - result.diff; i++)
-                            AddOffCorrectMap(nextCharCounter + i - prevCumulativeDiff, prevCumulativeDiff - 1 - i);
-                    }
-                    else
-                    {
-                        AddOffCorrectMap(nextCharCounter - result.diff - prevCumulativeDiff, prevCumulativeDiff + result.diff);
-                    }
-                }
-            }
-        }
-        
-        private int NextChar()
-        {
-            nextCharCounter++;
-            if (buffer != null && buffer.Count != 0)
-            {
-                char tempObject = buffer.First.Value;
-                buffer.RemoveFirst();
-                return (tempObject);
-            }
-            return input.Read();
-        }
-        
-        private void  PushChar(int c)
-        {
-            nextCharCounter--;
-            if (buffer == null)
-            {
-                buffer = new LinkedList<char>();
-            }
-            buffer.AddFirst((char)c);
-        }
-        
-        private void  PushLastChar(int c)
-        {
-            if (buffer == null)
-            {
-                buffer = new LinkedList<char>();
-            }
-            buffer.AddLast((char)c);
-        }
-        
-        private NormalizeCharMap Match(NormalizeCharMap map)
-        {
-            NormalizeCharMap result = null;
-            if (map.submap != null)
-            {
-                int chr = NextChar();
-                if (chr != - 1)
-                {
-                    NormalizeCharMap subMap = map.submap[(char)chr];
-                    if (subMap != null)
-                    {
-                        result = Match(subMap);
-                    }
-                    if (result == null)
-                    {
-                        PushChar(chr);
-                    }
-                }
-            }
-            if (result == null && map.normStr != null)
-            {
-                result = map;
-            }
-            return result;
-        }
-        
-        public  override int Read(System.Char[] cbuf, int off, int len)
-        {
-            var tmp = new char[len];
-            int l = input.Read(tmp, 0, len);
-            if (l != 0)
-            {
-                for (int i = 0; i < l; i++)
-                    PushLastChar(tmp[i]);
-            }
-            l = 0;
-            for (int i = off; i < off + len; i++)
-            {
-                int c = Read();
-                if (c == - 1)
-                    break;
-                cbuf[i] = (char) c;
-                l++;
-            }
-            return l == 0?- 1:l;
-        }
-    }
-}
\ No newline at end of file
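
A usage sketch for the deleted MappingCharFilter (mappings illustrative; NormalizeCharMap is the next file in this diff):

    var map = new NormalizeCharMap();
    map.Add("\u00df", "ss");    // "ß" -> "ss": diff = 1 - 2 = -1, the expanding branch of Read()
    map.Add("quatre", "4");     // diff = 6 - 1 = 5, the shrinking branch

    CharStream cs = new MappingCharFilter(map, new System.IO.StringReader("stra\u00dfe"));
    // Downstream tokenizers see "strasse"; CorrectOffset() maps their offsets
    // back to the original text via the AddOffCorrectMap calls above.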

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/8a97bfcf/src/Lucene.Net.Core/Analysis/NormalizeCharMap.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/NormalizeCharMap.cs b/src/Lucene.Net.Core/Analysis/NormalizeCharMap.cs
deleted file mode 100644
index 5d6d558..0000000
--- a/src/Lucene.Net.Core/Analysis/NormalizeCharMap.cs
+++ /dev/null
@@ -1,68 +0,0 @@
-/* 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-using Lucene.Net.Support;
-
-namespace Lucene.Net.Analysis
-{
-    
-    /// <summary> Holds a map of String input to String output, to be used
-    /// with <see cref="MappingCharFilter" />.
-    /// </summary>
-    public class NormalizeCharMap
-    {
-        internal System.Collections.Generic.IDictionary<char, NormalizeCharMap> submap;
-        internal System.String normStr;
-        internal int diff;
-        
-        /// <summary>Records a replacement to be applied to the input
-        /// stream.  Whenever <c>singleMatch</c> occurs in
-        /// the input, it will be replaced with
-        /// <c>replacement</c>.
-        /// 
-        /// </summary>
-        /// <param name="singleMatch">input String to be replaced
-        /// </param>
-        /// <param name="replacement">output String
-        /// </param>
-        public virtual void  Add(System.String singleMatch, System.String replacement)
-        {
-            NormalizeCharMap currMap = this;
-            for (var i = 0; i < singleMatch.Length; i++)
-            {
-                char c = singleMatch[i];
-                if (currMap.submap == null)
-                {
-                    currMap.submap = new HashMap<char, NormalizeCharMap>(1);
-                }
-                var map = currMap.submap[c];
-                if (map == null)
-                {
-                    map = new NormalizeCharMap();
-                    currMap.submap[c] = map;
-                }
-                currMap = map;
-            }
-            if (currMap.normStr != null)
-            {
-                throw new System.SystemException("MappingCharFilter: there is already a mapping for " + singleMatch);
-            }
-            currMap.normStr = replacement;
-            currMap.diff = singleMatch.Length - replacement.Length;
-        }
-    }
-}
\ No newline at end of file
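
Add() extends a per-character trie, and MappingCharFilter.Match() returns the deepest node with a replacement, so longer mappings win. A sketch:

    var map = new NormalizeCharMap();
    map.Add("c", "k");
    map.Add("ch", "x");     // shares the 'c' node; input "ch" yields "x", a lone "c" yields "k"
    // map.Add("c", "q");   // would throw: a mapping for "c" already exists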

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/8a97bfcf/src/Lucene.Net.Core/Analysis/PerFieldAnalyzerWrapper.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/PerFieldAnalyzerWrapper.cs b/src/Lucene.Net.Core/Analysis/PerFieldAnalyzerWrapper.cs
deleted file mode 100644
index 45e2344..0000000
--- a/src/Lucene.Net.Core/Analysis/PerFieldAnalyzerWrapper.cs
+++ /dev/null
@@ -1,135 +0,0 @@
-/* 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-using System.Collections.Generic;
-using Lucene.Net.Support;
-
-namespace Lucene.Net.Analysis
-{
-    
-    /// <summary> This analyzer is used to facilitate scenarios where different
-    /// fields require different analysis techniques.  Use <see cref="AddAnalyzer" />
-    /// to add a non-default analyzer on a field name basis.
-    /// 
-    /// <p/>Example usage:
-    /// 
-    /// <code>
-    /// PerFieldAnalyzerWrapper aWrapper =
-    /// new PerFieldAnalyzerWrapper(new StandardAnalyzer());
-    /// aWrapper.addAnalyzer("firstname", new KeywordAnalyzer());
-    /// aWrapper.addAnalyzer("lastname", new KeywordAnalyzer());
-    /// </code>
-    /// 
-    /// <p/>In this example, StandardAnalyzer will be used for all fields except "firstname"
-    /// and "lastname", for which KeywordAnalyzer will be used.
-    /// 
-    /// <p/>A PerFieldAnalyzerWrapper can be used like any other analyzer, for both indexing
-    /// and query parsing.
-    /// </summary>
-    public class PerFieldAnalyzerWrapper:Analyzer
-    {
-        private readonly Analyzer defaultAnalyzer;
-        private readonly IDictionary<string, Analyzer> analyzerMap = new HashMap<string, Analyzer>();
-        
-        
-        /// <summary> Constructs with default analyzer.
-        /// 
-        /// </summary>
-        /// <param name="defaultAnalyzer">Any fields not specifically
-        /// defined to use a different analyzer will use the one provided here.
-        /// </param>
-        public PerFieldAnalyzerWrapper(Analyzer defaultAnalyzer)
-            : this(defaultAnalyzer, null)
-        {
-        }
-        
-        /// <summary> Constructs with default analyzer and a map of analyzers to use for 
-        /// specific fields.
-        /// 
-        /// </summary>
-        /// <param name="defaultAnalyzer">Any fields not specifically
-        /// defined to use a different analyzer will use the one provided here.
-        /// </param>
-        /// <param name="fieldAnalyzers">a Map (String field name to the Analyzer) to be 
-        /// used for those fields 
-        /// </param>
-        public PerFieldAnalyzerWrapper(Analyzer defaultAnalyzer, IEnumerable<KeyValuePair<string, Analyzer>> fieldAnalyzers)
-        {
-            this.defaultAnalyzer = defaultAnalyzer;
-            if (fieldAnalyzers != null)
-            {
-                foreach(var entry in fieldAnalyzers)
-                    analyzerMap[entry.Key] = entry.Value;
-            }
-            SetOverridesTokenStreamMethod<PerFieldAnalyzerWrapper>();
-        }
-        
-        
-        /// <summary> Defines an analyzer to use for the specified field.
-        /// 
-        /// </summary>
-        /// <param name="fieldName">field name requiring a non-default analyzer
-        /// </param>
-        /// <param name="analyzer">non-default analyzer to use for field
-        /// </param>
-        public virtual void  AddAnalyzer(System.String fieldName, Analyzer analyzer)
-        {
-            analyzerMap[fieldName] = analyzer;
-        }
-        
-        public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
-        {
-            var analyzer = analyzerMap[fieldName] ?? defaultAnalyzer;
-
-            return analyzer.TokenStream(fieldName, reader);
-        }
-        
-        public override TokenStream ReusableTokenStream(string fieldName, System.IO.TextReader reader)
-        {
-            if (overridesTokenStreamMethod)
-            {
-                // LUCENE-1678: force fallback to tokenStream() if we
-                // have been subclassed and that subclass overrides
-                // tokenStream but not reusableTokenStream
-                return TokenStream(fieldName, reader);
-            }
-            var analyzer = analyzerMap[fieldName] ?? defaultAnalyzer;
-
-            return analyzer.ReusableTokenStream(fieldName, reader);
-        }
-        
-        /// <summary>Return the positionIncrementGap from the analyzer assigned to fieldName </summary>
-        public override int GetPositionIncrementGap(string fieldName)
-        {
-            var analyzer = analyzerMap[fieldName] ?? defaultAnalyzer;
-            return analyzer.GetPositionIncrementGap(fieldName);
-        }
-
-        /// <summary> Return the offsetGap from the analyzer assigned to field </summary>
-        public override int GetOffsetGap(Documents.IFieldable field)
-        {
-            Analyzer analyzer = analyzerMap[field.Name] ?? defaultAnalyzer;
-            return analyzer.GetOffsetGap(field);
-        }
-        
-        public override System.String ToString()
-        {
-            // {{Aroush-2.9}} will 'analyzerMap.ToString()' work in the same way as Java's java.util.HashMap.toString()? 
-            return "PerFieldAnalyzerWrapper(" + analyzerMap + ", default=" + defaultAnalyzer + ")";
-        }
-    }
-}
\ No newline at end of file
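
The example in the summary is Java-flavored; against this 3.x-era C# API it would read roughly as follows (a sketch: StandardAnalyzer's Version argument and the field names are illustrative):

    var wrapper = new PerFieldAnalyzerWrapper(new StandardAnalyzer(Lucene.Net.Util.Version.LUCENE_30));
    wrapper.AddAnalyzer("firstname", new KeywordAnalyzer());
    wrapper.AddAnalyzer("lastname", new KeywordAnalyzer());
    // Those two fields tokenize as single keywords; every other field falls
    // through to the StandardAnalyzer default in TokenStream() above.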

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/8a97bfcf/src/Lucene.Net.Core/Analysis/PorterStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Analysis/PorterStemFilter.cs b/src/Lucene.Net.Core/Analysis/PorterStemFilter.cs
deleted file mode 100644
index b4f14dc..0000000
--- a/src/Lucene.Net.Core/Analysis/PorterStemFilter.cs
+++ /dev/null
@@ -1,62 +0,0 @@
-/* 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-using Lucene.Net.Analysis.Tokenattributes;
-
-namespace Lucene.Net.Analysis
-{
-    
-    /// <summary>Transforms the token stream as per the Porter stemming algorithm.
-    /// Note: the input to the stemming filter must already be in lower case,
-    /// so you will need to use LowerCaseFilter or LowerCaseTokenizer farther
-    /// down the Tokenizer chain in order for this to work properly!
-    /// <p/>
-    /// To use this filter with other analyzers, you'll want to write an
-    /// Analyzer class that sets up the TokenStream chain as you want it.
-    /// To use this with LowerCaseTokenizer, for example, you'd write an
-    /// analyzer like this:
-    /// <p/>
-    /// <code>
-    /// class MyAnalyzer extends Analyzer {
-    ///     public final TokenStream tokenStream(String fieldName, Reader reader) {
-    ///          return new PorterStemFilter(new LowerCaseTokenizer(reader));
-    ///     }
-    /// }
-    /// </code>
-    /// </summary>
-    public sealed class PorterStemFilter:TokenFilter
-    {
-        private readonly PorterStemmer stemmer;
-        private readonly ITermAttribute termAtt;
-        
-        public PorterStemFilter(TokenStream in_Renamed):base(in_Renamed)
-        {
-            stemmer = new PorterStemmer();
-            termAtt = AddAttribute<ITermAttribute>();
-        }
-        
-        public override bool IncrementToken()
-        {
-            if (!input.IncrementToken())
-                return false;
-            
-            if (stemmer.Stem(termAtt.TermBuffer(), 0, termAtt.TermLength()))
-                termAtt.SetTermBuffer(stemmer.ResultBuffer, 0, stemmer.ResultLength);
-            return true;
-        }
-    }
-}
\ No newline at end of file
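
The Java-style example in the summary, rendered as C# against this API (a sketch; the analyzer class name is illustrative):

    class MyAnalyzer : Analyzer
    {
        public override TokenStream TokenStream(string fieldName, System.IO.TextReader reader)
        {
            // Lowercase first: the stemmer requires lower-case input, per the note above.
            return new PorterStemFilter(new LowerCaseTokenizer(reader));
        }
    }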

