lucenenet-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ccurr...@apache.org
Subject [23/51] [partial] Mass convert mixed tabs to spaces
Date Wed, 03 Apr 2013 17:40:06 GMT
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Tokenattributes/IPayloadAttribute.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Tokenattributes/IPayloadAttribute.cs b/src/core/Analysis/Tokenattributes/IPayloadAttribute.cs
index 7e313ce..6f6fe48 100644
--- a/src/core/Analysis/Tokenattributes/IPayloadAttribute.cs
+++ b/src/core/Analysis/Tokenattributes/IPayloadAttribute.cs
@@ -21,11 +21,11 @@ using Payload = Lucene.Net.Index.Payload;
 
 namespace Lucene.Net.Analysis.Tokenattributes
 {
-	
-	/// <summary> The payload of a Token. See also <see cref="Payload" />.</summary>
-	public interface IPayloadAttribute:IAttribute
-	{
-	    /// <summary> Returns this Token's payload.</summary>
-	    Payload Payload { get; set; }
-	}
+    
+    /// <summary> The payload of a Token. See also <see cref="Payload" />.</summary>
+    public interface IPayloadAttribute:IAttribute
+    {
+        /// <summary> Returns this Token's payload.</summary>
+        Payload Payload { get; set; }
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Tokenattributes/IPositionIncrementAttribute.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Tokenattributes/IPositionIncrementAttribute.cs b/src/core/Analysis/Tokenattributes/IPositionIncrementAttribute.cs
index 6c2a131..2bb9af0 100644
--- a/src/core/Analysis/Tokenattributes/IPositionIncrementAttribute.cs
+++ b/src/core/Analysis/Tokenattributes/IPositionIncrementAttribute.cs
@@ -20,40 +20,40 @@ using Lucene.Net.Util;
 
 namespace Lucene.Net.Analysis.Tokenattributes
 {
-	
-	/// <summary>The positionIncrement determines the position of this token
-	/// relative to the previous Token in a TokenStream, used in phrase
-	/// searching.
-	/// 
-	/// <p/>The default value is one.
-	/// 
-	/// <p/>Some common uses for this are:<list>
-	/// 
-	/// <item>Set it to zero to put multiple terms in the same position.  This is
-	/// useful if, e.g., a word has multiple stems.  Searches for phrases
-	/// including either stem will match.  In this case, all but the first stem's
-	/// increment should be set to zero: the increment of the first instance
-	/// should be one.  Repeating a token with an increment of zero can also be
-	/// used to boost the scores of matches on that token.</item>
-	/// 
-	/// <item>Set it to values greater than one to inhibit exact phrase matches.
-	/// If, for example, one does not want phrases to match across removed stop
-	/// words, then one could build a stop word filter that removes stop words and
-	/// also sets the increment to the number of stop words removed before each
-	/// non-stop word.  Then exact phrase queries will only match when the terms
-	/// occur with no intervening stop words.</item>
-	/// 
-	/// </list>
-	/// 
-	/// </summary>
-	/// <seealso cref="Lucene.Net.Index.TermPositions">
-	/// </seealso>
-	public interface IPositionIncrementAttribute:IAttribute
-	{
-	    /// <summary>Gets or sets the position increment. The default value is one.
-	    /// 
-	    /// </summary>
-	    /// <value> the distance from the prior term </value>
-	    int PositionIncrement { set; get; }
-	}
+    
+    /// <summary>The positionIncrement determines the position of this token
+    /// relative to the previous Token in a TokenStream, used in phrase
+    /// searching.
+    /// 
+    /// <p/>The default value is one.
+    /// 
+    /// <p/>Some common uses for this are:<list>
+    /// 
+    /// <item>Set it to zero to put multiple terms in the same position.  This is
+    /// useful if, e.g., a word has multiple stems.  Searches for phrases
+    /// including either stem will match.  In this case, all but the first stem's
+    /// increment should be set to zero: the increment of the first instance
+    /// should be one.  Repeating a token with an increment of zero can also be
+    /// used to boost the scores of matches on that token.</item>
+    /// 
+    /// <item>Set it to values greater than one to inhibit exact phrase matches.
+    /// If, for example, one does not want phrases to match across removed stop
+    /// words, then one could build a stop word filter that removes stop words and
+    /// also sets the increment to the number of stop words removed before each
+    /// non-stop word.  Then exact phrase queries will only match when the terms
+    /// occur with no intervening stop words.</item>
+    /// 
+    /// </list>
+    /// 
+    /// </summary>
+    /// <seealso cref="Lucene.Net.Index.TermPositions">
+    /// </seealso>
+    public interface IPositionIncrementAttribute:IAttribute
+    {
+        /// <summary>Gets or sets the position increment. The default value is one.
+        /// 
+        /// </summary>
+        /// <value> the distance from the prior term </value>
+        int PositionIncrement { set; get; }
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Tokenattributes/ITermAttribute.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Tokenattributes/ITermAttribute.cs b/src/core/Analysis/Tokenattributes/ITermAttribute.cs
index 8f9b030..2e7db2a 100644
--- a/src/core/Analysis/Tokenattributes/ITermAttribute.cs
+++ b/src/core/Analysis/Tokenattributes/ITermAttribute.cs
@@ -20,85 +20,85 @@ using Lucene.Net.Util;
 
 namespace Lucene.Net.Analysis.Tokenattributes
 {
-	
-	/// <summary> The term text of a Token.</summary>
-	public interface ITermAttribute:IAttribute
-	{
-	    /// <summary>Returns the Token's term text.
-	    /// 
-	    /// This method has a performance penalty
-	    /// because the text is stored internally in a char[].  If
-	    /// possible, use <see cref="TermBuffer()" /> and <see cref="TermLength()" />
-	    /// directly instead.  If you really need a
-	    /// String, use this method, which is nothing more than
-	    /// a convenience call to <b>new String(token.termBuffer(), 0, token.termLength())</b>
-	    /// </summary>
-	    string Term { get; }
+    
+    /// <summary> The term text of a Token.</summary>
+    public interface ITermAttribute:IAttribute
+    {
+        /// <summary>Returns the Token's term text.
+        /// 
+        /// This method has a performance penalty
+        /// because the text is stored internally in a char[].  If
+        /// possible, use <see cref="TermBuffer()" /> and <see cref="TermLength()" />
+        /// directly instead.  If you really need a
+        /// String, use this method, which is nothing more than
+        /// a convenience call to <b>new String(token.termBuffer(), 0, token.termLength())</b>
+        /// </summary>
+        string Term { get; }
 
-	    /// <summary>Copies the contents of buffer, starting at offset for
-		/// length characters, into the termBuffer array.
-		/// </summary>
-		/// <param name="buffer">the buffer to copy
-		/// </param>
-		/// <param name="offset">the index in the buffer of the first character to copy
-		/// </param>
-		/// <param name="length">the number of characters to copy
-		/// </param>
-		void  SetTermBuffer(char[] buffer, int offset, int length);
-		
-		/// <summary>Copies the contents of buffer into the termBuffer array.</summary>
-		/// <param name="buffer">the buffer to copy
-		/// </param>
-		void  SetTermBuffer(System.String buffer);
-		
-		/// <summary>Copies the contents of buffer, starting at offset and continuing
-		/// for length characters, into the termBuffer array.
-		/// </summary>
-		/// <param name="buffer">the buffer to copy
-		/// </param>
-		/// <param name="offset">the index in the buffer of the first character to copy
-		/// </param>
-		/// <param name="length">the number of characters to copy
-		/// </param>
-		void  SetTermBuffer(System.String buffer, int offset, int length);
-		
-		/// <summary>Returns the internal termBuffer character array which
-		/// you can then directly alter.  If the array is too
-		/// small for your token, use <see cref="ResizeTermBuffer(int)" />
-		/// to increase it.  After
-		/// altering the buffer be sure to call <see cref="SetTermLength" />
-		/// to record the number of valid
-		/// characters that were placed into the termBuffer. 
-		/// </summary>
-		char[] TermBuffer();
-		
-		/// <summary>Grows the termBuffer to at least size newSize, preserving the
-		/// existing content. Note: If the next operation is to change
-		/// the contents of the term buffer use
-		/// <see cref="SetTermBuffer(char[], int, int)" />,
-		/// <see cref="SetTermBuffer(String)" />, or
-		/// <see cref="SetTermBuffer(String, int, int)" />
-		/// to optimally combine the resize with the setting of the termBuffer.
-		/// </summary>
-		/// <param name="newSize">minimum size of the new termBuffer
-		/// </param>
-		/// <returns> newly created termBuffer with length >= newSize
-		/// </returns>
-		char[] ResizeTermBuffer(int newSize);
-		
-		/// <summary>Return number of valid characters (length of the term)
-		/// in the termBuffer array. 
-		/// </summary>
-		int TermLength();
-		
-		/// <summary>Set number of valid characters (length of the term) in
-		/// the termBuffer array. Use this to truncate the termBuffer
-		/// or to synchronize with external manipulation of the termBuffer.
-		/// Note: to grow the size of the array,
-		/// use <see cref="ResizeTermBuffer(int)" /> first.
-		/// </summary>
-		/// <param name="length">the truncated length
-		/// </param>
-		void  SetTermLength(int length);
-	}
+        /// <summary>Copies the contents of buffer, starting at offset for
+        /// length characters, into the termBuffer array.
+        /// </summary>
+        /// <param name="buffer">the buffer to copy
+        /// </param>
+        /// <param name="offset">the index in the buffer of the first character to copy
+        /// </param>
+        /// <param name="length">the number of characters to copy
+        /// </param>
+        void  SetTermBuffer(char[] buffer, int offset, int length);
+        
+        /// <summary>Copies the contents of buffer into the termBuffer array.</summary>
+        /// <param name="buffer">the buffer to copy
+        /// </param>
+        void  SetTermBuffer(System.String buffer);
+        
+        /// <summary>Copies the contents of buffer, starting at offset and continuing
+        /// for length characters, into the termBuffer array.
+        /// </summary>
+        /// <param name="buffer">the buffer to copy
+        /// </param>
+        /// <param name="offset">the index in the buffer of the first character to copy
+        /// </param>
+        /// <param name="length">the number of characters to copy
+        /// </param>
+        void  SetTermBuffer(System.String buffer, int offset, int length);
+        
+        /// <summary>Returns the internal termBuffer character array which
+        /// you can then directly alter.  If the array is too
+        /// small for your token, use <see cref="ResizeTermBuffer(int)" />
+        /// to increase it.  After
+        /// altering the buffer be sure to call <see cref="SetTermLength" />
+        /// to record the number of valid
+        /// characters that were placed into the termBuffer. 
+        /// </summary>
+        char[] TermBuffer();
+        
+        /// <summary>Grows the termBuffer to at least size newSize, preserving the
+        /// existing content. Note: If the next operation is to change
+        /// the contents of the term buffer use
+        /// <see cref="SetTermBuffer(char[], int, int)" />,
+        /// <see cref="SetTermBuffer(String)" />, or
+        /// <see cref="SetTermBuffer(String, int, int)" />
+        /// to optimally combine the resize with the setting of the termBuffer.
+        /// </summary>
+        /// <param name="newSize">minimum size of the new termBuffer
+        /// </param>
+        /// <returns> newly created termBuffer with length >= newSize
+        /// </returns>
+        char[] ResizeTermBuffer(int newSize);
+        
+        /// <summary>Return number of valid characters (length of the term)
+        /// in the termBuffer array. 
+        /// </summary>
+        int TermLength();
+        
+        /// <summary>Set number of valid characters (length of the term) in
+        /// the termBuffer array. Use this to truncate the termBuffer
+        /// or to synchronize with external manipulation of the termBuffer.
+        /// Note: to grow the size of the array,
+        /// use <see cref="ResizeTermBuffer(int)" /> first.
+        /// </summary>
+        /// <param name="length">the truncated length
+        /// </param>
+        void  SetTermLength(int length);
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Tokenattributes/ITypeAttribute.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Tokenattributes/ITypeAttribute.cs b/src/core/Analysis/Tokenattributes/ITypeAttribute.cs
index 48bcc10..81ccc62 100644
--- a/src/core/Analysis/Tokenattributes/ITypeAttribute.cs
+++ b/src/core/Analysis/Tokenattributes/ITypeAttribute.cs
@@ -20,11 +20,11 @@ using Lucene.Net.Util;
 
 namespace Lucene.Net.Analysis.Tokenattributes
 {
-	
-	/// <summary> A Token's lexical type. The Default value is "word". </summary>
-	public interface ITypeAttribute:IAttribute
-	{
-	    /// <summary>Gets or sets this Token's lexical type.  Defaults to "word". </summary>
-	    string Type { get; set; }
-	}
+    
+    /// <summary> A Token's lexical type. The Default value is "word". </summary>
+    public interface ITypeAttribute:IAttribute
+    {
+        /// <summary>Gets or sets this Token's lexical type.  Defaults to "word". </summary>
+        string Type { get; set; }
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Tokenattributes/OffsetAttribute.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Tokenattributes/OffsetAttribute.cs b/src/core/Analysis/Tokenattributes/OffsetAttribute.cs
index 5149559..f329b03 100644
--- a/src/core/Analysis/Tokenattributes/OffsetAttribute.cs
+++ b/src/core/Analysis/Tokenattributes/OffsetAttribute.cs
@@ -20,87 +20,87 @@ using Attribute = Lucene.Net.Util.Attribute;
 
 namespace Lucene.Net.Analysis.Tokenattributes
 {
-	
-	/// <summary> The start and end character offset of a Token. </summary>
-	[Serializable]
-	public class OffsetAttribute:Attribute, IOffsetAttribute, System.ICloneable
-	{
-		private int startOffset;
-		private int endOffset;
+    
+    /// <summary> The start and end character offset of a Token. </summary>
+    [Serializable]
+    public class OffsetAttribute:Attribute, IOffsetAttribute, System.ICloneable
+    {
+        private int startOffset;
+        private int endOffset;
 
-	    /// <summary>Returns this Token's starting offset, the position of the first character
-	    /// corresponding to this token in the source text.
-	    /// Note that the difference between endOffset() and startOffset() may not be
-	    /// equal to termText.length(), as the term text may have been altered by a
-	    /// stemmer or some other filter. 
-	    /// </summary>
-	    public virtual int StartOffset
-	    {
-	        get { return startOffset; }
-	    }
+        /// <summary>Returns this Token's starting offset, the position of the first character
+        /// corresponding to this token in the source text.
+        /// Note that the difference between endOffset() and startOffset() may not be
+        /// equal to termText.length(), as the term text may have been altered by a
+        /// stemmer or some other filter. 
+        /// </summary>
+        public virtual int StartOffset
+        {
+            get { return startOffset; }
+        }
 
 
-	    /// <summary>Set the starting and ending offset.
+        /// <summary>Set the starting and ending offset.
         /// See StartOffset() and EndOffset()
         /// </summary>
-		public virtual void  SetOffset(int startOffset, int endOffset)
-		{
-			this.startOffset = startOffset;
-			this.endOffset = endOffset;
-		}
+        public virtual void  SetOffset(int startOffset, int endOffset)
+        {
+            this.startOffset = startOffset;
+            this.endOffset = endOffset;
+        }
 
 
-	    /// <summary>Returns this Token's ending offset, one greater than the position of the
-	    /// last character corresponding to this token in the source text. The length
-	    /// of the token in the source text is (endOffset - startOffset). 
-	    /// </summary>
-	    public virtual int EndOffset
-	    {
-	        get { return endOffset; }
-	    }
+        /// <summary>Returns this Token's ending offset, one greater than the position of the
+        /// last character corresponding to this token in the source text. The length
+        /// of the token in the source text is (endOffset - startOffset). 
+        /// </summary>
+        public virtual int EndOffset
+        {
+            get { return endOffset; }
+        }
 
 
-	    public override void  Clear()
-		{
-			startOffset = 0;
-			endOffset = 0;
-		}
-		
-		public  override bool Equals(System.Object other)
-		{
-			if (other == this)
-			{
-				return true;
-			}
-			
-			if (other is OffsetAttribute)
-			{
-				OffsetAttribute o = (OffsetAttribute) other;
-				return o.startOffset == startOffset && o.endOffset == endOffset;
-			}
-			
-			return false;
-		}
-		
-		public override int GetHashCode()
-		{
-			int code = startOffset;
-			code = code * 31 + endOffset;
-			return code;
-		}
-		
-		public override void  CopyTo(Attribute target)
-		{
-			IOffsetAttribute t = (IOffsetAttribute) target;
-			t.SetOffset(startOffset, endOffset);
-		}
-		
-		override public System.Object Clone()
-		{
+        public override void  Clear()
+        {
+            startOffset = 0;
+            endOffset = 0;
+        }
+        
+        public  override bool Equals(System.Object other)
+        {
+            if (other == this)
+            {
+                return true;
+            }
+            
+            if (other is OffsetAttribute)
+            {
+                OffsetAttribute o = (OffsetAttribute) other;
+                return o.startOffset == startOffset && o.endOffset == endOffset;
+            }
+            
+            return false;
+        }
+        
+        public override int GetHashCode()
+        {
+            int code = startOffset;
+            code = code * 31 + endOffset;
+            return code;
+        }
+        
+        public override void  CopyTo(Attribute target)
+        {
+            IOffsetAttribute t = (IOffsetAttribute) target;
+            t.SetOffset(startOffset, endOffset);
+        }
+        
+        override public System.Object Clone()
+        {
             OffsetAttribute impl = new OffsetAttribute();
             impl.endOffset = endOffset;
             impl.startOffset = startOffset;
             return impl;
-		}
-	}
+        }
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Tokenattributes/PayloadAttribute.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Tokenattributes/PayloadAttribute.cs b/src/core/Analysis/Tokenattributes/PayloadAttribute.cs
index ae1c4d9..7bd7cbe 100644
--- a/src/core/Analysis/Tokenattributes/PayloadAttribute.cs
+++ b/src/core/Analysis/Tokenattributes/PayloadAttribute.cs
@@ -21,80 +21,80 @@ using Payload = Lucene.Net.Index.Payload;
 
 namespace Lucene.Net.Analysis.Tokenattributes
 {
-	
-	/// <summary> The payload of a Token. See also <see cref="Payload" />.</summary>
-	[Serializable]
-	public class PayloadAttribute:Attribute, IPayloadAttribute, System.ICloneable
-	{
-		private Payload payload;
-		
-		/// <summary> Initialize this attribute with no payload.</summary>
-		public PayloadAttribute()
-		{
-		}
-		
-		/// <summary> Initialize this attribute with the given payload. </summary>
-		public PayloadAttribute(Payload payload)
-		{
-			this.payload = payload;
-		}
+    
+    /// <summary> The payload of a Token. See also <see cref="Payload" />.</summary>
+    [Serializable]
+    public class PayloadAttribute:Attribute, IPayloadAttribute, System.ICloneable
+    {
+        private Payload payload;
+        
+        /// <summary> Initialize this attribute with no payload.</summary>
+        public PayloadAttribute()
+        {
+        }
+        
+        /// <summary> Initialize this attribute with the given payload. </summary>
+        public PayloadAttribute(Payload payload)
+        {
+            this.payload = payload;
+        }
 
-	    /// <summary> Returns this Token's payload.</summary>
-	    public virtual Payload Payload
-	    {
-	        get { return this.payload; }
-	        set { this.payload = value; }
-	    }
+        /// <summary> Returns this Token's payload.</summary>
+        public virtual Payload Payload
+        {
+            get { return this.payload; }
+            set { this.payload = value; }
+        }
 
-	    public override void  Clear()
-		{
-			payload = null;
-		}
-		
-		public override System.Object Clone()
-		{
-		    var clone = (PayloadAttribute) base.Clone();
+        public override void  Clear()
+        {
+            payload = null;
+        }
+        
+        public override System.Object Clone()
+        {
+            var clone = (PayloadAttribute) base.Clone();
             if (payload != null)
             {
                 clone.payload = (Payload) payload.Clone();
             }
-		    return clone;
+            return clone;
             // TODO: This code use to be as below.  Any reason why?  the if(payload!=null) was missing...
-		    //PayloadAttributeImpl impl = new PayloadAttributeImpl();
-		    //impl.payload = new Payload(this.payload.data, this.payload.offset, this.payload.length);
-		    //return impl;
-		}
-		
-		public  override bool Equals(System.Object other)
-		{
-			if (other == this)
-			{
-				return true;
-			}
-			
-			if (other is IPayloadAttribute)
-			{
-				PayloadAttribute o = (PayloadAttribute) other;
-				if (o.payload == null || payload == null)
-				{
-					return o.payload == null && payload == null;
-				}
-				
-				return o.payload.Equals(payload);
-			}
-			
-			return false;
-		}
-		
-		public override int GetHashCode()
-		{
-			return (payload == null)?0:payload.GetHashCode();
-		}
-		
-		public override void  CopyTo(Attribute target)
-		{
-			IPayloadAttribute t = (IPayloadAttribute) target;
-			t.Payload = (payload == null)?null:(Payload) payload.Clone();
-		}
-	}
+            //PayloadAttributeImpl impl = new PayloadAttributeImpl();
+            //impl.payload = new Payload(this.payload.data, this.payload.offset, this.payload.length);
+            //return impl;
+        }
+        
+        public  override bool Equals(System.Object other)
+        {
+            if (other == this)
+            {
+                return true;
+            }
+            
+            if (other is IPayloadAttribute)
+            {
+                PayloadAttribute o = (PayloadAttribute) other;
+                if (o.payload == null || payload == null)
+                {
+                    return o.payload == null && payload == null;
+                }
+                
+                return o.payload.Equals(payload);
+            }
+            
+            return false;
+        }
+        
+        public override int GetHashCode()
+        {
+            return (payload == null)?0:payload.GetHashCode();
+        }
+        
+        public override void  CopyTo(Attribute target)
+        {
+            IPayloadAttribute t = (IPayloadAttribute) target;
+            t.Payload = (payload == null)?null:(Payload) payload.Clone();
+        }
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Tokenattributes/PositionIncrementAttribute.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Tokenattributes/PositionIncrementAttribute.cs b/src/core/Analysis/Tokenattributes/PositionIncrementAttribute.cs
index 4f7a04f..b2293ca 100644
--- a/src/core/Analysis/Tokenattributes/PositionIncrementAttribute.cs
+++ b/src/core/Analysis/Tokenattributes/PositionIncrementAttribute.cs
@@ -21,87 +21,87 @@ using TokenStream = Lucene.Net.Analysis.TokenStream;
 
 namespace Lucene.Net.Analysis.Tokenattributes
 {
-	
-	/// <summary>The positionIncrement determines the position of this token
-	/// relative to the previous Token in a <see cref="TokenStream" />, used in phrase
-	/// searching.
-	/// 
-	/// <p/>The default value is one.
-	/// 
-	/// <p/>Some common uses for this are:<list>
-	/// 
-	/// <item>Set it to zero to put multiple terms in the same position.  This is
-	/// useful if, e.g., a word has multiple stems.  Searches for phrases
-	/// including either stem will match.  In this case, all but the first stem's
-	/// increment should be set to zero: the increment of the first instance
-	/// should be one.  Repeating a token with an increment of zero can also be
-	/// used to boost the scores of matches on that token.</item>
-	/// 
-	/// <item>Set it to values greater than one to inhibit exact phrase matches.
-	/// If, for example, one does not want phrases to match across removed stop
-	/// words, then one could build a stop word filter that removes stop words and
-	/// also sets the increment to the number of stop words removed before each
-	/// non-stop word.  Then exact phrase queries will only match when the terms
-	/// occur with no intervening stop words.</item>
-	/// 
-	/// </list>
-	/// </summary>
-	[Serializable]
-	public class PositionIncrementAttribute:Attribute, IPositionIncrementAttribute, System.ICloneable
-	{
-		private int positionIncrement = 1;
+    
+    /// <summary>The positionIncrement determines the position of this token
+    /// relative to the previous Token in a <see cref="TokenStream" />, used in phrase
+    /// searching.
+    /// 
+    /// <p/>The default value is one.
+    /// 
+    /// <p/>Some common uses for this are:<list>
+    /// 
+    /// <item>Set it to zero to put multiple terms in the same position.  This is
+    /// useful if, e.g., a word has multiple stems.  Searches for phrases
+    /// including either stem will match.  In this case, all but the first stem's
+    /// increment should be set to zero: the increment of the first instance
+    /// should be one.  Repeating a token with an increment of zero can also be
+    /// used to boost the scores of matches on that token.</item>
+    /// 
+    /// <item>Set it to values greater than one to inhibit exact phrase matches.
+    /// If, for example, one does not want phrases to match across removed stop
+    /// words, then one could build a stop word filter that removes stop words and
+    /// also sets the increment to the number of stop words removed before each
+    /// non-stop word.  Then exact phrase queries will only match when the terms
+    /// occur with no intervening stop words.</item>
+    /// 
+    /// </list>
+    /// </summary>
+    [Serializable]
+    public class PositionIncrementAttribute:Attribute, IPositionIncrementAttribute, System.ICloneable
+    {
+        private int positionIncrement = 1;
 
-	    /// <summary>Set the position increment. The default value is one.
-	    /// 
-	    /// </summary>
-	    /// <value> the distance from the prior term </value>
-	    public virtual int PositionIncrement
-	    {
-	        set
-	        {
-	            if (value < 0)
-	                throw new System.ArgumentException("Increment must be zero or greater: " + value);
-	            this.positionIncrement = value;
-	        }
-	        get { return positionIncrement; }
-	    }
+        /// <summary>Set the position increment. The default value is one.
+        /// 
+        /// </summary>
+        /// <value> the distance from the prior term </value>
+        public virtual int PositionIncrement
+        {
+            set
+            {
+                if (value < 0)
+                    throw new System.ArgumentException("Increment must be zero or greater: " + value);
+                this.positionIncrement = value;
+            }
+            get { return positionIncrement; }
+        }
 
-	    public override void  Clear()
-		{
-			this.positionIncrement = 1;
-		}
-		
-		public  override bool Equals(System.Object other)
-		{
-			if (other == this)
-			{
-				return true;
-			}
-			
-			if (other is PositionIncrementAttribute)
-			{
-				return positionIncrement == ((PositionIncrementAttribute) other).positionIncrement;
-			}
-			
-			return false;
-		}
-		
-		public override int GetHashCode()
-		{
-			return positionIncrement;
-		}
-		
-		public override void  CopyTo(Attribute target)
-		{
-			IPositionIncrementAttribute t = (IPositionIncrementAttribute) target;
-			t.PositionIncrement = positionIncrement;
-		}
-		
-		override public System.Object Clone()
-		{
+        public override void  Clear()
+        {
+            this.positionIncrement = 1;
+        }
+        
+        public  override bool Equals(System.Object other)
+        {
+            if (other == this)
+            {
+                return true;
+            }
+            
+            if (other is PositionIncrementAttribute)
+            {
+                return positionIncrement == ((PositionIncrementAttribute) other).positionIncrement;
+            }
+            
+            return false;
+        }
+        
+        public override int GetHashCode()
+        {
+            return positionIncrement;
+        }
+        
+        public override void  CopyTo(Attribute target)
+        {
+            IPositionIncrementAttribute t = (IPositionIncrementAttribute) target;
+            t.PositionIncrement = positionIncrement;
+        }
+        
+        override public System.Object Clone()
+        {
             PositionIncrementAttribute impl = new PositionIncrementAttribute();
             impl.positionIncrement = positionIncrement;
             return impl;
-		}
-	}
+        }
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Tokenattributes/TermAttribute.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Tokenattributes/TermAttribute.cs b/src/core/Analysis/Tokenattributes/TermAttribute.cs
index f95402c..3dad641 100644
--- a/src/core/Analysis/Tokenattributes/TermAttribute.cs
+++ b/src/core/Analysis/Tokenattributes/TermAttribute.cs
@@ -22,247 +22,247 @@ using Attribute = Lucene.Net.Util.Attribute;
 
 namespace Lucene.Net.Analysis.Tokenattributes
 {
-	
-	/// <summary> The term text of a Token.</summary>
-	[Serializable]
-	public class TermAttribute:Attribute, ITermAttribute, System.ICloneable
-	{
-		private static int MIN_BUFFER_SIZE = 10;
-		
-		private char[] termBuffer;
-		private int termLength;
+    
+    /// <summary> The term text of a Token.</summary>
+    [Serializable]
+    public class TermAttribute:Attribute, ITermAttribute, System.ICloneable
+    {
+        private static int MIN_BUFFER_SIZE = 10;
+        
+        private char[] termBuffer;
+        private int termLength;
 
-	    /// <summary>Returns the Token's term text.
-	    /// 
-	    /// This method has a performance penalty
-	    /// because the text is stored internally in a char[].  If
-	    /// possible, use <see cref="TermBuffer()" /> and 
-	    /// <see cref="TermLength()" /> directly instead.  If you 
-	    /// really need a String, use this method, which is nothing more than
-	    /// a convenience call to <b>new String(token.termBuffer(), 0, token.termLength())</b>
-	    /// </summary>
-	    public virtual string Term
-	    {
-	        get
-	        {
-	            InitTermBuffer();
-	            return new System.String(termBuffer, 0, termLength);
-	        }
-	    }
+        /// <summary>Returns the Token's term text.
+        /// 
+        /// This method has a performance penalty
+        /// because the text is stored internally in a char[].  If
+        /// possible, use <see cref="TermBuffer()" /> and 
+        /// <see cref="TermLength()" /> directly instead.  If you 
+        /// really need a String, use this method, which is nothing more than
+        /// a convenience call to <b>new String(token.termBuffer(), 0, token.termLength())</b>
+        /// </summary>
+        public virtual string Term
+        {
+            get
+            {
+                InitTermBuffer();
+                return new System.String(termBuffer, 0, termLength);
+            }
+        }
 
-	    /// <summary>Copies the contents of buffer, starting at offset for
-		/// length characters, into the termBuffer array.
-		/// </summary>
-		/// <param name="buffer">the buffer to copy
-		/// </param>
-		/// <param name="offset">the index in the buffer of the first character to copy
-		/// </param>
-		/// <param name="length">the number of characters to copy
-		/// </param>
-		public virtual void  SetTermBuffer(char[] buffer, int offset, int length)
-		{
-			GrowTermBuffer(length);
-			Array.Copy(buffer, offset, termBuffer, 0, length);
-			termLength = length;
-		}
-		
-		/// <summary>Copies the contents of buffer into the termBuffer array.</summary>
-		/// <param name="buffer">the buffer to copy
-		/// </param>
-		public virtual void  SetTermBuffer(System.String buffer)
-		{
-			int length = buffer.Length;
-			GrowTermBuffer(length);
-			TextSupport.GetCharsFromString(buffer, 0, length, termBuffer, 0);
-			termLength = length;
-		}
-		
-		/// <summary>Copies the contents of buffer, starting at offset and continuing
-		/// for length characters, into the termBuffer array.
-		/// </summary>
-		/// <param name="buffer">the buffer to copy
-		/// </param>
-		/// <param name="offset">the index in the buffer of the first character to copy
-		/// </param>
-		/// <param name="length">the number of characters to copy
-		/// </param>
-		public virtual void  SetTermBuffer(System.String buffer, int offset, int length)
-		{
-			System.Diagnostics.Debug.Assert(offset <= buffer.Length);
-			System.Diagnostics.Debug.Assert(offset + length <= buffer.Length);
-			GrowTermBuffer(length);
-			TextSupport.GetCharsFromString(buffer, offset, offset + length, termBuffer, 0);
-			termLength = length;
-		}
-		
-		/// <summary>Returns the internal termBuffer character array which
-		/// you can then directly alter.  If the array is too
-		/// small for your token, use <see cref="ResizeTermBuffer(int)" />
-		/// to increase it.  After
-		/// altering the buffer be sure to call <see cref="SetTermLength" />
-		/// to record the number of valid
-		/// characters that were placed into the termBuffer. 
-		/// </summary>
-		public virtual char[] TermBuffer()
-		{
-			InitTermBuffer();
-			return termBuffer;
-		}
-		
-		/// <summary>Grows the termBuffer to at least size newSize, preserving the
-		/// existing content. Note: If the next operation is to change
-		/// the contents of the term buffer use
-		/// <see cref="SetTermBuffer(char[], int, int)" />,
-		/// <see cref="SetTermBuffer(String)" />, or
-		/// <see cref="SetTermBuffer(String, int, int)" />
-		/// to optimally combine the resize with the setting of the termBuffer.
-		/// </summary>
-		/// <param name="newSize">minimum size of the new termBuffer
-		/// </param>
-		/// <returns> newly created termBuffer with length >= newSize
-		/// </returns>
-		public virtual char[] ResizeTermBuffer(int newSize)
-		{
-			if (termBuffer == null)
-			{
-				// The buffer is always at least MIN_BUFFER_SIZE
-				termBuffer = new char[ArrayUtil.GetNextSize(newSize < MIN_BUFFER_SIZE?MIN_BUFFER_SIZE:newSize)];
-			}
-			else
-			{
-				if (termBuffer.Length < newSize)
-				{
-					// Not big enough; create a new array with slight
-					// over allocation and preserve content
-					char[] newCharBuffer = new char[ArrayUtil.GetNextSize(newSize)];
-					Array.Copy(termBuffer, 0, newCharBuffer, 0, termBuffer.Length);
-					termBuffer = newCharBuffer;
-				}
-			}
-			return termBuffer;
-		}
-		
-		
-		/// <summary>Allocates a buffer char[] of at least newSize, without preserving the existing content.
-		/// its always used in places that set the content 
-		/// </summary>
-		/// <param name="newSize">minimum size of the buffer
-		/// </param>
-		private void  GrowTermBuffer(int newSize)
-		{
-			if (termBuffer == null)
-			{
-				// The buffer is always at least MIN_BUFFER_SIZE
-				termBuffer = new char[ArrayUtil.GetNextSize(newSize < MIN_BUFFER_SIZE?MIN_BUFFER_SIZE:newSize)];
-			}
-			else
-			{
-				if (termBuffer.Length < newSize)
-				{
-					// Not big enough; create a new array with slight
-					// over allocation:
-					termBuffer = new char[ArrayUtil.GetNextSize(newSize)];
-				}
-			}
-		}
-		
-		private void  InitTermBuffer()
-		{
-			if (termBuffer == null)
-			{
-				termBuffer = new char[ArrayUtil.GetNextSize(MIN_BUFFER_SIZE)];
-				termLength = 0;
-			}
-		}
-		
-		/// <summary>Return number of valid characters (length of the term)
-		/// in the termBuffer array. 
-		/// </summary>
-		public virtual int TermLength()
-		{
-			return termLength;
-		}
-		
-		/// <summary>Set number of valid characters (length of the term) in
-		/// the termBuffer array. Use this to truncate the termBuffer
-		/// or to synchronize with external manipulation of the termBuffer.
-		/// Note: to grow the size of the array,
-		/// use <see cref="ResizeTermBuffer(int)" /> first.
-		/// </summary>
-		/// <param name="length">the truncated length
-		/// </param>
-		public virtual void  SetTermLength(int length)
-		{
-			InitTermBuffer();
-			if (length > termBuffer.Length)
-				throw new System.ArgumentException("length " + length + " exceeds the size of the termBuffer (" + termBuffer.Length + ")");
-			termLength = length;
-		}
-		
-		public override int GetHashCode()
-		{
-			InitTermBuffer();
-			int code = termLength;
-			code = code * 31 + ArrayUtil.HashCode(termBuffer, 0, termLength);
-			return code;
-		}
-		
-		public override void  Clear()
-		{
-			termLength = 0;
-		}
-		
-		public override System.Object Clone()
-		{
-			TermAttribute t = (TermAttribute) base.Clone();
-			// Do a deep clone
-			if (termBuffer != null)
-			{
-				t.termBuffer = new char[termBuffer.Length];
-				termBuffer.CopyTo(t.termBuffer, 0);
-			}
-			return t;
-		}
-		
-		public  override bool Equals(System.Object other)
-		{
-			if (other == this)
-			{
-				return true;
-			}
-			
-			if (other is ITermAttribute)
-			{
-				InitTermBuffer();
-				TermAttribute o = ((TermAttribute) other);
-				o.InitTermBuffer();
-				
-				if (termLength != o.termLength)
-					return false;
-				for (int i = 0; i < termLength; i++)
-				{
-					if (termBuffer[i] != o.termBuffer[i])
-					{
-						return false;
-					}
-				}
-				return true;
-			}
-			
-			return false;
-		}
-		
-		public override System.String ToString()
-		{
-			InitTermBuffer();
-			return "term=" + new System.String(termBuffer, 0, termLength);
-		}
-		
-		public override void  CopyTo(Attribute target)
-		{
-			InitTermBuffer();
-			ITermAttribute t = (ITermAttribute) target;
-			t.SetTermBuffer(termBuffer, 0, termLength);
-		}
-	}
+        /// <summary>Copies the contents of buffer, starting at offset for
+        /// length characters, into the termBuffer array.
+        /// </summary>
+        /// <param name="buffer">the buffer to copy
+        /// </param>
+        /// <param name="offset">the index in the buffer of the first character to copy
+        /// </param>
+        /// <param name="length">the number of characters to copy
+        /// </param>
+        public virtual void  SetTermBuffer(char[] buffer, int offset, int length)
+        {
+            GrowTermBuffer(length);
+            Array.Copy(buffer, offset, termBuffer, 0, length);
+            termLength = length;
+        }
+        
+        /// <summary>Copies the contents of buffer into the termBuffer array.</summary>
+        /// <param name="buffer">the buffer to copy
+        /// </param>
+        public virtual void  SetTermBuffer(System.String buffer)
+        {
+            int length = buffer.Length;
+            GrowTermBuffer(length);
+            TextSupport.GetCharsFromString(buffer, 0, length, termBuffer, 0);
+            termLength = length;
+        }
+        
+        /// <summary>Copies the contents of buffer, starting at offset and continuing
+        /// for length characters, into the termBuffer array.
+        /// </summary>
+        /// <param name="buffer">the buffer to copy
+        /// </param>
+        /// <param name="offset">the index in the buffer of the first character to copy
+        /// </param>
+        /// <param name="length">the number of characters to copy
+        /// </param>
+        public virtual void  SetTermBuffer(System.String buffer, int offset, int length)
+        {
+            System.Diagnostics.Debug.Assert(offset <= buffer.Length);
+            System.Diagnostics.Debug.Assert(offset + length <= buffer.Length);
+            GrowTermBuffer(length);
+            TextSupport.GetCharsFromString(buffer, offset, offset + length, termBuffer, 0);
+            termLength = length;
+        }
+        
+        /// <summary>Returns the internal termBuffer character array which
+        /// you can then directly alter.  If the array is too
+        /// small for your token, use <see cref="ResizeTermBuffer(int)" />
+        /// to increase it.  After
+        /// altering the buffer be sure to call <see cref="SetTermLength" />
+        /// to record the number of valid
+        /// characters that were placed into the termBuffer. 
+        /// </summary>
+        public virtual char[] TermBuffer()
+        {
+            InitTermBuffer();
+            return termBuffer;
+        }
+        
+        /// <summary>Grows the termBuffer to at least size newSize, preserving the
+        /// existing content. Note: If the next operation is to change
+        /// the contents of the term buffer use
+        /// <see cref="SetTermBuffer(char[], int, int)" />,
+        /// <see cref="SetTermBuffer(String)" />, or
+        /// <see cref="SetTermBuffer(String, int, int)" />
+        /// to optimally combine the resize with the setting of the termBuffer.
+        /// </summary>
+        /// <param name="newSize">minimum size of the new termBuffer
+        /// </param>
+        /// <returns> newly created termBuffer with length >= newSize
+        /// </returns>
+        public virtual char[] ResizeTermBuffer(int newSize)
+        {
+            if (termBuffer == null)
+            {
+                // The buffer is always at least MIN_BUFFER_SIZE
+                termBuffer = new char[ArrayUtil.GetNextSize(newSize < MIN_BUFFER_SIZE?MIN_BUFFER_SIZE:newSize)];
+            }
+            else
+            {
+                if (termBuffer.Length < newSize)
+                {
+                    // Not big enough; create a new array with slight
+                    // over allocation and preserve content
+                    char[] newCharBuffer = new char[ArrayUtil.GetNextSize(newSize)];
+                    Array.Copy(termBuffer, 0, newCharBuffer, 0, termBuffer.Length);
+                    termBuffer = newCharBuffer;
+                }
+            }
+            return termBuffer;
+        }
+        
+        
+        /// <summary>Allocates a buffer char[] of at least newSize, without preserving the existing content.
+        /// its always used in places that set the content 
+        /// </summary>
+        /// <param name="newSize">minimum size of the buffer
+        /// </param>
+        private void  GrowTermBuffer(int newSize)
+        {
+            if (termBuffer == null)
+            {
+                // The buffer is always at least MIN_BUFFER_SIZE
+                termBuffer = new char[ArrayUtil.GetNextSize(newSize < MIN_BUFFER_SIZE?MIN_BUFFER_SIZE:newSize)];
+            }
+            else
+            {
+                if (termBuffer.Length < newSize)
+                {
+                    // Not big enough; create a new array with slight
+                    // over allocation:
+                    termBuffer = new char[ArrayUtil.GetNextSize(newSize)];
+                }
+            }
+        }
+        
+        private void  InitTermBuffer()
+        {
+            if (termBuffer == null)
+            {
+                termBuffer = new char[ArrayUtil.GetNextSize(MIN_BUFFER_SIZE)];
+                termLength = 0;
+            }
+        }
+        
+        /// <summary>Return number of valid characters (length of the term)
+        /// in the termBuffer array. 
+        /// </summary>
+        public virtual int TermLength()
+        {
+            return termLength;
+        }
+        
+        /// <summary>Set number of valid characters (length of the term) in
+        /// the termBuffer array. Use this to truncate the termBuffer
+        /// or to synchronize with external manipulation of the termBuffer.
+        /// Note: to grow the size of the array,
+        /// use <see cref="ResizeTermBuffer(int)" /> first.
+        /// </summary>
+        /// <param name="length">the truncated length
+        /// </param>
+        public virtual void  SetTermLength(int length)
+        {
+            InitTermBuffer();
+            if (length > termBuffer.Length)
+                throw new System.ArgumentException("length " + length + " exceeds the size of the termBuffer (" + termBuffer.Length + ")");
+            termLength = length;
+        }
+        
+        public override int GetHashCode()
+        {
+            InitTermBuffer();
+            int code = termLength;
+            code = code * 31 + ArrayUtil.HashCode(termBuffer, 0, termLength);
+            return code;
+        }
+        
+        public override void  Clear()
+        {
+            termLength = 0;
+        }
+        
+        public override System.Object Clone()
+        {
+            TermAttribute t = (TermAttribute) base.Clone();
+            // Do a deep clone
+            if (termBuffer != null)
+            {
+                t.termBuffer = new char[termBuffer.Length];
+                termBuffer.CopyTo(t.termBuffer, 0);
+            }
+            return t;
+        }
+        
+        public  override bool Equals(System.Object other)
+        {
+            if (other == this)
+            {
+                return true;
+            }
+            
+            if (other is ITermAttribute)
+            {
+                InitTermBuffer();
+                TermAttribute o = ((TermAttribute) other);
+                o.InitTermBuffer();
+                
+                if (termLength != o.termLength)
+                    return false;
+                for (int i = 0; i < termLength; i++)
+                {
+                    if (termBuffer[i] != o.termBuffer[i])
+                    {
+                        return false;
+                    }
+                }
+                return true;
+            }
+            
+            return false;
+        }
+        
+        public override System.String ToString()
+        {
+            InitTermBuffer();
+            return "term=" + new System.String(termBuffer, 0, termLength);
+        }
+        
+        public override void  CopyTo(Attribute target)
+        {
+            InitTermBuffer();
+            ITermAttribute t = (ITermAttribute) target;
+            t.SetTermBuffer(termBuffer, 0, termLength);
+        }
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Tokenattributes/TypeAttribute.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Tokenattributes/TypeAttribute.cs b/src/core/Analysis/Tokenattributes/TypeAttribute.cs
index 1da1c50..cdd5901 100644
--- a/src/core/Analysis/Tokenattributes/TypeAttribute.cs
+++ b/src/core/Analysis/Tokenattributes/TypeAttribute.cs
@@ -20,66 +20,66 @@ using Attribute = Lucene.Net.Util.Attribute;
 
 namespace Lucene.Net.Analysis.Tokenattributes
 {
-	
-	/// <summary> A Token's lexical type. The Default value is "word". </summary>
-	[Serializable]
-	public class TypeAttribute:Attribute, ITypeAttribute, System.ICloneable
-	{
-		private System.String type;
-		public const System.String DEFAULT_TYPE = "word";
-		
-		public TypeAttribute():this(DEFAULT_TYPE)
-		{
-		}
-		
-		public TypeAttribute(System.String type)
-		{
-			this.type = type;
-		}
+    
+    /// <summary> A Token's lexical type. The Default value is "word". </summary>
+    [Serializable]
+    public class TypeAttribute:Attribute, ITypeAttribute, System.ICloneable
+    {
+        private System.String type;
+        public const System.String DEFAULT_TYPE = "word";
+        
+        public TypeAttribute():this(DEFAULT_TYPE)
+        {
+        }
+        
+        public TypeAttribute(System.String type)
+        {
+            this.type = type;
+        }
 
-	    /// <summary>Returns this Token's lexical type.  Defaults to "word". </summary>
-	    public virtual string Type
-	    {
-	        get { return type; }
-	        set { this.type = value; }
-	    }
+        /// <summary>Returns this Token's lexical type.  Defaults to "word". </summary>
+        public virtual string Type
+        {
+            get { return type; }
+            set { this.type = value; }
+        }
 
-	    public override void  Clear()
-		{
-			type = DEFAULT_TYPE;
-		}
-		
-		public  override bool Equals(System.Object other)
-		{
-			if (other == this)
-			{
-				return true;
-			}
-			
-			if (other is TypeAttribute)
-			{
-				return type.Equals(((TypeAttribute) other).type);
-			}
-			
-			return false;
-		}
-		
-		public override int GetHashCode()
-		{
-			return type.GetHashCode();
-		}
-		
-		public override void  CopyTo(Attribute target)
-		{
-			ITypeAttribute t = (ITypeAttribute) target;
-			t.Type = type;
-		}
-		
-		override public System.Object Clone()
-		{
+        public override void  Clear()
+        {
+            type = DEFAULT_TYPE;
+        }
+        
+        public  override bool Equals(System.Object other)
+        {
+            if (other == this)
+            {
+                return true;
+            }
+            
+            if (other is TypeAttribute)
+            {
+                return type.Equals(((TypeAttribute) other).type);
+            }
+            
+            return false;
+        }
+        
+        public override int GetHashCode()
+        {
+            return type.GetHashCode();
+        }
+        
+        public override void  CopyTo(Attribute target)
+        {
+            ITypeAttribute t = (ITypeAttribute) target;
+            t.Type = type;
+        }
+        
+        override public System.Object Clone()
+        {
             TypeAttribute impl = new TypeAttribute();
             impl.type = type;
             return impl;
-		}
-	}
+        }
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Tokenizer.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Tokenizer.cs b/src/core/Analysis/Tokenizer.cs
index 5ab741e..9860141 100644
--- a/src/core/Analysis/Tokenizer.cs
+++ b/src/core/Analysis/Tokenizer.cs
@@ -19,55 +19,55 @@ using AttributeSource = Lucene.Net.Util.AttributeSource;
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary> A Tokenizer is a TokenStream whose input is a Reader.
-	/// <p/>
-	/// This is an abstract class; subclasses must override <see cref="TokenStream.IncrementToken()" />
-	/// <p/>
+    
+    /// <summary> A Tokenizer is a TokenStream whose input is a Reader.
+    /// <p/>
+    /// This is an abstract class; subclasses must override <see cref="TokenStream.IncrementToken()" />
+    /// <p/>
     /// NOTE: Subclasses overriding <see cref="TokenStream.IncrementToken()" /> must call
-	/// <see cref="AttributeSource.ClearAttributes()" /> before setting attributes.
-	/// </summary>
-	
-	public abstract class Tokenizer:TokenStream
-	{
-		/// <summary>The text source for this Tokenizer. </summary>
-		protected internal System.IO.TextReader input;
+    /// <see cref="AttributeSource.ClearAttributes()" /> before setting attributes.
+    /// </summary>
+    
+    public abstract class Tokenizer:TokenStream
+    {
+        /// <summary>The text source for this Tokenizer. </summary>
+        protected internal System.IO.TextReader input;
 
-	    private bool isDisposed;
-		
-		/// <summary>Construct a tokenizer with null input. </summary>
-		protected internal Tokenizer()
-		{
-		}
-		
-		/// <summary>Construct a token stream processing the given input. </summary>
-		protected internal Tokenizer(System.IO.TextReader input)
-		{
-			this.input = CharReader.Get(input);
-		}
-		
-		/// <summary>Construct a tokenizer with null input using the given AttributeFactory. </summary>
-		protected internal Tokenizer(AttributeFactory factory):base(factory)
-		{
-		}
-		
-		/// <summary>Construct a token stream processing the given input using the given AttributeFactory. </summary>
-		protected internal Tokenizer(AttributeFactory factory, System.IO.TextReader input):base(factory)
-		{
-			this.input = CharReader.Get(input);
-		}
-		
-		/// <summary>Construct a token stream processing the given input using the given AttributeSource. </summary>
-		protected internal Tokenizer(AttributeSource source):base(source)
-		{
-		}
-		
-		/// <summary>Construct a token stream processing the given input using the given AttributeSource. </summary>
-		protected internal Tokenizer(AttributeSource source, System.IO.TextReader input):base(source)
-		{
-			this.input = CharReader.Get(input);
-		}
-		
+        private bool isDisposed;
+        
+        /// <summary>Construct a tokenizer with null input. </summary>
+        protected internal Tokenizer()
+        {
+        }
+        
+        /// <summary>Construct a token stream processing the given input. </summary>
+        protected internal Tokenizer(System.IO.TextReader input)
+        {
+            this.input = CharReader.Get(input);
+        }
+        
+        /// <summary>Construct a tokenizer with null input using the given AttributeFactory. </summary>
+        protected internal Tokenizer(AttributeFactory factory):base(factory)
+        {
+        }
+        
+        /// <summary>Construct a token stream processing the given input using the given AttributeFactory. </summary>
+        protected internal Tokenizer(AttributeFactory factory, System.IO.TextReader input):base(factory)
+        {
+            this.input = CharReader.Get(input);
+        }
+        
+        /// <summary>Construct a token stream processing the given input using the given AttributeSource. </summary>
+        protected internal Tokenizer(AttributeSource source):base(source)
+        {
+        }
+        
+        /// <summary>Construct a token stream processing the given input using the given AttributeSource. </summary>
+        protected internal Tokenizer(AttributeSource source, System.IO.TextReader input):base(source)
+        {
+            this.input = CharReader.Get(input);
+        }
+        
         protected override void Dispose(bool disposing)
         {
             if (isDisposed) return;
@@ -86,27 +86,27 @@ namespace Lucene.Net.Analysis
             isDisposed = true;
         }
   
-		/// <summary>Return the corrected offset. If <see cref="input" /> is a <see cref="CharStream" /> subclass
-		/// this method calls <see cref="CharStream.CorrectOffset" />, else returns <c>currentOff</c>.
-		/// </summary>
-		/// <param name="currentOff">offset as seen in the output
-		/// </param>
-		/// <returns> corrected offset based on the input
-		/// </returns>
-		/// <seealso cref="CharStream.CorrectOffset">
-		/// </seealso>
-		protected internal int CorrectOffset(int currentOff)
-		{
-			return (input is CharStream)?((CharStream) input).CorrectOffset(currentOff):currentOff;
-		}
-		
-		/// <summary>Expert: Reset the tokenizer to a new reader.  Typically, an
-		/// analyzer (in its reusableTokenStream method) will use
-		/// this to re-use a previously created tokenizer. 
-		/// </summary>
-		public virtual void  Reset(System.IO.TextReader input)
-		{
-			this.input = input;
-		}
-	}
+        /// <summary>Return the corrected offset. If <see cref="input" /> is a <see cref="CharStream" /> subclass
+        /// this method calls <see cref="CharStream.CorrectOffset" />, else returns <c>currentOff</c>.
+        /// </summary>
+        /// <param name="currentOff">offset as seen in the output
+        /// </param>
+        /// <returns> corrected offset based on the input
+        /// </returns>
+        /// <seealso cref="CharStream.CorrectOffset">
+        /// </seealso>
+        protected internal int CorrectOffset(int currentOff)
+        {
+            return (input is CharStream)?((CharStream) input).CorrectOffset(currentOff):currentOff;
+        }
+        
+        /// <summary>Expert: Reset the tokenizer to a new reader.  Typically, an
+        /// analyzer (in its reusableTokenStream method) will use
+        /// this to re-use a previously created tokenizer. 
+        /// </summary>
+        public virtual void  Reset(System.IO.TextReader input)
+        {
+            this.input = input;
+        }
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/WhitespaceAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/WhitespaceAnalyzer.cs b/src/core/Analysis/WhitespaceAnalyzer.cs
index 77dbaa3..ae94c44 100644
--- a/src/core/Analysis/WhitespaceAnalyzer.cs
+++ b/src/core/Analysis/WhitespaceAnalyzer.cs
@@ -17,27 +17,27 @@
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary>An Analyzer that uses <see cref="WhitespaceTokenizer" />. </summary>
-	
-	public sealed class WhitespaceAnalyzer:Analyzer
-	{
-		public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
-		{
-			return new WhitespaceTokenizer(reader);
-		}
-		
-		public override TokenStream ReusableTokenStream(System.String fieldName, System.IO.TextReader reader)
-		{
-			var tokenizer = (Tokenizer) PreviousTokenStream;
-			if (tokenizer == null)
-			{
-				tokenizer = new WhitespaceTokenizer(reader);
-				PreviousTokenStream = tokenizer;
-			}
-			else
-				tokenizer.Reset(reader);
-			return tokenizer;
-		}
-	}
+    
+    /// <summary>An Analyzer that uses <see cref="WhitespaceTokenizer" />. </summary>
+    
+    public sealed class WhitespaceAnalyzer:Analyzer
+    {
+        public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
+        {
+            return new WhitespaceTokenizer(reader);
+        }
+        
+        public override TokenStream ReusableTokenStream(System.String fieldName, System.IO.TextReader reader)
+        {
+            var tokenizer = (Tokenizer) PreviousTokenStream;
+            if (tokenizer == null)
+            {
+                tokenizer = new WhitespaceTokenizer(reader);
+                PreviousTokenStream = tokenizer;
+            }
+            else
+                tokenizer.Reset(reader);
+            return tokenizer;
+        }
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/WhitespaceTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/WhitespaceTokenizer.cs b/src/core/Analysis/WhitespaceTokenizer.cs
index c96ad50..ba19da9 100644
--- a/src/core/Analysis/WhitespaceTokenizer.cs
+++ b/src/core/Analysis/WhitespaceTokenizer.cs
@@ -19,37 +19,37 @@ using AttributeSource = Lucene.Net.Util.AttributeSource;
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary>A WhitespaceTokenizer is a tokenizer that divides text at whitespace.
-	/// Adjacent sequences of non-Whitespace characters form tokens. 
-	/// </summary>
-	
-	public class WhitespaceTokenizer:CharTokenizer
-	{
-		/// <summary>Construct a new WhitespaceTokenizer. </summary>
-		public WhitespaceTokenizer(System.IO.TextReader @in)
-			: base(@in)
-		{
-		}
-		
-		/// <summary>Construct a new WhitespaceTokenizer using a given <see cref="AttributeSource" />. </summary>
-		public WhitespaceTokenizer(AttributeSource source, System.IO.TextReader @in)
-			: base(source, @in)
-		{
-		}
-		
-		/// <summary>Construct a new WhitespaceTokenizer using a given <see cref="Lucene.Net.Util.AttributeSource.AttributeFactory" />. </summary>
-		public WhitespaceTokenizer(AttributeFactory factory, System.IO.TextReader @in)
-			: base(factory, @in)
-		{
-		}
-		
-		/// <summary>Collects only characters which do not satisfy
+    
+    /// <summary>A WhitespaceTokenizer is a tokenizer that divides text at whitespace.
+    /// Adjacent sequences of non-Whitespace characters form tokens. 
+    /// </summary>
+    
+    public class WhitespaceTokenizer:CharTokenizer
+    {
+        /// <summary>Construct a new WhitespaceTokenizer. </summary>
+        public WhitespaceTokenizer(System.IO.TextReader @in)
+            : base(@in)
+        {
+        }
+        
+        /// <summary>Construct a new WhitespaceTokenizer using a given <see cref="AttributeSource" />. </summary>
+        public WhitespaceTokenizer(AttributeSource source, System.IO.TextReader @in)
+            : base(source, @in)
+        {
+        }
+        
+        /// <summary>Construct a new WhitespaceTokenizer using a given <see cref="Lucene.Net.Util.AttributeSource.AttributeFactory" />. </summary>
+        public WhitespaceTokenizer(AttributeFactory factory, System.IO.TextReader @in)
+            : base(factory, @in)
+        {
+        }
+        
+        /// <summary>Collects only characters which do not satisfy
         /// <see cref="char.IsWhiteSpace(char)" />.
-		/// </summary>
-		protected internal override bool IsTokenChar(char c)
-		{
-			return !System.Char.IsWhiteSpace(c);
-		}
-	}
+        /// </summary>
+        protected internal override bool IsTokenChar(char c)
+        {
+            return !System.Char.IsWhiteSpace(c);
+        }
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/WordlistLoader.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/WordlistLoader.cs b/src/core/Analysis/WordlistLoader.cs
index bfd1b07..d3abfe6 100644
--- a/src/core/Analysis/WordlistLoader.cs
+++ b/src/core/Analysis/WordlistLoader.cs
@@ -19,128 +19,128 @@ using System.Collections.Generic;
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary> Loader for text files that represent a list of stopwords.</summary>
-	public class WordlistLoader
-	{
-		
-		/// <summary> Loads a text file and adds every line as an entry to a HashSet (omitting
-		/// leading and trailing whitespace). Every line of the file should contain only
-		/// one word. The words need to be in lowercase if you make use of an
-		/// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
-		/// </summary>
-		/// <param name="wordfile">File containing the wordlist</param>
-		/// <returns> A HashSet with the file's words</returns>
-		public static ISet<string> GetWordSet(System.IO.FileInfo wordfile)
-		{
+    
+    /// <summary> Loader for text files that represent a list of stopwords.</summary>
+    public class WordlistLoader
+    {
+        
+        /// <summary> Loads a text file and adds every line as an entry to a HashSet (omitting
+        /// leading and trailing whitespace). Every line of the file should contain only
+        /// one word. The words need to be in lowercase if you make use of an
+        /// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
+        /// </summary>
+        /// <param name="wordfile">File containing the wordlist</param>
+        /// <returns> A HashSet with the file's words</returns>
+        public static ISet<string> GetWordSet(System.IO.FileInfo wordfile)
+        {
             using (var reader = new System.IO.StreamReader(wordfile.FullName, System.Text.Encoding.Default))
             {
                 return GetWordSet(reader);
             }
-		}
-		
-		/// <summary> Loads a text file and adds every non-comment line as an entry to a HashSet (omitting
-		/// leading and trailing whitespace). Every line of the file should contain only
-		/// one word. The words need to be in lowercase if you make use of an
-		/// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
-		/// </summary>
-		/// <param name="wordfile">File containing the wordlist</param>
-		/// <param name="comment">The comment string to ignore</param>
-		/// <returns> A HashSet with the file's words</returns>
-		public static ISet<string> GetWordSet(System.IO.FileInfo wordfile, System.String comment)
-		{
+        }
+        
+        /// <summary> Loads a text file and adds every non-comment line as an entry to a HashSet (omitting
+        /// leading and trailing whitespace). Every line of the file should contain only
+        /// one word. The words need to be in lowercase if you make use of an
+        /// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
+        /// </summary>
+        /// <param name="wordfile">File containing the wordlist</param>
+        /// <param name="comment">The comment string to ignore</param>
+        /// <returns> A HashSet with the file's words</returns>
+        public static ISet<string> GetWordSet(System.IO.FileInfo wordfile, System.String comment)
+        {
             using (var reader = new System.IO.StreamReader(wordfile.FullName, System.Text.Encoding.Default))
             {
                 return GetWordSet(reader, comment);
             }
-		}
-		
-		
-		/// <summary> Reads lines from a Reader and adds every line as an entry to a HashSet (omitting
-		/// leading and trailing whitespace). Every line of the Reader should contain only
-		/// one word. The words need to be in lowercase if you make use of an
-		/// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
-		/// </summary>
-		/// <param name="reader">Reader containing the wordlist</param>
-		/// <returns>A HashSet with the reader's words</returns>
-		public static ISet<string> GetWordSet(System.IO.TextReader reader)
-		{
+        }
+        
+        
+        /// <summary> Reads lines from a Reader and adds every line as an entry to a HashSet (omitting
+        /// leading and trailing whitespace). Every line of the Reader should contain only
+        /// one word. The words need to be in lowercase if you make use of an
+        /// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
+        /// </summary>
+        /// <param name="reader">Reader containing the wordlist</param>
+        /// <returns>A HashSet with the reader's words</returns>
+        public static ISet<string> GetWordSet(System.IO.TextReader reader)
+        {
             var result = Support.Compatibility.SetFactory.CreateHashSet<string>();
 
-			System.String word;
-			while ((word = reader.ReadLine()) != null)
-			{
-				result.Add(word.Trim());
-			}
+            System.String word;
+            while ((word = reader.ReadLine()) != null)
+            {
+                result.Add(word.Trim());
+            }
 
-			return result;
-		}
+            return result;
+        }
 
-		/// <summary> Reads lines from a Reader and adds every non-comment line as an entry to a HashSet (omitting
-		/// leading and trailing whitespace). Every line of the Reader should contain only
-		/// one word. The words need to be in lowercase if you make use of an
-		/// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
-		/// 
-		/// </summary>
-		/// <param name="reader">Reader containing the wordlist
-		/// </param>
-		/// <param name="comment">The string representing a comment.
-		/// </param>
-		/// <returns> A HashSet with the reader's words
-		/// </returns>
-		public static ISet<string> GetWordSet(System.IO.TextReader reader, System.String comment)
-		{
+        /// <summary> Reads lines from a Reader and adds every non-comment line as an entry to a HashSet (omitting
+        /// leading and trailing whitespace). Every line of the Reader should contain only
+        /// one word. The words need to be in lowercase if you make use of an
+        /// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
+        /// 
+        /// </summary>
+        /// <param name="reader">Reader containing the wordlist
+        /// </param>
+        /// <param name="comment">The string representing a comment.
+        /// </param>
+        /// <returns> A HashSet with the reader's words
+        /// </returns>
+        public static ISet<string> GetWordSet(System.IO.TextReader reader, System.String comment)
+        {
             var result = Support.Compatibility.SetFactory.CreateHashSet<string>();
 
             System.String word = null;
-			while ((word = reader.ReadLine()) != null)
-			{
-				if (word.StartsWith(comment) == false)
-				{
-					result.Add(word.Trim());
-				}
-			}
+            while ((word = reader.ReadLine()) != null)
+            {
+                if (word.StartsWith(comment) == false)
+                {
+                    result.Add(word.Trim());
+                }
+            }
 
-			return result;
-		}
+            return result;
+        }
 
 
 
-		/// <summary> Reads a stem dictionary. Each line contains:
-		/// <c>word<b>\t</b>stem</c>
-		/// (i.e. two tab seperated words)
-		/// 
-		/// </summary>
-		/// <returns> stem dictionary that overrules the stemming algorithm
-		/// </returns>
-		/// <throws>  IOException  </throws>
-		public static Dictionary<string, string> GetStemDict(System.IO.FileInfo wordstemfile)
-		{
-			if (wordstemfile == null)
-				throw new System.NullReferenceException("wordstemfile may not be null");
+        /// <summary> Reads a stem dictionary. Each line contains:
+        /// <c>word<b>\t</b>stem</c>
+        /// (i.e. two tab separated words)
+        /// 
+        /// </summary>
+        /// <returns> stem dictionary that overrules the stemming algorithm
+        /// </returns>
+        /// <throws>  IOException  </throws>
+        public static Dictionary<string, string> GetStemDict(System.IO.FileInfo wordstemfile)
+        {
+            if (wordstemfile == null)
+                throw new System.NullReferenceException("wordstemfile may not be null");
             var result = new Dictionary<string, string>();
-			System.IO.StreamReader br = null;
-			System.IO.StreamReader fr = null;
-			try
-			{
-				fr = new System.IO.StreamReader(wordstemfile.FullName, System.Text.Encoding.Default);
-				br = new System.IO.StreamReader(fr.BaseStream, fr.CurrentEncoding);
-				System.String line;
+            System.IO.StreamReader br = null;
+            System.IO.StreamReader fr = null;
+            try
+            {
+                fr = new System.IO.StreamReader(wordstemfile.FullName, System.Text.Encoding.Default);
+                br = new System.IO.StreamReader(fr.BaseStream, fr.CurrentEncoding);
+                System.String line;
                 char[] tab = {'\t'};
-				while ((line = br.ReadLine()) != null)
-				{
-					System.String[] wordstem = line.Split(tab, 2);
-					result[wordstem[0]] = wordstem[1];
-				}
-			}
-			finally
-			{
-				if (fr != null)
-					fr.Close();
-				if (br != null)
-					br.Close();
-			}
-			return result;
-		}
-	}
+                while ((line = br.ReadLine()) != null)
+                {
+                    System.String[] wordstem = line.Split(tab, 2);
+                    result[wordstem[0]] = wordstem[1];
+                }
+            }
+            finally
+            {
+                if (fr != null)
+                    fr.Close();
+                if (br != null)
+                    br.Close();
+            }
+            return result;
+        }
+    }
 }
\ No newline at end of file


Mime
View raw message