lucenenet-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From synhers...@apache.org
Subject [4/6] lucenenet git commit: More porting work on Analysis.Common
Date Mon, 23 Feb 2015 20:51:57 GMT
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f77e83ec/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SolrSynonymParser.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SolrSynonymParser.cs b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SolrSynonymParser.cs
index b0fb325..5f79619 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SolrSynonymParser.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SolrSynonymParser.cs
@@ -1,8 +1,9 @@
 using System;
 using System.Collections.Generic;
 using System.Text;
+using Reader = System.IO.TextReader;
 
-namespace org.apache.lucene.analysis.synonym
+namespace Lucene.Net.Analysis.Synonym
 {
 
 	/*
@@ -21,11 +22,7 @@ namespace org.apache.lucene.analysis.synonym
 	 * See the License for the specific language governing permissions and
 	 * limitations under the License.
 	 */
-
-
-	using CharsRef = org.apache.lucene.util.CharsRef;
-
-	/// <summary>
+    /// <summary>
 	/// Parser for the Solr synonyms format.
 	/// <ol>
 	///   <li> Blank lines and lines starting with '#' are comments.
@@ -61,9 +58,7 @@ namespace org.apache.lucene.analysis.synonym
 		this.expand = expand;
 	  }
 
-//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in .NET:
-//ORIGINAL LINE: @Override public void parse(java.io.Reader in) throws java.io.IOException, java.text.ParseException
-	  public override void parse(Reader @in)
+	  public override void Parse(Reader @in)
 	  {
 		LineNumberReader br = new LineNumberReader(@in);
 		try
@@ -109,14 +104,14 @@ namespace org.apache.lucene.analysis.synonym
 			inputs = new CharsRef[inputStrings.Length];
 			for (int i = 0; i < inputs.Length; i++)
 			{
-			  inputs[i] = analyze(unescape(inputStrings[i]).Trim(), new CharsRef());
+			  inputs[i] = Analyze(unescape(inputStrings[i]).Trim(), new CharsRef());
 			}
 
 			string[] outputStrings = Split(sides[1], ",");
 			outputs = new CharsRef[outputStrings.Length];
 			for (int i = 0; i < outputs.Length; i++)
 			{
-			  outputs[i] = analyze(unescape(outputStrings[i]).Trim(), new CharsRef());
+			  outputs[i] = Analyze(unescape(outputStrings[i]).Trim(), new CharsRef());
 			}
 		  }
 		  else
@@ -125,7 +120,7 @@ namespace org.apache.lucene.analysis.synonym
 			inputs = new CharsRef[inputStrings.Length];
 			for (int i = 0; i < inputs.Length; i++)
 			{
-			  inputs[i] = analyze(unescape(inputStrings[i]).Trim(), new CharsRef());
+			  inputs[i] = Analyze(unescape(inputStrings[i]).Trim(), new CharsRef());
 			}
 			if (expand)
 			{
@@ -146,7 +141,7 @@ namespace org.apache.lucene.analysis.synonym
 		  {
 			for (int j = 0; j < outputs.Length; j++)
 			{
-			  add(inputs[i], outputs[j], false);
+			  Add(inputs[i], outputs[j], false);
 			}
 		  }
 		}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f77e83ec/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilter.cs
index d3bb929..2d5169d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilter.cs
@@ -1,7 +1,12 @@
 using System;
 using System.Diagnostics;
+using Lucene.Net.Analysis.Tokenattributes;
+using Lucene.Net.Store;
+using Lucene.Net.Support;
+using Lucene.Net.Util;
+using Lucene.Net.Util.Fst;
 
-namespace org.apache.lucene.analysis.synonym
+namespace Lucene.Net.Analysis.Synonym
 {
 
 	/*
@@ -21,20 +26,6 @@ namespace org.apache.lucene.analysis.synonym
 	 * limitations under the License.
 	 */
 
-	using CharTermAttribute = org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-	using OffsetAttribute = org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-	using PositionIncrementAttribute = org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-	using PositionLengthAttribute = org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
-	using TypeAttribute = org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-	using ByteArrayDataInput = org.apache.lucene.store.ByteArrayDataInput;
-	using ArrayUtil = org.apache.lucene.util.ArrayUtil;
-	using AttributeSource = org.apache.lucene.util.AttributeSource;
-	using BytesRef = org.apache.lucene.util.BytesRef;
-	using CharsRef = org.apache.lucene.util.CharsRef;
-	using RamUsageEstimator = org.apache.lucene.util.RamUsageEstimator;
-	using UnicodeUtil = org.apache.lucene.util.UnicodeUtil;
-	using FST = org.apache.lucene.util.fst.FST;
-
 	/// <summary>
 	/// Matches single or multi word synonyms in a token stream.
 	/// This token stream cannot properly handle position
@@ -117,11 +108,12 @@ namespace org.apache.lucene.analysis.synonym
 
 	  // TODO: we should set PositionLengthAttr too...
 
-	  private readonly CharTermAttribute termAtt = addAttribute(typeof(CharTermAttribute));
-	  private readonly PositionIncrementAttribute posIncrAtt = addAttribute(typeof(PositionIncrementAttribute));
-	  private readonly PositionLengthAttribute posLenAtt = addAttribute(typeof(PositionLengthAttribute));
-	  private readonly TypeAttribute typeAtt = addAttribute(typeof(TypeAttribute));
-	  private readonly OffsetAttribute offsetAtt = addAttribute(typeof(OffsetAttribute));
+	    private readonly ICharTermAttribute termAtt;
+	    private readonly IPositionIncrementAttribute posIncrAtt;
+	    private readonly IPositionLengthAttribute posLenAtt;
+	    private readonly ITypeAttribute typeAtt;
+	    private readonly IOffsetAttribute offsetAtt;
+
 
 	  // How many future input tokens have already been matched
 	  // to a synonym; because the matching is "greedy" we don't
@@ -144,7 +136,7 @@ namespace org.apache.lucene.analysis.synonym
 		internal int startOffset;
 		internal int endOffset;
 
-		public virtual void reset()
+		public void Reset()
 		{
 		  state = null;
 		  consumed = true;
@@ -287,6 +279,12 @@ namespace org.apache.lucene.analysis.synonym
 	  ///                   the input entries when you create the <seealso cref="SynonymMap"/> </param>
 	  public SynonymFilter(TokenStream input, SynonymMap synonyms, bool ignoreCase) : base(input)
 	  {
+          termAtt = AddAttribute<ICharTermAttribute>();
+	  posIncrAtt = AddAttribute<IPositionIncrementAttribute>();
+	  posLenAtt = AddAttribute<IPositionLengthAttribute>();
+	  typeAtt = AddAttribute<ITypeAttribute>();
+	  offsetAtt = AddAttribute<IOffsetAttribute>();
+
 		this.synonyms = synonyms;
 		this.ignoreCase = ignoreCase;
 		this.fst = synonyms.fst;
@@ -318,13 +316,11 @@ namespace org.apache.lucene.analysis.synonym
 	  {
 		captureCount++;
 		//System.out.println("  capture slot=" + nextWrite);
-//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
-//ORIGINAL LINE: final PendingInput input = futureInputs[nextWrite];
 		PendingInput input = futureInputs[nextWrite];
 
-		input.state = captureState();
+		input.state = CaptureState();
 		input.consumed = false;
-		input.term.copyChars(termAtt.buffer(), 0, termAtt.length());
+		input.term.CopyChars(termAtt.Buffer(), 0, termAtt.Length);
 
 		nextWrite = rollIncr(nextWrite);
 
@@ -344,9 +340,7 @@ namespace org.apache.lucene.analysis.synonym
 	  private int lastStartOffset;
 	  private int lastEndOffset;
 
-//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in .NET:
-//ORIGINAL LINE: private void parse() throws java.io.IOException
-	  private void parse()
+	  private void Parse()
 	  {
 		//System.out.println("\nS: parse");
 
@@ -359,10 +353,10 @@ namespace org.apache.lucene.analysis.synonym
 		int matchInputLength = 0;
 		int matchEndOffset = -1;
 
-		BytesRef pendingOutput = fst.outputs.NoOutput;
-		fst.getFirstArc(scratchArc);
+		BytesRef pendingOutput = fst.Outputs.NoOutput;
+		fst.GetFirstArc(scratchArc);
 
-		Debug.Assert(scratchArc.output == fst.outputs.NoOutput);
+		Debug.Assert(scratchArc.Output == fst.Outputs.NoOutput);
 
 		int tokenCount = 0;
 
@@ -370,11 +364,7 @@ namespace org.apache.lucene.analysis.synonym
 		{
 
 		  // Pull next token's chars:
-//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
-//ORIGINAL LINE: final char[] buffer;
 		  char[] buffer;
-//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
-//ORIGINAL LINE: final int bufferLen;
 		  int bufferLen;
 		  //System.out.println("  cycle nextRead=" + curNextRead + " nextWrite=" + nextWrite);
 
@@ -398,15 +388,13 @@ namespace org.apache.lucene.analysis.synonym
 			  // than its input can set future inputs keepOrig
 			  // to true:
 			  //assert !futureInputs[nextWrite].keepOrig;
-			  if (input.incrementToken())
+			  if (input.IncrementToken())
 			  {
-				buffer = termAtt.buffer();
-				bufferLen = termAtt.length();
-//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
-//ORIGINAL LINE: final PendingInput input = futureInputs[nextWrite];
+				buffer = termAtt.Buffer();
+				bufferLen = termAtt.Length;
 				PendingInput input = futureInputs[nextWrite];
-				lastStartOffset = input.startOffset = offsetAtt.startOffset();
-				lastEndOffset = input.endOffset = offsetAtt.endOffset();
+				lastStartOffset = input.startOffset = offsetAtt.StartOffset();
+				lastEndOffset = input.endOffset = offsetAtt.EndOffset();
 				inputEndOffset = input.endOffset;
 				//System.out.println("  new token=" + new String(buffer, 0, bufferLen));
 				if (nextRead != nextWrite)
@@ -431,8 +419,8 @@ namespace org.apache.lucene.analysis.synonym
 		  else
 		  {
 			// Still in our lookahead
-			buffer = futureInputs[curNextRead].term.chars;
-			bufferLen = futureInputs[curNextRead].term.length;
+			buffer = futureInputs[curNextRead].term.Chars;
+			bufferLen = futureInputs[curNextRead].term.Length;
 			inputEndOffset = futureInputs[curNextRead].endOffset;
 			//System.out.println("  old token=" + new String(buffer, 0, bufferLen));
 		  }
@@ -443,26 +431,24 @@ namespace org.apache.lucene.analysis.synonym
 		  int bufUpto = 0;
 		  while (bufUpto < bufferLen)
 		  {
-//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
-//ORIGINAL LINE: final int codePoint = Character.codePointAt(buffer, bufUpto, bufferLen);
-			int codePoint = char.codePointAt(buffer, bufUpto, bufferLen);
-			if (fst.findTargetArc(ignoreCase ? char.ToLower(codePoint) : codePoint, scratchArc, scratchArc, fstReader) == null)
+			int codePoint = Character.CodePointAt(buffer, bufUpto, bufferLen);
+			if (fst.FindTargetArc(ignoreCase ? char.ToLower(codePoint) : codePoint, scratchArc, scratchArc, fstReader) == null)
 			{
 			  //System.out.println("    stop");
 			  goto byTokenBreak;
 			}
 
 			// Accum the output
-			pendingOutput = fst.outputs.add(pendingOutput, scratchArc.output);
+			pendingOutput = fst.Outputs.Add(pendingOutput, scratchArc.Output);
 			//System.out.println("    char=" + buffer[bufUpto] + " output=" + pendingOutput + " arc.output=" + scratchArc.output);
-			bufUpto += char.charCount(codePoint);
+			bufUpto += Character.CharCount(codePoint);
 		  }
 
 		  // OK, entire token matched; now see if this is a final
 		  // state:
 		  if (scratchArc.Final)
 		  {
-			matchOutput = fst.outputs.add(pendingOutput, scratchArc.nextFinalOutput);
+			matchOutput = fst.Outputs.Add(pendingOutput, scratchArc.NextFinalOutput);
 			matchInputLength = tokenCount;
 			matchEndOffset = inputEndOffset;
 			//System.out.println("  found matchLength=" + matchInputLength + " output=" + matchOutput);
@@ -470,7 +456,7 @@ namespace org.apache.lucene.analysis.synonym
 
 		  // See if the FST wants to continue matching (ie, needs to
 		  // see the next input token):
-		  if (fst.findTargetArc(SynonymMap.WORD_SEPARATOR, scratchArc, scratchArc, fstReader) == null)
+		  if (fst.FindTargetArc(SynonymMap.WORD_SEPARATOR, scratchArc, scratchArc, fstReader) == null)
 		  {
 			// No further rules can match here; we're done
 			// searching for matching rules starting at the
@@ -503,7 +489,7 @@ namespace org.apache.lucene.analysis.synonym
 		{
 		  //System.out.println("  add matchLength=" + matchInputLength + " output=" + matchOutput);
 		  inputSkipCount = matchInputLength;
-		  addOutput(matchOutput, matchInputLength, matchEndOffset);
+		  AddOutput(matchOutput, matchInputLength, matchEndOffset);
 		}
 		else if (nextRead != nextWrite)
 		{
@@ -521,47 +507,33 @@ namespace org.apache.lucene.analysis.synonym
 	  }
 
 	  // Interleaves all output tokens onto the futureOutputs:
-	  private void addOutput(BytesRef bytes, int matchInputLength, int matchEndOffset)
+	  private void AddOutput(BytesRef bytes, int matchInputLength, int matchEndOffset)
 	  {
-		bytesReader.reset(bytes.bytes, bytes.offset, bytes.length);
+		bytesReader.Reset(bytes.Bytes, bytes.Offset, bytes.Length);
 
-//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
-//ORIGINAL LINE: final int code = bytesReader.readVInt();
-		int code = bytesReader.readVInt();
-//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
-//ORIGINAL LINE: final boolean keepOrig = (code & 0x1) == 0;
+		int code = bytesReader.ReadVInt();
 		bool keepOrig = (code & 0x1) == 0;
-//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
-//ORIGINAL LINE: final int count = code >>> 1;
 		int count = (int)((uint)code >> 1);
 		//System.out.println("  addOutput count=" + count + " keepOrig=" + keepOrig);
 		for (int outputIDX = 0;outputIDX < count;outputIDX++)
 		{
-		  synonyms.words.get(bytesReader.readVInt(), scratchBytes);
+		  synonyms.words.Get(bytesReader.ReadVInt(), scratchBytes);
 		  //System.out.println("    outIDX=" + outputIDX + " bytes=" + scratchBytes.length);
 		  UnicodeUtil.UTF8toUTF16(scratchBytes, scratchChars);
-		  int lastStart = scratchChars.offset;
-//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
-//ORIGINAL LINE: final int chEnd = lastStart + scratchChars.length;
-		  int chEnd = lastStart + scratchChars.length;
+		  int lastStart = scratchChars.Offset;
+		  int chEnd = lastStart + scratchChars.Length;
 		  int outputUpto = nextRead;
 		  for (int chIDX = lastStart;chIDX <= chEnd;chIDX++)
 		  {
 			if (chIDX == chEnd || scratchChars.chars[chIDX] == SynonymMap.WORD_SEPARATOR)
 			{
-//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
-//ORIGINAL LINE: final int outputLen = chIDX - lastStart;
 			  int outputLen = chIDX - lastStart;
 			  // Caller is not allowed to have empty string in
 			  // the output:
 			  Debug.Assert(outputLen > 0, "output contains empty string: " + scratchChars);
-//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
-//ORIGINAL LINE: final int endOffset;
 			  int endOffset;
-//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
-//ORIGINAL LINE: final int posLen;
 			  int posLen;
-			  if (chIDX == chEnd && lastStart == scratchChars.offset)
+			  if (chIDX == chEnd && lastStart == scratchChars.Offset)
 			  {
 				// This rule had a single output token, so, we set
 				// this output's endOffset to the current
@@ -664,7 +636,7 @@ namespace org.apache.lucene.analysis.synonym
 				// but didn't capture:
 				Debug.Assert(inputSkipCount == 1, "inputSkipCount=" + inputSkipCount + " nextRead=" + nextRead);
 			  }
-			  input.reset();
+			  input.Reset();
 			  if (outputs.count > 0)
 			  {
 				outputs.posIncr = 0;
@@ -681,7 +653,7 @@ namespace org.apache.lucene.analysis.synonym
 			{
 			  // Still have pending outputs to replay at this
 			  // position
-			  input.reset();
+			  input.Reset();
 //JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
 //ORIGINAL LINE: final int posIncr = outputs.posIncr;
 			  int posIncr = outputs.posIncr;
@@ -713,7 +685,7 @@ namespace org.apache.lucene.analysis.synonym
 			{
 			  // Done with the buffered input and all outputs at
 			  // this position
-			  input.reset();
+			  input.Reset();
 			  nextRead = rollIncr(nextRead);
 			  inputSkipCount--;
 			}
@@ -734,7 +706,7 @@ namespace org.apache.lucene.analysis.synonym
 //JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
 //ORIGINAL LINE: final org.apache.lucene.util.CharsRef output = outputs.pullNext();
 			  CharsRef output = outputs.pullNext();
-			  futureInputs[nextRead].reset();
+			  futureInputs[nextRead].Reset();
 			  if (outputs.count == 0)
 			  {
 				nextWrite = nextRead = rollIncr(nextRead);
@@ -756,7 +728,7 @@ namespace org.apache.lucene.analysis.synonym
 		  }
 
 		  // Find new synonym matches:
-		  parse();
+		  Parse();
 		}
 	  }
 
@@ -777,7 +749,7 @@ namespace org.apache.lucene.analysis.synonym
 		// here:
 		foreach (PendingInput input in futureInputs)
 		{
-		  input.reset();
+		  input.Reset();
 		}
 		foreach (PendingOutputs output in futureOutputs)
 		{

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f77e83ec/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilterFactory.cs
index 9d924f7..714f509 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilterFactory.cs
@@ -2,7 +2,6 @@
 using System.Collections.Generic;
 using Lucene.Net.Analysis.Util;
 using Lucene.Net.Util;
-using org.apache.lucene.analysis.synonym;
 using org.apache.lucene.analysis.util;
 
 namespace Lucene.Net.Analysis.Synonym
@@ -65,7 +64,7 @@ namespace Lucene.Net.Analysis.Synonym
             : base(args)
 	  {
 		assureMatchVersion();
-		if (luceneMatchVersion.OnOrAfter(Lucene.Net.Util.Version.LUCENE_34))
+		if (luceneMatchVersion.OnOrAfter(Lucene.Net.Util.LuceneVersion.LUCENE_34))
 		{
 		  delegator = new FSTSynonymFilterFactory(new Dictionary<string, string>(OriginalArgs));
 		}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f77e83ec/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs
index 004572d..ab7761f 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs
@@ -1,8 +1,16 @@
 using System;
-using System.Diagnostics;
 using System.Collections.Generic;
-
-namespace org.apache.lucene.analysis.synonym
+using System.Diagnostics;
+using System.IO;
+using System.Linq;
+using Lucene.Net.Analysis.Tokenattributes;
+using Lucene.Net.Store;
+using Lucene.Net.Support;
+using Lucene.Net.Util;
+using Lucene.Net.Util.Fst;
+using Reader = System.IO.TextReader;
+
+namespace Lucene.Net.Analysis.Synonym
 {
 
 	/*
@@ -21,22 +29,7 @@ namespace org.apache.lucene.analysis.synonym
 	 * See the License for the specific language governing permissions and
 	 * limitations under the License.
 	 */
-
-
-	using CharTermAttribute = org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-	using PositionIncrementAttribute = org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-	using ByteArrayDataOutput = org.apache.lucene.store.ByteArrayDataOutput;
-	using BytesRef = org.apache.lucene.util.BytesRef;
-	using BytesRefHash = org.apache.lucene.util.BytesRefHash;
-	using CharsRef = org.apache.lucene.util.CharsRef;
-	using IOUtils = org.apache.lucene.util.IOUtils;
-	using IntsRef = org.apache.lucene.util.IntsRef;
-	using UnicodeUtil = org.apache.lucene.util.UnicodeUtil;
-	using ByteSequenceOutputs = org.apache.lucene.util.fst.ByteSequenceOutputs;
-	using FST = org.apache.lucene.util.fst.FST;
-	using Util = org.apache.lucene.util.fst.Util;
-
-	/// <summary>
+    /// <summary>
 	/// A map of synonyms, keys and values are phrases.
 	/// @lucene.experimental
 	/// </summary>
@@ -86,7 +79,7 @@ namespace org.apache.lucene.analysis.synonym
 		  this.dedup = dedup;
 		}
 
-		private class MapEntry
+		internal class MapEntry
 		{
 		  internal bool includeOrig;
 		  // we could sort for better sharing ultimately, but it could confuse people
@@ -101,19 +94,15 @@ namespace org.apache.lucene.analysis.synonym
 		public static CharsRef join(string[] words, CharsRef reuse)
 		{
 		  int upto = 0;
-		  char[] buffer = reuse.chars;
+		  char[] buffer = reuse.Chars;
 		  foreach (string word in words)
 		  {
-//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
-//ORIGINAL LINE: final int wordLen = word.length();
 			int wordLen = word.Length;
-//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
-//ORIGINAL LINE: final int needed = (0 == upto ? wordLen : 1 + upto + wordLen);
 			int needed = (0 == upto ? wordLen : 1 + upto + wordLen); // Add 1 for WORD_SEPARATOR
 			if (needed > buffer.Length)
 			{
-			  reuse.grow(needed);
-			  buffer = reuse.chars;
+			  reuse.Grow(needed);
+			  buffer = reuse.Chars;
 			}
 			if (upto > 0)
 			{
@@ -123,31 +112,30 @@ namespace org.apache.lucene.analysis.synonym
 			word.CopyTo(0, buffer, upto, wordLen - 0);
 			upto += wordLen;
 		  }
-		  reuse.length = upto;
+		  reuse.Length = upto;
 		  return reuse;
 		}
 
 
 
 		/// <summary>
-		/// only used for asserting! </summary>
-		internal virtual bool hasHoles(CharsRef chars)
+		/// only used for asserting!
+		/// </summary>
+		internal virtual bool HasHoles(CharsRef chars)
 		{
-//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
-//ORIGINAL LINE: final int end = chars.offset + chars.length;
-		  int end = chars.offset + chars.length;
-		  for (int idx = chars.offset + 1;idx < end;idx++)
+		  int end = chars.Offset + chars.Length;
+		  for (int idx = chars.Offset + 1;idx < end;idx++)
 		  {
-			if (chars.chars[idx] == SynonymMap.WORD_SEPARATOR && chars.chars[idx - 1] == SynonymMap.WORD_SEPARATOR)
+			if (chars.Chars[idx] == SynonymMap.WORD_SEPARATOR && chars.Chars[idx - 1] == SynonymMap.WORD_SEPARATOR)
 			{
 			  return true;
 			}
 		  }
-		  if (chars.chars[chars.offset] == '\u0000')
+		  if (chars.Chars[chars.Offset] == '\u0000')
 		  {
 			return true;
 		  }
-		  if (chars.chars[chars.offset + chars.length - 1] == '\u0000')
+		  if (chars.Chars[chars.Offset + chars.Length - 1] == '\u0000')
 		  {
 			return true;
 		  }
@@ -160,33 +148,33 @@ namespace org.apache.lucene.analysis.synonym
 		// numInput/numOutputWords, sneaky exceptions, much later
 		// on, will result if these values are wrong; so we always
 		// recompute ourselves to be safe:
-		internal virtual void add(CharsRef input, int numInputWords, CharsRef output, int numOutputWords, bool includeOrig)
+		internal virtual void Add(CharsRef input, int numInputWords, CharsRef output, int numOutputWords, bool includeOrig)
 		{
 		  // first convert to UTF-8
 		  if (numInputWords <= 0)
 		  {
 			throw new System.ArgumentException("numInputWords must be > 0 (got " + numInputWords + ")");
 		  }
-		  if (input.length <= 0)
+		  if (input.Length <= 0)
 		  {
-			throw new System.ArgumentException("input.length must be > 0 (got " + input.length + ")");
+			throw new System.ArgumentException("input.length must be > 0 (got " + input.Length + ")");
 		  }
 		  if (numOutputWords <= 0)
 		  {
 			throw new System.ArgumentException("numOutputWords must be > 0 (got " + numOutputWords + ")");
 		  }
-		  if (output.length <= 0)
+		  if (output.Length <= 0)
 		  {
-			throw new System.ArgumentException("output.length must be > 0 (got " + output.length + ")");
+			throw new System.ArgumentException("output.length must be > 0 (got " + output.Length + ")");
 		  }
 
-		  Debug.Assert(!hasHoles(input), "input has holes: " + input);
-		  Debug.Assert(!hasHoles(output), "output has holes: " + output);
+		  Debug.Assert(!HasHoles(input), "input has holes: " + input);
+		  Debug.Assert(!HasHoles(output), "output has holes: " + output);
 
 		  //System.out.println("fmap.add input=" + input + " numInputWords=" + numInputWords + " output=" + output + " numOutputWords=" + numOutputWords);
-		  UnicodeUtil.UTF16toUTF8(output.chars, output.offset, output.length, utf8Scratch);
+		  UnicodeUtil.UTF16toUTF8(output.Chars, output.Offset, output.Length, utf8Scratch);
 		  // lookup in hash
-		  int ord = words.add(utf8Scratch);
+		  int ord = words.Add(utf8Scratch);
 		  if (ord < 0)
 		  {
 			// already exists in our hash
@@ -202,7 +190,7 @@ namespace org.apache.lucene.analysis.synonym
 		  if (e == null)
 		  {
 			e = new MapEntry();
-			workingSet[CharsRef.deepCopyOf(input)] = e; // make a copy, since we will keep around in our map
+			workingSet[CharsRef.DeepCopyOf(input)] = e; // make a copy, since we will keep around in our map
 		  }
 
 		  e.ords.Add(ord);
@@ -214,13 +202,11 @@ namespace org.apache.lucene.analysis.synonym
 		internal virtual int countWords(CharsRef chars)
 		{
 		  int wordCount = 1;
-		  int upto = chars.offset;
-//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
-//ORIGINAL LINE: final int limit = chars.offset + chars.length;
-		  int limit = chars.offset + chars.length;
+		  int upto = chars.Offset;
+		  int limit = chars.Offset + chars.Length;
 		  while (upto < limit)
 		  {
-			if (chars.chars[upto++] == SynonymMap.WORD_SEPARATOR)
+			if (chars.Chars[upto++] == SynonymMap.WORD_SEPARATOR)
 			{
 			  wordCount++;
 			}
@@ -238,48 +224,41 @@ namespace org.apache.lucene.analysis.synonym
 		/// <param name="input"> input phrase </param>
 		/// <param name="output"> output phrase </param>
 		/// <param name="includeOrig"> true if the original should be included </param>
-		public virtual void add(CharsRef input, CharsRef output, bool includeOrig)
+		public virtual void Add(CharsRef input, CharsRef output, bool includeOrig)
 		{
-		  add(input, countWords(input), output, countWords(output), includeOrig);
+		  Add(input, countWords(input), output, countWords(output), includeOrig);
 		}
 
 		/// <summary>
 		/// Builds an <seealso cref="SynonymMap"/> and returns it.
 		/// </summary>
-//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in .NET:
-//ORIGINAL LINE: public SynonymMap build() throws java.io.IOException
-		public virtual SynonymMap build()
+		public virtual SynonymMap Build()
 		{
 		  ByteSequenceOutputs outputs = ByteSequenceOutputs.Singleton;
 		  // TODO: are we using the best sharing options?
-		  org.apache.lucene.util.fst.Builder<BytesRef> builder = new org.apache.lucene.util.fst.Builder<BytesRef>(FST.INPUT_TYPE.BYTE4, outputs);
+		  var builder = new Builder<BytesRef>(FST.INPUT_TYPE.BYTE4, outputs);
 
 		  BytesRef scratch = new BytesRef(64);
 		  ByteArrayDataOutput scratchOutput = new ByteArrayDataOutput();
 
-//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
-//ORIGINAL LINE: final java.util.Set<Integer> dedupSet;
 		  HashSet<int?> dedupSet;
 
 		  if (dedup)
 		  {
-			dedupSet = new HashSet<>();
+			dedupSet = new HashSet<int?>();
 		  }
 		  else
 		  {
 			dedupSet = null;
 		  }
 
-//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
-//ORIGINAL LINE: final byte[] spare = new byte[5];
-		  sbyte[] spare = new sbyte[5];
+		  
+            var spare = new sbyte[5];
 
 		  Dictionary<CharsRef, MapEntry>.KeyCollection keys = workingSet.Keys;
-		  CharsRef[] sortedKeys = keys.toArray(new CharsRef[keys.size()]);
-		  Arrays.sort(sortedKeys, CharsRef.UTF16SortedAsUTF8Comparator);
+		  CharsRef[] sortedKeys = keys.ToArray();
+		  Arrays.Sort(sortedKeys, CharsRef.UTF16SortedAsUTF8Comparator);
 
-//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
-//ORIGINAL LINE: final org.apache.lucene.util.IntsRef scratchIntsRef = new org.apache.lucene.util.IntsRef();
 		  IntsRef scratchIntsRef = new IntsRef();
 
 		  //System.out.println("fmap.build");
@@ -292,9 +271,9 @@ namespace org.apache.lucene.analysis.synonym
 			// output size, assume the worst case
 			int estimatedSize = 5 + numEntries * 5; // numEntries + one ord for each entry
 
-			scratch.grow(estimatedSize);
-			scratchOutput.reset(scratch.bytes, scratch.offset, scratch.bytes.length);
-			Debug.Assert(scratch.offset == 0);
+			scratch.Grow(estimatedSize);
+			scratchOutput.Reset(scratch.Bytes, scratch.Offset, scratch.Bytes.Length);
+			Debug.Assert(scratch.Offset == 0);
 
 			// now write our output data:
 			int count = 0;
@@ -303,8 +282,6 @@ namespace org.apache.lucene.analysis.synonym
 			  if (dedupSet != null)
 			  {
 				// box once
-//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
-//ORIGINAL LINE: final Integer ent = output.ords.get(i);
 				int? ent = output.ords[i];
 				if (dedupSet.Contains(ent))
 				{
@@ -312,37 +289,31 @@ namespace org.apache.lucene.analysis.synonym
 				}
 				dedupSet.Add(ent);
 			  }
-			  scratchOutput.writeVInt(output.ords[i]);
+			  scratchOutput.WriteVInt(output.ords[i]);
 			  count++;
 			}
 
-//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
-//ORIGINAL LINE: final int pos = scratchOutput.getPosition();
 			int pos = scratchOutput.Position;
-			scratchOutput.writeVInt(count << 1 | (output.includeOrig ? 0 : 1));
-//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
-//ORIGINAL LINE: final int pos2 = scratchOutput.getPosition();
+			scratchOutput.WriteVInt(count << 1 | (output.includeOrig ? 0 : 1));
 			int pos2 = scratchOutput.Position;
-//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
-//ORIGINAL LINE: final int vIntLen = pos2-pos;
 			int vIntLen = pos2 - pos;
 
 			// Move the count + includeOrig to the front of the byte[]:
-			Array.Copy(scratch.bytes, pos, spare, 0, vIntLen);
-			Array.Copy(scratch.bytes, 0, scratch.bytes, vIntLen, pos);
-			Array.Copy(spare, 0, scratch.bytes, 0, vIntLen);
+			Array.Copy(scratch.Bytes, pos, spare, 0, vIntLen);
+			Array.Copy(scratch.Bytes, 0, scratch.Bytes, vIntLen, pos);
+			Array.Copy(spare, 0, scratch.Bytes, 0, vIntLen);
 
 			if (dedupSet != null)
 			{
 			  dedupSet.Clear();
 			}
 
-			scratch.length = scratchOutput.Position - scratch.offset;
+			scratch.Length = scratchOutput.Position - scratch.Offset;
 			//System.out.println("  add input=" + input + " output=" + scratch + " offset=" + scratch.offset + " length=" + scratch.length + " count=" + count);
-			builder.add(Util.toUTF32(input, scratchIntsRef), BytesRef.deepCopyOf(scratch));
+			builder.Add(Util.ToUTF32(input, scratchIntsRef), BytesRef.DeepCopyOf(scratch));
 		  }
 
-		  FST<BytesRef> fst = builder.finish();
+		  FST<BytesRef> fst = builder.Finish();
 		  return new SynonymMap(fst, words, maxHorizontalContext);
 		}
 	  }
@@ -365,30 +336,26 @@ namespace org.apache.lucene.analysis.synonym
 		/// <summary>
 		/// Parse the given input, adding synonyms to the inherited <seealso cref="Builder"/>. </summary>
 		/// <param name="in"> The input to parse </param>
-//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in .NET:
-//ORIGINAL LINE: public abstract void parse(java.io.Reader in) throws java.io.IOException, java.text.ParseException;
-		public abstract void parse(Reader @in);
+		public abstract void Parse(Reader @in);
 
 		/// <summary>
 		/// Sugar: analyzes the text with the analyzer and
 		///  separates by <seealso cref="SynonymMap#WORD_SEPARATOR"/>.
 		///  reuse and its chars must not be null. 
 		/// </summary>
-//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in .NET:
-//ORIGINAL LINE: public org.apache.lucene.util.CharsRef analyze(String text, org.apache.lucene.util.CharsRef reuse) throws java.io.IOException
-		public virtual CharsRef analyze(string text, CharsRef reuse)
+		public virtual CharsRef Analyze(string text, CharsRef reuse)
 		{
 		  IOException priorException = null;
-		  TokenStream ts = analyzer.tokenStream("", text);
+		  TokenStream ts = analyzer.TokenStream("", text);
 		  try
 		  {
-			CharTermAttribute termAtt = ts.addAttribute(typeof(CharTermAttribute));
-			PositionIncrementAttribute posIncAtt = ts.addAttribute(typeof(PositionIncrementAttribute));
-			ts.reset();
-			reuse.length = 0;
-			while (ts.incrementToken())
+              var termAtt = ts.AddAttribute<ICharTermAttribute>();
+              var posIncAtt = ts.AddAttribute<IPositionIncrementAttribute>();
+			ts.Reset();
+			reuse.Length = 0;
+			while (ts.IncrementToken())
 			{
-			  int length = termAtt.length();
+			  int length = termAtt.Length;
 			  if (length == 0)
 			  {
 				throw new System.ArgumentException("term: " + text + " analyzed to a zero-length token");
@@ -397,17 +364,17 @@ namespace org.apache.lucene.analysis.synonym
 			  {
 				throw new System.ArgumentException("term: " + text + " analyzed to a token with posinc != 1");
 			  }
-			  reuse.grow(reuse.length + length + 1); // current + word + separator
-			  int end = reuse.offset + reuse.length;
-			  if (reuse.length > 0)
+			  reuse.Grow(reuse.Length + length + 1); // current + word + separator
+			  int end = reuse.Offset + reuse.Length;
+			  if (reuse.Length > 0)
 			  {
-				reuse.chars[end++] = SynonymMap.WORD_SEPARATOR;
-				reuse.length++;
+				reuse.Chars[end++] = SynonymMap.WORD_SEPARATOR;
+				reuse.Length++;
 			  }
-			  Array.Copy(termAtt.buffer(), 0, reuse.chars, end, length);
-			  reuse.length += length;
+			  Array.Copy(termAtt.Buffer(), 0, reuse.Chars, end, length);
+			  reuse.Length += length;
 			}
-			ts.end();
+			ts.End();
 		  }
 		  catch (IOException e)
 		  {
@@ -415,9 +382,9 @@ namespace org.apache.lucene.analysis.synonym
 		  }
 		  finally
 		  {
-			IOUtils.closeWhileHandlingException(priorException, ts);
+			IOUtils.CloseWhileHandlingException(priorException, ts);
 		  }
-		  if (reuse.length == 0)
+		  if (reuse.Length == 0)
 		  {
 			throw new System.ArgumentException("term: " + text + " was completely eliminated by analyzer");
 		  }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f77e83ec/src/Lucene.Net.Analysis.Common/Analysis/Synonym/WordnetSynonymParser.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/WordnetSynonymParser.cs b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/WordnetSynonymParser.cs
index 0bf9890..44e700d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/WordnetSynonymParser.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/WordnetSynonymParser.cs
@@ -1,6 +1,8 @@
 using System;
+using Lucene.Net.Util;
+using Reader = System.IO.TextReader;
 
-namespace org.apache.lucene.analysis.synonym
+namespace Lucene.Net.Analysis.Synonym
 {
 
 	/*
@@ -19,11 +21,7 @@ namespace org.apache.lucene.analysis.synonym
 	 * See the License for the specific language governing permissions and
 	 * limitations under the License.
 	 */
-
-
-	using CharsRef = org.apache.lucene.util.CharsRef;
-
-	/// <summary>
+    /// <summary>
 	/// Parser for wordnet prolog format
 	/// <para>
 	/// See http://wordnet.princeton.edu/man/prologdb.5WN.html for a description of the format.
@@ -40,9 +38,7 @@ namespace org.apache.lucene.analysis.synonym
 		this.expand = expand;
 	  }
 
-//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in .NET:
-//ORIGINAL LINE: @Override public void parse(java.io.Reader in) throws java.io.IOException, java.text.ParseException
-	  public override void parse(Reader @in)
+	  public override void Parse(Reader @in)
 	  {
 		LineNumberReader br = new LineNumberReader(@in);
 		try
@@ -102,7 +98,7 @@ namespace org.apache.lucene.analysis.synonym
 		int end = line.LastIndexOf('\'');
 
 		string text = line.Substring(start, end - start).Replace("''", "'");
-		return analyze(text, reuse);
+		return Analyze(text, reuse);
 	  }
 
 	  private void addInternal(CharsRef[] synset, int size)
@@ -118,7 +114,7 @@ namespace org.apache.lucene.analysis.synonym
 		  {
 			for (int j = 0; j < size; j++)
 			{
-			  add(synset[i], synset[j], false);
+			  Add(synset[i], synset[j], false);
 			}
 		  }
 		}
@@ -126,7 +122,7 @@ namespace org.apache.lucene.analysis.synonym
 		{
 		  for (int i = 0; i < size; i++)
 		  {
-			add(synset[i], synset[0], false);
+			Add(synset[i], synset[0], false);
 		  }
 		}
 	  }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f77e83ec/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
index 5fe93c3..e968a77 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
@@ -5,7 +5,8 @@ using Lucene.Net.Analysis.Core;
 using Lucene.Net.Support;
 using Lucene.Net.Util;
 using org.apache.lucene.analysis.util;
-using Version = System.Version;
+using Reader = System.IO.TextReader;
+using Version = Lucene.Net.Util.LuceneVersion;
 
 namespace Lucene.Net.Analysis.Util
 {
@@ -48,7 +49,7 @@ namespace Lucene.Net.Analysis.Util
 
 	  /// <summary>
 	  /// the luceneVersion arg </summary>
-	  protected internal readonly Lucene.Net.Util.Version luceneMatchVersion;
+	  protected internal readonly Lucene.Net.Util.LuceneVersion luceneMatchVersion;
 
         /// <summary>
 	  /// Initialize this factory via a set of key-value pairs.
@@ -58,7 +59,7 @@ namespace Lucene.Net.Analysis.Util
 	      ExplicitLuceneMatchVersion = false;
 	      originalArgs = Collections.UnmodifiableMap(args);
 		string version = get(args, LUCENE_MATCH_VERSION_PARAM);
-		luceneMatchVersion = version == null ? null : Lucene.Net.Util.Version.ParseLeniently(version);
+		luceneMatchVersion = version == null ? null : Lucene.Net.Util.LuceneVersion.ParseLeniently(version);
 		args.Remove(CLASS_NAME); // consume the class arg
 	  }
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f77e83ec/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs
index 4e76504..a760b2c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs
@@ -1,12 +1,11 @@
 using System;
-using System.Diagnostics;
 using System.Collections;
 using System.Collections.Generic;
+using System.Diagnostics;
 using System.Text;
-using Lucene.Net.Analysis.Util;
 using Lucene.Net.Support;
 
-namespace org.apache.lucene.analysis.util
+namespace Lucene.Net.Analysis.Util
 {
 
 	/*
@@ -53,9 +52,7 @@ namespace org.apache.lucene.analysis.util
 	public class CharArrayMap<V> : IDictionary<object, V>
 	{
 	  // private only because missing generics
-//JAVA TO C# CONVERTER TODO TASK: Java wildcard generics are not converted to .NET:
-//ORIGINAL LINE: private static final CharArrayMap<?> EMPTY_MAP = new EmptyCharArrayMap<>();
-	  private static readonly CharArrayMap<?> EMPTY_MAP = new EmptyCharArrayMap<?>();
+	  private static readonly CharArrayMap<char[]> EMPTY_MAP = new EmptyCharArrayMap<char[]>();
 
 	  private const int INIT_SIZE = 8;
 	  private readonly CharacterUtils charUtils;
@@ -76,7 +73,7 @@ namespace org.apache.lucene.analysis.util
 	  /// <param name="ignoreCase">
 	  ///          <code>false</code> if and only if the set should be case sensitive
 	  ///          otherwise <code>true</code>. </param>
-	  public CharArrayMap(Lucene.Net.Util.Version matchVersion, int startSize, bool ignoreCase)
+	  public CharArrayMap(Lucene.Net.Util.LuceneVersion matchVersion, int startSize, bool ignoreCase)
 	  {
 		this.ignoreCase = ignoreCase;
 		int size_Renamed = INIT_SIZE;
@@ -86,7 +83,7 @@ namespace org.apache.lucene.analysis.util
 		}
 		keys = new char[size_Renamed][];
 		values = (V[]) new object[size_Renamed];
-		this.charUtils = CharacterUtils.getInstance(matchVersion);
+		this.charUtils = CharacterUtils.GetInstance(matchVersion);
 		this.matchVersion = matchVersion;
 	  }
 
@@ -401,14 +398,14 @@ namespace org.apache.lucene.analysis.util
 		return code;
 	  }
 
-	  private int getHashCode(CharSequence text)
+	  private int getHashCode(ICharSequence text)
 	  {
 		if (text == null)
 		{
 		  throw new System.NullReferenceException();
 		}
 		int code = 0;
-		int len = text.length();
+		int len = text.Length();
 		if (ignoreCase)
 		{
 		  for (int i = 0; i < len;)
@@ -764,7 +761,7 @@ namespace org.apache.lucene.analysis.util
 	  ///         is a <seealso cref="CharArrayMap"/> the ignoreCase property as well as the
 	  ///         matchVersion will be of the given map will be preserved. </returns>
 //JAVA TO C# CONVERTER TODO TASK: The following line could not be converted:
-	  SuppressWarnings("unchecked") public static <V> CharArrayMap<V> copy(final org.apache.lucene.util.Version matchVersion, final java.util.Map<?,? extends V> map)
+	  SuppressWarnings("unchecked") public static <V> CharArrayMap<V> copy(final org.apache.lucene.util.Version matchVersion, final java.util.Map<?,? extends V> map)
 	  {
 		if (map == EMPTY_MAP)
 		{

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f77e83ec/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArraySet.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArraySet.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArraySet.cs
index 02d5ac8..f319675 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArraySet.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArraySet.cs
@@ -32,7 +32,7 @@ namespace Lucene.Net.Analysis.Util
 	/// to a String first.
 	/// 
 	/// <a name="version"></a>
-	/// <para>You must specify the required <seealso cref="Version"/>
+	/// <para>You must specify the required <seealso cref="LuceneVersion"/>
 	/// compatibility when creating <seealso cref="CharArraySet"/>:
 	/// <ul>
 	///   <li> As of 3.1, supplementary characters are
@@ -42,7 +42,7 @@ namespace Lucene.Net.Analysis.Util
 	/// lowercased correctly due to the lack of Unicode 4
 	/// support in JDK 1.4. To use instances of
 	/// <seealso cref="CharArraySet"/> with the behavior before Lucene
-	/// 3.1 pass a <seealso cref="Version"/> < 3.1 to the constructors.
+	/// 3.1 pass a <seealso cref="LuceneVersion"/> < 3.1 to the constructors.
 	/// <P>
 	/// <em>Please note:</em> This class implements <seealso cref="java.util.Set Set"/> but
 	/// does not behave like it should in all cases. The generic type is
@@ -71,7 +71,7 @@ namespace Lucene.Net.Analysis.Util
 	  /// <param name="ignoreCase">
 	  ///          <code>false</code> if and only if the set should be case sensitive
 	  ///          otherwise <code>true</code>. </param>
-	  public CharArraySet(Lucene.Net.Util.Version matchVersion, int startSize, bool ignoreCase) : this(new CharArrayMap<>(matchVersion, startSize, ignoreCase))
+	  public CharArraySet(Lucene.Net.Util.LuceneVersion matchVersion, int startSize, bool ignoreCase) : this(new CharArrayMap<>(matchVersion, startSize, ignoreCase))
 	  {
 	  }
 
@@ -86,7 +86,7 @@ namespace Lucene.Net.Analysis.Util
 	  /// <param name="ignoreCase">
 	  ///          <code>false</code> if and only if the set should be case sensitive
 	  ///          otherwise <code>true</code>. </param>
-	  public CharArraySet<T1>(Version matchVersion, ICollection<T1> c, bool ignoreCase) : this(matchVersion, c.Count, ignoreCase)
+	  public CharArraySet<T1>(LuceneVersion matchVersion, ICollection<T1> c, bool ignoreCase) : this(matchVersion, c.Count, ignoreCase)
 	  {
 		AddAll(c);
 	  }
@@ -189,9 +189,9 @@ namespace Lucene.Net.Analysis.Util
 	  /// is a <seealso cref="CharArraySet"/> the ignoreCase property will be preserved.
 	  /// <para>
 	  /// <b>Note:</b> If you intend to create a copy of another <seealso cref="CharArraySet"/> where
-	  /// the <seealso cref="Version"/> of the source set differs from its copy
+	  /// the <seealso cref="LuceneVersion"/> of the source set differs from its copy
 	  /// <seealso cref="#CharArraySet(Version, Collection, boolean)"/> should be used instead.
-	  /// The <seealso cref="#copy(Version, Set)"/> will preserve the <seealso cref="Version"/> of the
+	  /// The <seealso cref="#copy(Version, Set)"/> will preserve the <seealso cref="LuceneVersion"/> of the
 	  /// source set it is an instance of <seealso cref="CharArraySet"/>.
 	  /// </para>
 	  /// </summary>
@@ -204,7 +204,7 @@ namespace Lucene.Net.Analysis.Util
 	  /// <returns> a copy of the given set as a <seealso cref="CharArraySet"/>. If the given set
 	  ///         is a <seealso cref="CharArraySet"/> the ignoreCase property as well as the
 	  ///         matchVersion will be of the given set will be preserved. </returns>
-	  public static CharArraySet Copy<T1>(Version matchVersion, HashSet<T1> set)
+	  public static CharArraySet Copy<T1>(LuceneVersion matchVersion, HashSet<T1> set)
 	  {
 		if (set == EMPTY_SET)
 		{

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f77e83ec/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs
index df61504..d37b681 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs
@@ -26,7 +26,7 @@ namespace Lucene.Net.Analysis.Util
     /// <summary>
     /// An abstract base class for simple, character-oriented tokenizers. 
     /// <para>
-    /// <a name="version">You must specify the required <seealso cref="Version"/> compatibility
+    /// <a name="version">You must specify the required <seealso cref="LuceneVersion"/> compatibility
     /// when creating <seealso cref="CharTokenizer"/>:
     /// <ul>
     /// <li>As of 3.1, <seealso cref="CharTokenizer"/> uses an int based API to normalize and
@@ -45,18 +45,18 @@ namespace Lucene.Net.Analysis.Util
     /// </para>
     /// <para>
     /// As of Lucene 3.1 each <seealso cref="CharTokenizer"/> - constructor expects a
-    /// <seealso cref="Version"/> argument. Based on the given <seealso cref="Version"/> either the new
+    /// <seealso cref="LuceneVersion"/> argument. Based on the given <seealso cref="LuceneVersion"/> either the new
     /// API or a backwards compatibility layer is used at runtime. For
-    /// <seealso cref="Version"/> < 3.1 the backwards compatibility layer ensures correct
+    /// <seealso cref="LuceneVersion"/> < 3.1 the backwards compatibility layer ensures correct
     /// behavior even for indexes build with previous versions of Lucene. If a
-    /// <seealso cref="Version"/> >= 3.1 is used <seealso cref="CharTokenizer"/> requires the new API to
+    /// <seealso cref="LuceneVersion"/> >= 3.1 is used <seealso cref="CharTokenizer"/> requires the new API to
     /// be implemented by the instantiated class. Yet, the old <i>char</i> based API
     /// is not required anymore even if backwards compatibility must be preserved.
     /// <seealso cref="CharTokenizer"/> subclasses implementing the new API are fully backwards
-    /// compatible if instantiated with <seealso cref="Version"/> < 3.1.
+    /// compatible if instantiated with <seealso cref="LuceneVersion"/> < 3.1.
     /// </para>
     /// <para>
-    /// <strong>Note:</strong> If you use a subclass of <seealso cref="CharTokenizer"/> with <seealso cref="Version"/> >=
+    /// <strong>Note:</strong> If you use a subclass of <seealso cref="CharTokenizer"/> with <seealso cref="LuceneVersion"/> >=
     /// 3.1 on an index build with a version < 3.1, created tokens might not be
     /// compatible with the terms in your index.
     /// </para>
@@ -73,7 +73,7 @@ namespace Lucene.Net.Analysis.Util
         ///          Lucene version to match </param>
         /// <param name="input">
         ///          the input to split up into tokens </param>
-        protected CharTokenizer(Version matchVersion, TextReader input)
+        protected CharTokenizer(LuceneVersion matchVersion, TextReader input)
             : base(input)
         {
             termAtt = AddAttribute<ICharTermAttribute>();
@@ -91,7 +91,7 @@ namespace Lucene.Net.Analysis.Util
         ///          the attribute factory to use for this <seealso cref="Tokenizer"/> </param>
         /// <param name="input">
         ///          the input to split up into tokens </param>
-        public CharTokenizer(Version matchVersion, AttributeFactory factory, TextReader input)
+        public CharTokenizer(LuceneVersion matchVersion, AttributeFactory factory, TextReader input)
             : base(factory, input)
         {
             _input = input;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f77e83ec/src/Lucene.Net.Analysis.Common/Analysis/Util/CharacterUtils.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharacterUtils.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharacterUtils.cs
index ec78bee..1a7ccc9 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharacterUtils.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharacterUtils.cs
@@ -2,7 +2,7 @@
 using Lucene.Net.Support;
 using Lucene.Net.Util;
 using Reader = System.IO.TextReader;
-using Version = Lucene.Net.Util.Version;
+using Version = Lucene.Net.Util.LuceneVersion;
 
 namespace Lucene.Net.Analysis.Util
 {
@@ -26,7 +26,7 @@ namespace Lucene.Net.Analysis.Util
     /// <summary>
 	/// <seealso cref="CharacterUtils"/> provides a unified interface to Character-related
 	/// operations to implement backwards compatible character operations based on a
-	/// <seealso cref="Version"/> instance.
+	/// <seealso cref="LuceneVersion"/> instance.
 	/// 
 	/// @lucene.internal
 	/// </summary>
@@ -37,15 +37,15 @@ namespace Lucene.Net.Analysis.Util
 
 	  /// <summary>
 	  /// Returns a <seealso cref="CharacterUtils"/> implementation according to the given
-	  /// <seealso cref="Version"/> instance.
+	  /// <seealso cref="LuceneVersion"/> instance.
 	  /// </summary>
 	  /// <param name="matchVersion">
 	  ///          a version instance </param>
 	  /// <returns> a <seealso cref="CharacterUtils"/> implementation according to the given
-	  ///         <seealso cref="Version"/> instance. </returns>
-	  public static CharacterUtils GetInstance(Version matchVersion)
+	  ///         <seealso cref="LuceneVersion"/> instance. </returns>
+	  public static CharacterUtils GetInstance(LuceneVersion matchVersion)
 	  {
-		return matchVersion.OnOrAfter(Version.LUCENE_31) ? JAVA_5 : JAVA_4;
+		return matchVersion.OnOrAfter(LuceneVersion.LUCENE_31) ? JAVA_5 : JAVA_4;
 	  }
 
 	  /// <summary>
@@ -60,7 +60,7 @@ namespace Lucene.Net.Analysis.Util
 
 	  /// <summary>
 	  /// Returns the code point at the given index of the <seealso cref="CharSequence"/>.
-	  /// Depending on the <seealso cref="Version"/> passed to
+	  /// Depending on the <seealso cref="LuceneVersion"/> passed to
 	  /// <seealso cref="CharacterUtils#getInstance(Version)"/> this method mimics the behavior
 	  /// of <seealso cref="Character#codePointAt(char[], int)"/> as it would have been
 	  /// available on a Java 1.4 JVM or on a later virtual machine version.
@@ -81,7 +81,7 @@ namespace Lucene.Net.Analysis.Util
 	  /// <summary>
 	  /// Returns the code point at the given index of the char array where only elements
 	  /// with index less than the limit are used.
-	  /// Depending on the <seealso cref="Version"/> passed to
+	  /// Depending on the <seealso cref="LuceneVersion"/> passed to
 	  /// <seealso cref="CharacterUtils#getInstance(Version)"/> this method mimics the behavior
 	  /// of <seealso cref="Character#codePointAt(char[], int)"/> as it would have been
 	  /// available on a Java 1.4 JVM or on a later virtual machine version.
@@ -134,7 +134,7 @@ namespace Lucene.Net.Analysis.Util
 		Debug.Assert(offset <= 0 && offset <= buffer.Length);
 		for (int i = offset; i < limit;)
 		{
-		  i += char.ToChars(char.ToLower(CodePointAt(buffer, i, limit)), buffer, i);
+		  i += Character.ToChars(char.ToLower(CodePointAt(buffer, i, limit)), buffer, i);
 		}
 	  }
 
@@ -150,7 +150,7 @@ namespace Lucene.Net.Analysis.Util
 		Debug.Assert(offset <= 0 && offset <= buffer.Length);
 		for (int i = offset; i < limit;)
 		{
-		  i += char.toChars(char.ToUpper(codePointAt(buffer, i, limit)), buffer, i);
+		  i += Character.ToChars(char.ToUpper(CodePointAt(buffer, i, limit)), buffer, i);
 		}
 	  }
 
@@ -166,12 +166,8 @@ namespace Lucene.Net.Analysis.Util
 		int codePointCount_Renamed = 0;
 		for (int i = 0; i < srcLen;)
 		{
-//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
-//ORIGINAL LINE: final int cp = codePointAt(src, srcOff + i, srcOff + srcLen);
-		  int cp = codePointAt(src, srcOff + i, srcOff + srcLen);
-//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
-//ORIGINAL LINE: final int charCount = Character.charCount(cp);
-		  int charCount = char.charCount(cp);
+		  int cp = CodePointAt(src, srcOff + i, srcOff + srcLen);
+		  int charCount = Character.CharCount(cp);
 		  dest[destOff + codePointCount_Renamed++] = cp;
 		  i += charCount;
 		}
@@ -190,7 +186,7 @@ namespace Lucene.Net.Analysis.Util
 		int written = 0;
 		for (int i = 0; i < srcLen; ++i)
 		{
-		  written += char.toChars(src[srcOff + i], dest, destOff + written);
+		  written += Character.ToChars(src[srcOff + i], dest, destOff + written);
 		}
 		return written;
 	  }
@@ -205,10 +201,10 @@ namespace Lucene.Net.Analysis.Util
 	  /// the middle of a surrogate pair, even if there are remaining characters in
 	  /// the <seealso cref="Reader"/>.
 	  /// <para>
-	  /// Depending on the <seealso cref="Version"/> passed to
+	  /// Depending on the <seealso cref="LuceneVersion"/> passed to
 	  /// <seealso cref="CharacterUtils#getInstance(Version)"/> this method implements
 	  /// supplementary character awareness when filling the given buffer. For all
-	  /// <seealso cref="Version"/> &gt; 3.0 <seealso cref="#fill(CharacterBuffer, Reader, int)"/> guarantees
+	  /// <seealso cref="LuceneVersion"/> &gt; 3.0 <seealso cref="#fill(CharacterBuffer, Reader, int)"/> guarantees
 	  /// that the given <seealso cref="CharacterBuffer"/> will never contain a high surrogate
 	  /// character as the last element in the buffer unless it is the last available
 	  /// character in the reader. In other words, high and low surrogate pairs will
@@ -267,12 +263,12 @@ namespace Lucene.Net.Analysis.Util
 
 		public override int CodePointAt(string seq, int offset)
 		{
-		  return char.CodePointAt(seq, offset);
+		  return Character.CodePointAt(seq, offset);
 		}
 
 		public override int CodePointAt(char[] chars, int offset, int limit)
 		{
-		 return char.CodePointAt(chars, offset, limit);
+		 return Character.CodePointAt(chars, offset, limit);
 		}
 
 		public override bool Fill(CharacterBuffer buffer, Reader reader, int numChars)
@@ -301,8 +297,6 @@ namespace Lucene.Net.Analysis.Util
 		  int read = ReadFully(reader, charBuffer, offset, numChars - offset);
 
 		  buffer.length = offset + read;
-//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
-//ORIGINAL LINE: final boolean result = buffer.length == numChars;
 		  bool result = buffer.length == numChars;
 		  if (buffer.length < numChars)
 		  {
@@ -320,12 +314,12 @@ namespace Lucene.Net.Analysis.Util
 
 		public override int CodePointCount(string seq)
 		{
-		  return char.CodePointCount(seq, 0, seq.Length);
+		  return Character.CodePointCount(seq, 0, seq.Length);
 		}
 
 		public override int OffsetByCodePoints(char[] buf, int start, int count, int index, int offset)
 		{
-		  return char.OffsetByCodePoints(buf, start, count, index, offset);
+		  return Character.OffsetByCodePoints(buf, start, count, index, offset);
 		}
 	  }
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f77e83ec/src/Lucene.Net.Analysis.Common/Analysis/Util/FilteringTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/FilteringTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/FilteringTokenFilter.cs
index a52cc83..1655eaf 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/FilteringTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/FilteringTokenFilter.cs
@@ -1,7 +1,6 @@
 using System;
 using Lucene.Net.Analysis.Tokenattributes;
 using Lucene.Net.Util;
-using Version = Lucene.Net.Util.Version;
 
 namespace Lucene.Net.Analysis.Util
 {
@@ -35,15 +34,15 @@ namespace Lucene.Net.Analysis.Util
     public abstract class FilteringTokenFilter : TokenFilter
     {
 
-        private static void CheckPositionIncrement(Version version, bool enablePositionIncrements)
+        private static void CheckPositionIncrement(LuceneVersion version, bool enablePositionIncrements)
         {
-            if (!enablePositionIncrements && version.OnOrAfter(Version.LUCENE_44))
+            if (!enablePositionIncrements && version.OnOrAfter(LuceneVersion.LUCENE_44))
             {
                 throw new System.ArgumentException("enablePositionIncrements=false is not supported anymore as of Lucene 4.4 as it can create broken token streams");
             }
         }
 
-        protected internal readonly Version version;
+        protected internal readonly LuceneVersion version;
         private readonly IPositionIncrementAttribute posIncrAtt;
         private bool enablePositionIncrements; // no init needed, as ctor enforces setting value!
         private bool first = true;
@@ -56,7 +55,7 @@ namespace Lucene.Net.Analysis.Util
         /// <param name="input">                    the input to consume </param>
         /// @deprecated enablePositionIncrements=false is not supported anymore as of Lucene 4.4 
         [Obsolete("enablePositionIncrements=false is not supported anymore as of Lucene 4.4")]
-        public FilteringTokenFilter(Lucene.Net.Util.Version version, bool enablePositionIncrements, TokenStream input)
+        public FilteringTokenFilter(Lucene.Net.Util.LuceneVersion version, bool enablePositionIncrements, TokenStream input)
             : this(version, input)
         {
             posIncrAtt = AddAttribute<IPositionIncrementAttribute>();
@@ -68,7 +67,7 @@ namespace Lucene.Net.Analysis.Util
         /// Create a new <seealso cref="FilteringTokenFilter"/>. </summary>
         /// <param name="version"> the Lucene match version </param>
         /// <param name="in">      the <seealso cref="TokenStream"/> to consume </param>
-        public FilteringTokenFilter(Version version, TokenStream @in)
+        public FilteringTokenFilter(LuceneVersion version, TokenStream @in)
             : base(@in)
         {
             this.version = version;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f77e83ec/src/Lucene.Net.Analysis.Common/Analysis/Util/OpenStringBuilder.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/OpenStringBuilder.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/OpenStringBuilder.cs
index 4081e36..246bee9 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/OpenStringBuilder.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/OpenStringBuilder.cs
@@ -1,4 +1,5 @@
 using System;
+using Lucene.Net.Support;
 
 namespace Lucene.Net.Analysis.Util
 {
@@ -23,7 +24,7 @@ namespace Lucene.Net.Analysis.Util
 	/// <summary>
 	/// A StringBuilder that allows one to access the array.
 	/// </summary>
-	public class OpenStringBuilder : Appendable, CharSequence
+	public class OpenStringBuilder : IAppendable, ICharSequence
 	{
 	  protected internal char[] buf;
 	  protected internal int len;
@@ -34,7 +35,7 @@ namespace Lucene.Net.Analysis.Util
 
 	  public OpenStringBuilder(int size)
 	  {
-		buf = new char[size_Renamed];
+		buf = new char[size];
 	  }
 
 	  public OpenStringBuilder(char[] arr, int len)
@@ -48,6 +49,7 @@ namespace Lucene.Net.Analysis.Util
 		  {
 			  this.len = value;
 		  }
+          get { return len; }
 	  }
 
 	  public virtual void set(char[] arr, int end)
@@ -63,15 +65,12 @@ namespace Lucene.Net.Analysis.Util
 			  return buf;
 		  }
 	  }
-	  public virtual int size()
+	  public virtual int Size()
 	  {
 		  return len;
 	  }
-	  public override int length()
-	  {
-		  return len;
-	  }
-	  public virtual int capacity()
+
+	  public virtual int Capacity()
 	  {
 		  return buf.Length;
 	  }
@@ -97,12 +96,12 @@ namespace Lucene.Net.Analysis.Util
 		return this;
 	  }
 
-	  public override char charAt(int index)
+	  public override char CharAt(int index)
 	  {
 		return buf[index];
 	  }
 
-	  public virtual void setCharAt(int index, char ch)
+	  public virtual void SetCharAt(int index, char ch)
 	  {
 		buf[index] = ch;
 	  }
@@ -131,7 +130,7 @@ namespace Lucene.Net.Analysis.Util
 	  protected internal virtual void resize(int len)
 	  {
 		char[] newbuf = new char[Math.Max(buf.Length << 1, len)];
-		Array.Copy(buf, 0, newbuf, 0, size());
+		Array.Copy(buf, 0, newbuf, 0, Size());
 		buf = newbuf;
 	  }
 
@@ -191,14 +190,14 @@ namespace Lucene.Net.Analysis.Util
 
 	  public virtual char[] ToCharArray()
 	  {
-		char[] newbuf = new char[size()];
-		Array.Copy(buf, 0, newbuf, 0, size());
+		char[] newbuf = new char[Size()];
+		Array.Copy(buf, 0, newbuf, 0, Size());
 		return newbuf;
 	  }
 
 	  public override string ToString()
 	  {
-		return new string(buf, 0, size());
+		return new string(buf, 0, Size());
 	  }
 	}
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f77e83ec/src/Lucene.Net.Analysis.Common/Analysis/Util/ResourceLoaderAware.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/ResourceLoaderAware.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/ResourceLoaderAware.cs
index eceb3e6..23a4ad5 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/ResourceLoaderAware.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/ResourceLoaderAware.cs
@@ -15,8 +15,6 @@
  * limitations under the License.
  */
 
-using org.apache.lucene.analysis.util;
-
 namespace Lucene.Net.Analysis.Util
 {
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f77e83ec/src/Lucene.Net.Analysis.Common/Analysis/Util/SegmentingTokenizerBase.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/SegmentingTokenizerBase.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/SegmentingTokenizerBase.cs
index 231f550..49fb6c4 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/SegmentingTokenizerBase.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/SegmentingTokenizerBase.cs
@@ -3,6 +3,8 @@ using System.Diagnostics;
 using System.IO;
 using Lucene.Net.Analysis.Tokenattributes;
 using org.apache.lucene.analysis.util;
+using Reader = System.IO.TextReader;
+using Version = Lucene.Net.Util.LuceneVersion;
 
 namespace Lucene.Net.Analysis.Util
 {
@@ -118,7 +120,7 @@ namespace Lucene.Net.Analysis.Util
 	  {
 		for (int i = length - 1; i >= 0; i--)
 		{
-		  if (isSafeEnd(buffer[i]))
+		  if (IsSafeEnd(buffer[i]))
 		  {
 			return i + 1;
 		  }
@@ -153,7 +155,7 @@ namespace Lucene.Net.Analysis.Util
 	        int leftover = length - usableLength;
 	        Array.Copy(buffer, usableLength, buffer, 0, leftover);
 	        int requested = buffer.Length - leftover;
-	        int returned = read(input, buffer, leftover, requested);
+	        int returned = Read(input, buffer, leftover, requested);
 	        length = returned < 0 ? leftover : returned + leftover;
 	        if (returned < requested) // reader has been emptied, process the rest
 	        {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f77e83ec/src/Lucene.Net.Analysis.Common/Analysis/Util/StopwordAnalyzerBase.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/StopwordAnalyzerBase.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/StopwordAnalyzerBase.cs
index de736a3..915f4a6 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/StopwordAnalyzerBase.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/StopwordAnalyzerBase.cs
@@ -17,7 +17,8 @@
 using System;
 using System.IO;
 using Lucene.Net.Util;
-using Version = System.Version;
+using Reader = System.IO.TextReader;
+using Version = Lucene.Net.Util.LuceneVersion;
 
 namespace Lucene.Net.Analysis.Util
 {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f77e83ec/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenFilterFactory.cs
index c7769ba..21f2ce3 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenFilterFactory.cs
@@ -1,6 +1,5 @@
 using System;
 using System.Collections.Generic;
-using org.apache.lucene.analysis.util;
 
 namespace Lucene.Net.Analysis.Util
 {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f77e83ec/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenizerFactory.cs
index 65d7325..d00b972 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenizerFactory.cs
@@ -2,7 +2,6 @@
 using System.Collections.Generic;
 using System.IO;
 using Lucene.Net.Util;
-using org.apache.lucene.analysis.util;
 
 namespace Lucene.Net.Analysis.Util
 {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f77e83ec/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs
index 36e1877..f15b4a5 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs
@@ -3,7 +3,8 @@ using System.Collections.Generic;
 using System.IO;
 using Lucene.Net.Util;
 using org.apache.lucene.analysis.util;
-using Version = System.Version;
+using Reader = System.IO.TextReader;
+using Version = Lucene.Net.Util.LuceneVersion;
 
 namespace Lucene.Net.Analysis.Util
 {
@@ -238,8 +239,6 @@ namespace Lucene.Net.Analysis.Util
 	  /// </summary>
 	  /// <returns> a list of non-blank non-comment lines with whitespace trimmed </returns>
 	  /// <exception cref="IOException"> If there is a low-level I/O error. </exception>
-//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in .NET:
-//ORIGINAL LINE: public static java.util.List<String> getLines(java.io.InputStream stream, java.nio.charset.Charset charset) throws java.io.IOException
 	  public static IList<string> getLines(InputStream stream, Charset charset)
 	  {
 		BufferedReader input = null;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f77e83ec/src/Lucene.Net.Analysis.Common/Collation/CollationKeyAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyAnalyzer.cs
index 0c4df1f..1a3750d 100644
--- a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyAnalyzer.cs
@@ -2,7 +2,6 @@
 using System.IO;
 using Lucene.Net.Analysis;
 using Lucene.Net.Analysis.Core;
-using org.apache.lucene.collation;
 
 namespace Lucene.Net.Collation
 {
@@ -35,7 +34,7 @@ namespace Lucene.Net.Collation
 	/// <para>
 	///   Converts the token into its <seealso cref="java.text.CollationKey"/>, and then
 	///   encodes the CollationKey either directly or with 
-	///   <seealso cref="IndexableBinaryStringTools"/> (see <a href="#version">below</a>), to allow 
+	///   <seealso cref="Util.IndexableBinaryStringTools"/> (see <a href="#version">below</a>), to allow 
 	///   it to be stored as an index term.
 	/// </para>
 	/// <para>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f77e83ec/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilter.cs b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilter.cs
index a098632..14afd41 100644
--- a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilter.cs
@@ -1,6 +1,9 @@
 using System;
+using Lucene.Net.Analysis;
+using Lucene.Net.Analysis.Tokenattributes;
+using Lucene.Net.Util;
 
-namespace org.apache.lucene.collation
+namespace Lucene.Net.Collation
 {
 
 	/*
@@ -19,16 +22,7 @@ namespace org.apache.lucene.collation
 	 * See the License for the specific language governing permissions and
 	 * limitations under the License.
 	 */
-
-
-	using TokenFilter = org.apache.lucene.analysis.TokenFilter;
-	using TokenStream = org.apache.lucene.analysis.TokenStream;
-	using CharTermAttribute = org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-	using IndexableBinaryStringTools = org.apache.lucene.util.IndexableBinaryStringTools;
-
-
-
-	/// <summary>
+    /// <summary>
 	/// <para>
 	///   Converts each token into its <seealso cref="java.text.CollationKey"/>, and then
 	///   encodes the CollationKey with <seealso cref="IndexableBinaryStringTools"/>, to allow 
@@ -74,7 +68,7 @@ namespace org.apache.lucene.collation
 	/// </para> </summary>
 	/// @deprecated Use <seealso cref="CollationAttributeFactory"/> instead, which encodes
 	///  terms directly as bytes. This filter will be removed in Lucene 5.0 
-	[Obsolete("Use <seealso cref="CollationAttributeFactory"/> instead, which encodes")]
+	[Obsolete("Use <seealso cref=\"CollationAttributeFactory\"/> instead, which encodes")]
 	public sealed class CollationKeyFilter : TokenFilter
 	{
 	  private readonly Collator collator;
@@ -89,13 +83,11 @@ namespace org.apache.lucene.collation
 		this.collator = (Collator) collator.clone();
 	  }
 
-//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in .NET:
-//ORIGINAL LINE: @Override public boolean incrementToken() throws java.io.IOException
-	  public override bool incrementToken()
+	  public override bool IncrementToken()
 	  {
-		if (input.incrementToken())
+		if (input.IncrementToken())
 		{
-		  sbyte[] collationKey = collator.getCollationKey(termAtt.ToString()).toByteArray();
+		  var collationKey = collator.GetCollationKey(termAtt.ToString()).toByteArray();
 		  int encodedLength = IndexableBinaryStringTools.getEncodedLength(collationKey, 0, collationKey.Length);
 		  termAtt.resizeBuffer(encodedLength);
 		  termAtt.Length = encodedLength;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f77e83ec/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilterFactory.cs
index 7396e1f..f42e863 100644
--- a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilterFactory.cs
@@ -2,6 +2,7 @@
 using System.Collections.Generic;
 using System.Text;
 using Lucene.Net.Analysis.Util;
+using Lucene.Net.Collation;
 
 namespace org.apache.lucene.collation
 {
@@ -70,7 +71,7 @@ namespace org.apache.lucene.collation
 	/// <seealso cref= RuleBasedCollator
 	/// @since solr 3.1 </seealso>
 	/// @deprecated use <seealso cref="CollationKeyAnalyzer"/> instead. 
-	[Obsolete("use <seealso cref="CollationKeyAnalyzer"/> instead.")]
+	[Obsolete("use <seealso cref=\"CollationKeyAnalyzer\"/> instead.")]
 	public class CollationKeyFilterFactory : TokenFilterFactory, MultiTermAwareComponent, ResourceLoaderAware
 	{
 	  private Collator collator;


Mime
View raw message