lucenenet-commits mailing list archives

From: ccurr...@apache.org
Subject: svn commit: r1394820 [3/6] - in /lucene.net/trunk: src/contrib/Analyzers/ src/contrib/Analyzers/AR/ src/contrib/Analyzers/BR/ src/contrib/Analyzers/CJK/ src/contrib/Analyzers/Compound/ src/contrib/Analyzers/Compound/Hyphenation/ src/contrib/Analyzers/C...
Date: Fri, 05 Oct 2012 21:22:59 GMT
Modified: lucene.net/trunk/src/contrib/Memory/MemoryIndex.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Memory/MemoryIndex.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Memory/MemoryIndex.cs (original)
+++ lucene.net/trunk/src/contrib/Memory/MemoryIndex.cs Fri Oct  5 21:22:51 2012
@@ -55,14 +55,14 @@ namespace Lucene.Net.Index.Memory
     /// Each instance can hold at most one Lucene "document", with a document containing
     /// zero or more "fields", each field having a name and a fulltext value. The
     /// fulltext value is tokenized (split and transformed) into zero or more index terms 
-    /// (aka words) on <code>addField()</code>, according to the policy implemented by an
+    /// (aka words) on <c>addField()</c>, according to the policy implemented by an
     /// Analyzer. For example, Lucene analyzers can split on whitespace, normalize to lower case
     /// for case insensitivity, ignore common terms with little discriminatory value such as "he", "in", "and" (stop
     /// words), reduce the terms to their natural linguistic root form such as "fishing"
     /// being reduced to "fish" (stemming), resolve synonyms/inflexions/thesauri 
     /// (upon indexing and/or querying), etc. For details, see
     /// <a target="_blank" href="http://today.java.net/pub/a/today/2003/07/30/LuceneIntro.html">Lucene Analyzer Intro</a>.
-    /// <p>
+    /// <p/>
     /// Arbitrary Lucene queries can be run against this class - see <a target="_blank" 
     /// href="../../../../../../../queryparsersyntax.html">Lucene Query Syntax</a>
     /// as well as <a target="_blank" 
@@ -70,12 +70,12 @@ namespace Lucene.Net.Index.Memory
     /// Note that a Lucene query selects on the field names and associated (indexed) 
     /// tokenized terms, not on the original fulltext(s) - the latter are not stored 
     /// but rather thrown away immediately after tokenization.
-    /// <p>
+    /// <p/>
     /// For some interesting background information on search technology, see Bob Wyman's
     /// <a target="_blank" 
     /// href="http://bobwyman.pubsub.com/main/2005/05/mary_hodder_poi.html">Prospective Search</a>, 
     /// Jim Gray's
-    /// <a target="_blank" href="http://www.acmqueue.org/modules.php?name=Content&pa=showpage&pid=293&page=4">
+    /// <a target="_blank" href="http://www.acmqueue.org/modules.php?name=Content&amp;pa=showpage&amp;pid=293&amp;page=4">
     /// A Call to Arms - Custom subscriptions</a>, and Tim Bray's
     /// <a target="_blank" 
     /// href="http://www.tbray.org/ongoing/When/200x/2003/07/30/OnSearchTOC">On Search, the Series</a>.
@@ -134,11 +134,11 @@ namespace Lucene.Net.Index.Memory
     /// <p/>
     /// This class performs very well for very small texts (e.g. 10 chars) 
     /// as well as for large texts (e.g. 10 MB) and everything in between. 
-    /// Typically, it is about 10-100 times faster than <code>RAMDirectory</code>.
-    /// Note that <code>RAMDirectory</code> has particularly 
+    /// Typically, it is about 10-100 times faster than <c>RAMDirectory</c>.
+    /// Note that <c>RAMDirectory</c> has particularly 
     /// large efficiency overheads for small to medium sized texts, both in time and space.
     /// Indexing a field with N tokens takes O(N) in the best case, and O(N logN) in the worst 
-    /// case. Memory consumption is probably larger than for <code>RAMDirectory</code>.
+    /// case. Memory consumption is probably larger than for <c>RAMDirectory</c>.
     /// <p/>
     /// Example throughput of many simple term queries over a single MemoryIndex: 
     /// ~500000 queries/sec on a MacBook Pro, jdk 1.5.0_06, server VM. 
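For context, the workflow described above (tokenize fields with AddField(), then run queries against the single in-memory document) looks roughly like the C# sketch below. It assumes the contrib API surface named in these comments, namely an AddField(string, string, Analyzer) convenience overload and a float-returning Search(Query); it is illustrative only and not part of this commit:

    // Hypothetical usage sketch of Lucene.Net.Index.Memory.MemoryIndex (contrib/Memory).
    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Index.Memory;
    using Lucene.Net.QueryParsers;
    using Version = Lucene.Net.Util.Version;

    class MemoryIndexExample
    {
        static void Main()
        {
            var analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);
            var index = new MemoryIndex();

            // One "document", two fields; the fulltext is tokenized on AddField().
            index.AddField("author", "Tales of James", analyzer);
            index.AddField("content", "Readings about Salmons and other select Alaska fishing Manuals", analyzer);

            // Score an arbitrary Lucene query against the in-memory document.
            var parser = new QueryParser(Version.LUCENE_CURRENT, "content", analyzer);
            float score = index.Search(parser.Parse("+author:james +salmon~ +fish* manual~"));

            System.Console.WriteLine(score);   // > 0 means the query matched; higher is more relevant
        }
    }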
@@ -155,23 +155,23 @@ namespace Lucene.Net.Index.Memory
     [Serializable]
     public partial class MemoryIndex
     {
-        /** info for each field: Map<String fieldName, Info field> */
+        /* info for each field: Map<String fieldName, Info field> */
         private HashMap<String, Info> fields = new HashMap<String, Info>();
 
-        /** fields sorted ascending by fieldName; lazily computed on demand */
+        /* fields sorted ascending by fieldName; lazily computed on demand */
         [NonSerialized] private KeyValuePair<String, Info>[] sortedFields;
 
-        /** pos: positions[3*i], startOffset: positions[3*i +1], endOffset: positions[3*i +2] */
+        /* pos: positions[3*i], startOffset: positions[3*i +1], endOffset: positions[3*i +2] */
         private int stride;
 
-        /** Could be made configurable; See {@link Document#setBoost(float)} */
+        /* Could be made configurable; See {@link Document#setBoost(float)} */
         private static float docBoost = 1.0f;
 
         private static long serialVersionUID = 2782195016849084649L;
 
         private static bool DEBUG = false;
 
-        /**
+        /*
          * Constructs an empty instance.
          */
         public MemoryIndex()
@@ -179,7 +179,7 @@ namespace Lucene.Net.Index.Memory
         {
         }
 
-        /**
+        /*
          * Constructs an empty instance that can optionally store the start and end
          * character offset of each token term in the text. This can be useful for
          * highlighting of hit locations with the Lucene highlighter package.
@@ -196,7 +196,7 @@ namespace Lucene.Net.Index.Memory
             this.stride = storeOffsets ? 3 : 1;
         }
 
-        /**
+        /*
          * Convenience method; Tokenizes the given field text and adds the resulting
          * terms to the index; Equivalent to adding an indexed non-keyword Lucene
          * {@link org.apache.lucene.document.Field} that is
@@ -227,7 +227,7 @@ namespace Lucene.Net.Index.Memory
             AddField(fieldName, stream);
         }
 
-        /**
+        /*
          * Convenience method; Creates and returns a token stream that generates a
          * token for each keyword in the given collection, "as is", without any
          * transforming text analysis. The resulting token stream can be fed into
@@ -248,8 +248,8 @@ namespace Lucene.Net.Index.Memory
             return new KeywordTokenStream<T>(keywords);
         }
 
-        /**
-         * Equivalent to <code>addField(fieldName, stream, 1.0f)</code>.
+        /*
+         * Equivalent to <c>addField(fieldName, stream, 1.0f)</c>.
          * 
          * @param fieldName
          *            a name to be associated with the text
@@ -261,12 +261,12 @@ namespace Lucene.Net.Index.Memory
             AddField(fieldName, stream, 1.0f);
         }
 
-        /**
+        /*
          * Iterates over the given token stream and adds the resulting terms to the index;
          * Equivalent to adding a tokenized, indexed, termVectorStored, unstored,
          * Lucene {@link org.apache.lucene.document.Field}.
          * Finally closes the token stream. Note that untokenized keywords can be added with this method via 
-         * {@link #CreateKeywordTokenStream(Collection)}, the Lucene contrib <code>KeywordTokenizer</code> or similar utilities.
+         * {@link #CreateKeywordTokenStream(Collection)}, the Lucene contrib <c>KeywordTokenizer</c> or similar utilities.
          * 
          * @param fieldName
          *            a name to be associated with the text
@@ -354,7 +354,7 @@ namespace Lucene.Net.Index.Memory
             }
         }
 
-        /**
+        /*
          * Creates and returns a searcher that can be used to execute arbitrary
          * Lucene queries and to collect the resulting query results as hits.
          * 
@@ -369,7 +369,7 @@ namespace Lucene.Net.Index.Memory
             return searcher;
         }
 
-        /**
+        /*
          * Convenience method that efficiently returns the relevance score by
          * matching this index against the given Lucene query expression.
          * 
@@ -418,7 +418,7 @@ namespace Lucene.Net.Index.Memory
             }
         }
 
-        /**
+        /*
          * Returns a reasonable approximation of the main memory [bytes] consumed by
          * this instance. Useful for smart memory sensitive caches/pools. Assumes
          * fieldNames are interned, whereas tokenized terms are memory-overlaid.
@@ -465,14 +465,14 @@ namespace Lucene.Net.Index.Memory
             return positions.Size()/stride;
         }
 
-        /** sorts into ascending order (on demand), reusing memory along the way */
+        /* sorts into ascending order (on demand), reusing memory along the way */
 
         private void SortFields()
         {
             if (sortedFields == null) sortedFields = Sort(fields);
         }
 
-        /** returns a view of the given map's entries, sorted ascending by key */
+        /* returns a view of the given map's entries, sorted ascending by key */
 
         private static KeyValuePair<TKey, TValue>[] Sort<TKey, TValue>(HashMap<TKey, TValue> map)
             where TKey : class, IComparable<TKey>
@@ -485,7 +485,7 @@ namespace Lucene.Net.Index.Memory
             return entries;
         }
 
-        /**
+        /*
          * Returns a String representation of the index data for debugging purposes.
          * 
          * @return the string representation
@@ -541,7 +541,7 @@ namespace Lucene.Net.Index.Memory
         ///////////////////////////////////////////////////////////////////////////////
         // Nested classes:
         ///////////////////////////////////////////////////////////////////////////////
-        /**
+        /*
          * Index data structure for a field; Contains the tokenized term texts and
          * their positions.
          */
@@ -551,25 +551,25 @@ namespace Lucene.Net.Index.Memory
         {
             public static readonly IComparer<KeyValuePair<string, Info>> InfoComparer = new TermComparer<Info>();
             public static readonly IComparer<KeyValuePair<string, ArrayIntList>> ArrayIntListComparer = new TermComparer<ArrayIntList>(); 
-            /**
+            /*
              * Term strings and their positions for this field: Map <String
              * termText, ArrayIntList positions>
              */
             private HashMap<String, ArrayIntList> terms;
 
-            /** Terms sorted ascending by term text; computed on demand */
+            /* Terms sorted ascending by term text; computed on demand */
             [NonSerialized] private KeyValuePair<String, ArrayIntList>[] sortedTerms;
 
-            /** Number of added tokens for this field */
+            /* Number of added tokens for this field */
             private int numTokens;
 
-            /** Number of overlapping tokens for this field */
+            /* Number of overlapping tokens for this field */
             private int numOverlapTokens;
 
-            /** Boost factor for hits for this field */
+            /* Boost factor for hits for this field */
             private float boost;
 
-            /** Term for this field's fieldName, lazily computed on demand */
+            /* Term for this field's fieldName, lazily computed on demand */
             [NonSerialized] public Term template;
 
             private static long serialVersionUID = 2882195016849084649L;
@@ -608,7 +608,7 @@ namespace Lucene.Net.Index.Memory
                 get { return sortedTerms; }
             }
 
-            /**
+            /*
          * Sorts hashed terms into ascending order, reusing memory along the
          * way. Note that sorting is lazily delayed until required (often it's
          * not required at all). If a sorted view is required then hashing +
@@ -622,14 +622,14 @@ namespace Lucene.Net.Index.Memory
                 if (SortedTerms == null) sortedTerms = Sort(Terms);
             }
 
-            /** note that the frequency can be calculated as numPosition(getPositions(x)) */
+            /* note that the frequency can be calculated as numPosition(getPositions(x)) */
 
             public ArrayIntList GetPositions(String term)
             {
                 return Terms[term];
             }
 
-            /** note that the frequency can be calculated as numPosition(getPositions(x)) */
+            /* note that the frequency can be calculated as numPosition(getPositions(x)) */
 
             public ArrayIntList GetPositions(int pos)
             {
@@ -641,8 +641,8 @@ namespace Lucene.Net.Index.Memory
         ///////////////////////////////////////////////////////////////////////////////
         // Nested classes:
         ///////////////////////////////////////////////////////////////////////////////
-        /**
-         * Efficient resizable auto-expanding list holding <code>int</code> elements;
+        /*
+         * Efficient resizable auto-expanding list holding <c>int</c> elements;
          * implemented with arrays.
          */
 
@@ -720,7 +720,7 @@ namespace Lucene.Net.Index.Memory
                                                    + ", size: " + size);
             }
 
-            /** returns the first few positions (without offsets); debug only */
+            /* returns the first few positions (without offsets); debug only */
 
             public string ToString(int stride)
             {
@@ -745,7 +745,7 @@ namespace Lucene.Net.Index.Memory
         ///////////////////////////////////////////////////////////////////////////////
         private static readonly Term MATCH_ALL_TERM = new Term("");
 
-        /**
+        /*
          * Search support for Lucene framework integration; implements all methods
          * required by the Lucene IndexReader contracts.
          */
@@ -916,7 +916,7 @@ namespace Lucene.Net.Index.Memory
                 this.searcher = searcher;
             }
 
-            /** performance hack: cache norms to avoid repeated expensive calculations */
+            /* performance hack: cache norms to avoid repeated expensive calculations */
             private byte[] cachedNorms;
             private String cachedFieldName;
             private Similarity cachedSimilarity;
@@ -1062,7 +1062,7 @@ namespace Lucene.Net.Index.Memory
 
             private static readonly int LOG_PTR = (int) Math.Round(Log2(PTR));
 
-            /**
+            /*
              * Object header of any heap allocated Java object. 
              * ptr to class, info for monitor, gc, hash, etc.
              */
@@ -1125,7 +1125,7 @@ namespace Lucene.Net.Index.Memory
                 return IntPtr.Size == 8;
             }
 
-            /** logarithm to the base 2. Example: log2(4) == 2, log2(8) == 3 */
+            /* logarithm to the base 2. Example: log2(4) == 2, log2(8) == 3 */
 
             private static double Log2(double value)
             {

Modified: lucene.net/trunk/src/contrib/Queries/BooleanFilter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Queries/BooleanFilter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Queries/BooleanFilter.cs (original)
+++ lucene.net/trunk/src/contrib/Queries/BooleanFilter.cs Fri Oct  5 21:22:51 2012
@@ -143,7 +143,7 @@ namespace Lucene.Net.Search
             return DocIdSet.EMPTY_DOCIDSET;
         }
 
-        /** Provide a SortedVIntList when it is definitely smaller
+        /* Provide a SortedVIntList when it is definitely smaller
          * than an OpenBitSet.
          * @deprecated Either use CachingWrapperFilter, or
          * switch to a different DocIdSet implementation yourself. 

Modified: lucene.net/trunk/src/contrib/Queries/DuplicateFilter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Queries/DuplicateFilter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Queries/DuplicateFilter.cs (original)
+++ lucene.net/trunk/src/contrib/Queries/DuplicateFilter.cs Fri Oct  5 21:22:51 2012
@@ -30,7 +30,7 @@ namespace Lucene.Net.Search
     {
         String fieldName;
 
-        /**
+        /*
          * KeepMode determines which document id to consider as the master, all others being 
          * identified as duplicates. Selecting the "first occurrence" can potentially save on IO.
          */
@@ -38,7 +38,7 @@ namespace Lucene.Net.Search
         public static int KM_USE_FIRST_OCCURRENCE = 1;
         public static int KM_USE_LAST_OCCURRENCE = 2;
 
-        /**
+        /*
          * "Full" processing mode starts by setting all bits to false and only setting bits
          * for documents that contain the given field and are identified as non-duplicates. 
 

Modified: lucene.net/trunk/src/contrib/Queries/FilterClause.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Queries/FilterClause.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Queries/FilterClause.cs (original)
+++ lucene.net/trunk/src/contrib/Queries/FilterClause.cs Fri Oct  5 21:22:51 2012
@@ -22,7 +22,7 @@ using System.Text;
 
 namespace Lucene.Net.Search
 {
-    /**
+    /*
      * A Filter that wrapped with an indication of how that filter
      * is used when composed with another filter.
      * (Follows the boolean logic in BooleanClause for composition 
@@ -34,7 +34,7 @@ namespace Lucene.Net.Search
         Occur occur;
         Filter filter;
 
-        /**
+        /*
          * Create a new FilterClause
          * @param filter A Filter object containing a BitSet
          * @param occur A parameter implementation indicating SHOULD, MUST or MUST NOT
@@ -45,7 +45,7 @@ namespace Lucene.Net.Search
             this.filter = filter;
         }
 
-        /**
+        /*
          * Returns this FilterClause's filter
          * @return A Filter object
          */
@@ -55,7 +55,7 @@ namespace Lucene.Net.Search
             get { return filter; }
         }
 
-        /**
+        /*
          * Returns this FilterClause's occur parameter
          * @return An Occur object
          */
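For orientation, a FilterClause simply pairs a Filter with an Occur value so that BooleanFilter can combine filters with boolean logic, mirroring BooleanClause/BooleanQuery. A rough sketch, assuming the contrib TermsFilter type (not shown in this diff) for the wrapped filters:

    // Hypothetical composition sketch for contrib BooleanFilter + FilterClause.
    using Lucene.Net.Index;
    using Lucene.Net.Search;

    static class BooleanFilterExample
    {
        public static Filter BuildAccessFilter()
        {
            var isPublic = new TermsFilter();                 // assumed contrib type
            isPublic.AddTerm(new Term("visibility", "public"));

            var isDeleted = new TermsFilter();
            isDeleted.AddTerm(new Term("status", "deleted"));

            var filter = new BooleanFilter();
            filter.Add(new FilterClause(isPublic, Occur.MUST));       // must be public
            filter.Add(new FilterClause(isDeleted, Occur.MUST_NOT));  // and not deleted
            return filter;
        }
    }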

Modified: lucene.net/trunk/src/contrib/Queries/FuzzyLikeThisQuery.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Queries/FuzzyLikeThisQuery.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Queries/FuzzyLikeThisQuery.cs (original)
+++ lucene.net/trunk/src/contrib/Queries/FuzzyLikeThisQuery.cs Fri Oct  5 21:22:51 2012
@@ -100,7 +100,7 @@ namespace Lucene.Net.Search
         }
 
 
-        /**
+        /*
          * 
          * <param name="maxNumTerms">The total number of terms clauses that will appear once rewritten as a BooleanQuery</param>
          * <param name="analyzer"></param>
@@ -174,7 +174,7 @@ namespace Lucene.Net.Search
 
         }
 
-        /**
+        /*
          * <summary>Adds user input for "fuzzification" </summary>
          * <param name="queryString">The string which will be parsed by the analyzer and for which fuzzy variants will be parsed</param>
          * <param name="fieldName"></param>

Modified: lucene.net/trunk/src/contrib/Queries/Similar/MoreLikeThis.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Queries/Similar/MoreLikeThis.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Queries/Similar/MoreLikeThis.cs (original)
+++ lucene.net/trunk/src/contrib/Queries/Similar/MoreLikeThis.cs Fri Oct  5 21:22:51 2012
@@ -113,15 +113,15 @@ namespace Lucene.Net.Search.Similar
     /// may want to call the other set methods to control how the similarity queries are
     /// generated:
     /// <ul>
-    /// <li> <see cref="SetMinTermFreq"/> </li>
-    /// <li> <see cref="SetMinDocFreq"/> </li>
-    /// <li> <see cref="SetMaxDocFreq"/></li>
-    /// <li> <see cref="SetMaxDocFreqPct"/></li>
-    /// <li> <see cref="SetMinWordLen"/> </li>
-    /// <li> <see cref="SetMaxWordLen"/></li>
-    /// <li> <see cref="SetMaxQueryTerms"/></li>
-    /// <li> <see cref="SetMaxNumTokensParsed"/></li>
-    /// <li> <see cref="SetStopWords"/> </li>
+    /// <li> <see cref="MinTermFreq"/> </li>
+    /// <li> <see cref="MinDocFreq"/> </li>
+    /// <li> <see cref="MaxDocFreq"/></li>
+    /// <li> <see cref="SetMaxDocFreqPct(int)"/></li>
+    /// <li> <see cref="MinWordLen"/> </li>
+    /// <li> <see cref="MaxWordLen"/></li>
+    /// <li> <see cref="MaxQueryTerms"/></li>
+    /// <li> <see cref="MaxNumTokensParsed"/></li>
+    /// <li> <see cref="SetStopWords(ISet{string})"/> </li>
     /// </ul> 
     /// 
     /// <hr/>
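A short, hypothetical sketch of driving those knobs through the new property names referenced above; the IndexReader/IndexSearcher wiring and the TextReader-based Like overload are assumptions:

    // Illustrative MoreLikeThis configuration sketch (contrib/Queries/Similar).
    using System.IO;
    using Lucene.Net.Index;
    using Lucene.Net.Search;
    using Lucene.Net.Search.Similar;

    static class MoreLikeThisExample
    {
        public static TopDocs FindSimilar(IndexReader reader, IndexSearcher searcher, TextReader target)
        {
            var mlt = new MoreLikeThis(reader)
            {
                MinTermFreq = 1,     // keep terms occurring at least once in the source doc
                MinDocFreq = 2,      // ...and in at least two documents of the index
                MaxQueryTerms = 25   // cap the size of the generated query
            };

            Query like = mlt.Like(target);   // assumed TextReader overload, as in the Java original
            return searcher.Search(like, 10);
        }
    }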
@@ -139,42 +139,41 @@ namespace Lucene.Net.Search.Similar
     {
 
         /// <summary> Default maximum number of tokens to parse in each example doc field that is not stored with TermVector support.</summary>
-        /// <seealso cref="GetMaxNumTokensParsed">
+        /// <seealso cref="MaxNumTokensParsed">
         /// </seealso>
         public const int DEFAULT_MAX_NUM_TOKENS_PARSED = 5000;
 
 
         /// <summary> Default analyzer to parse source doc with.</summary>
-        /// <seealso cref="GetAnalyzer">
+        /// <seealso cref="Analyzer">
         /// </seealso>
         public static readonly Analyzer DEFAULT_ANALYZER = new StandardAnalyzer(Util.Version.LUCENE_CURRENT);
 
         /// <summary> Ignore terms with less than this frequency in the source doc.</summary>
-        /// <seealso cref="GetMinTermFreq">
+        /// <seealso cref="MinTermFreq">
         /// </seealso>
-        /// <seealso cref="SetMinTermFreq">
+        /// <seealso cref="MinTermFreq">
         /// </seealso>
         public const int DEFAULT_MIN_TERM_FREQ = 2;
 
         /// <summary> Ignore words which do not occur in at least this many docs.</summary>
-        /// <seealso cref="GetMinDocFreq">
+        /// <seealso cref="MinDocFreq">
         /// </seealso>
-        /// <seealso cref="SetMinDocFreq">
+        /// <seealso cref="MinDocFreq">
         /// </seealso>
         public const int DEFAULT_MIN_DOC_FREQ = 5;
 
         /// <summary>
         /// Ignore words which occur in more than this many docs
         /// </summary>
-        /// <seealso cref="GetMaxDocFreq"/>
-        /// <seealso cref="SetMaxDocFreq"/>
-        /// <seealso cref="SetMaxDocFreqPct"/>
+        /// <seealso cref="MaxDocFreq"/>
+        /// <seealso cref="MaxDocFreq"/>
         public const int DEFAULT_MAX_DOC_FREQ = int.MaxValue;
 
         /// <summary> Boost terms in query based on score.</summary>
         /// <seealso cref="Boost">
         /// </seealso>
-        /// <seealso cref="SetBoost">
+        /// <seealso cref="Boost">
         /// </seealso>
         public const bool DEFAULT_BOOST = false;
 
@@ -184,16 +183,16 @@ namespace Lucene.Net.Search.Similar
         public static readonly System.String[] DEFAULT_FIELD_NAMES = new System.String[] { "contents" };
 
         /// <summary> Ignore words less than this length or if 0 then this has no effect.</summary>
-        /// <seealso cref="GetMinWordLen">
+        /// <seealso cref="MinWordLen">
         /// </seealso>
-        /// <seealso cref="SetMinWordLen">
+        /// <seealso cref="MinWordLen">
         /// </seealso>
         public const int DEFAULT_MIN_WORD_LENGTH = 0;
 
         /// <summary> Ignore words greater than this length or if 0 then this has no effect.</summary>
-        /// <seealso cref="GetMaxWordLen">
+        /// <seealso cref="MaxWordLen">
         /// </seealso>
-        /// <seealso cref="SetMaxWordLen">
+        /// <seealso cref="MaxWordLen">
         /// </seealso>
         public const int DEFAULT_MAX_WORD_LENGTH = 0;
 
@@ -213,11 +212,11 @@ namespace Lucene.Net.Search.Similar
         /// <summary> Return a Query with no more than this many terms.
         /// 
         /// </summary>
-        /// <seealso cref="BooleanQuery.GetMaxClauseCount">
+        /// <seealso cref="BooleanQuery.MaxClauseCount">
         /// </seealso>
-        /// <seealso cref="GetMaxQueryTerms">
+        /// <seealso cref="MaxQueryTerms">
         /// </seealso>
-        /// <seealso cref="SetMaxQueryTerms">
+        /// <seealso cref="MaxQueryTerms">
         /// </seealso>
         public const int DEFAULT_MAX_QUERY_TERMS = 25;
 
@@ -897,7 +896,7 @@ namespace Lucene.Net.Search.Similar
         /// </returns>
         /// <seealso cref="RetrieveTerms(System.IO.TextReader)">
         /// </seealso>
-        /// <seealso cref="SetMaxQueryTerms">
+        /// <seealso cref="MaxQueryTerms">
         /// </seealso>
         public System.String[] RetrieveInterestingTerms(System.IO.TextReader r)
         {

Modified: lucene.net/trunk/src/contrib/Queries/Similar/MoreLikeThisQuery.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Queries/Similar/MoreLikeThisQuery.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Queries/Similar/MoreLikeThisQuery.cs (original)
+++ lucene.net/trunk/src/contrib/Queries/Similar/MoreLikeThisQuery.cs Fri Oct  5 21:22:51 2012
@@ -27,7 +27,7 @@ using Lucene.Net.Index;
 
 namespace Lucene.Net.Search.Similar
 {
-    /**<summary>
+    /*<summary>
  * A simple wrapper for MoreLikeThis for use in scenarios where a Query object is required eg
  * in custom QueryParser extensions. At query.rewrite() time the reader is used to construct the
  * actual MoreLikeThis object and obtain the real Query object.
@@ -45,7 +45,7 @@ namespace Lucene.Net.Search.Similar
         int minDocFreq = -1;
 
 
-        /**<summary></summary>
+        /*<summary></summary>
          * <param name="moreLikeFields"></param>
          * <param name="likeText"></param>
          * <param name="analyzer"></param>

Modified: lucene.net/trunk/src/contrib/Snowball/LICENSE.txt
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Snowball/LICENSE.txt?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Snowball/LICENSE.txt (original)
+++ lucene.net/trunk/src/contrib/Snowball/LICENSE.txt Fri Oct  5 21:22:51 2012
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

Modified: lucene.net/trunk/src/contrib/Snowball/Lucene.Net/Analysis/Snowball/SnowballAnalyzer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Snowball/Lucene.Net/Analysis/Snowball/SnowballAnalyzer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Snowball/Lucene.Net/Analysis/Snowball/SnowballAnalyzer.cs (original)
+++ lucene.net/trunk/src/contrib/Snowball/Lucene.Net/Analysis/Snowball/SnowballAnalyzer.cs Fri Oct  5 21:22:51 2012
@@ -88,7 +88,7 @@ namespace Lucene.Net.Analysis.Snowball
             internal TokenStream result;
         };
 
-        /** Returns a (possibly reused) {@link StandardTokenizer} filtered by a 
+        /* Returns a (possibly reused) {@link StandardTokenizer} filtered by a 
          * {@link StandardFilter}, a {@link LowerCaseFilter}, 
          * a {@link StopFilter}, and a {@link SnowballFilter} */
 

Modified: lucene.net/trunk/src/contrib/Snowball/SF/Snowball/Ext/HungarianStemmer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Snowball/SF/Snowball/Ext/HungarianStemmer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Snowball/SF/Snowball/Ext/HungarianStemmer.cs (original)
+++ lucene.net/trunk/src/contrib/Snowball/SF/Snowball/Ext/HungarianStemmer.cs Fri Oct  5 21:22:51 2012
@@ -56,7 +56,7 @@ using Among = SF.Snowball.Among;
 using SnowballProgram = SF.Snowball.SnowballProgram;
 namespace SF.Snowball.Ext
 {
-    /**
+    /*
 	 * Generated class implementing code defined by a snowball script.
 	 */
     public class HungarianStemmer : SnowballProgram

Modified: lucene.net/trunk/src/contrib/Snowball/SF/Snowball/Ext/PortugueseStemmer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Snowball/SF/Snowball/Ext/PortugueseStemmer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Snowball/SF/Snowball/Ext/PortugueseStemmer.cs (original)
+++ lucene.net/trunk/src/contrib/Snowball/SF/Snowball/Ext/PortugueseStemmer.cs Fri Oct  5 21:22:51 2012
@@ -57,7 +57,7 @@ using SnowballProgram = SF.Snowball.Snow
 namespace SF.Snowball.Ext
 {
 
-    /**
+    /*
 	 * Generated class implementing code defined by a snowball script.
 	 */
     public class PortugueseStemmer : SnowballProgram

Modified: lucene.net/trunk/src/contrib/Snowball/SF/Snowball/Ext/RomanianStemmer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Snowball/SF/Snowball/Ext/RomanianStemmer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Snowball/SF/Snowball/Ext/RomanianStemmer.cs (original)
+++ lucene.net/trunk/src/contrib/Snowball/SF/Snowball/Ext/RomanianStemmer.cs Fri Oct  5 21:22:51 2012
@@ -56,7 +56,7 @@ using Among = SF.Snowball.Among;
 using SnowballProgram = SF.Snowball.SnowballProgram;
 namespace SF.Snowball.Ext
 {
-    /**
+    /*
  * Generated class implementing code defined by a snowball script.
  */
     public class RomanianStemmer : SnowballProgram

Modified: lucene.net/trunk/src/contrib/Snowball/SF/Snowball/Ext/TurkishStemmer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Snowball/SF/Snowball/Ext/TurkishStemmer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Snowball/SF/Snowball/Ext/TurkishStemmer.cs (original)
+++ lucene.net/trunk/src/contrib/Snowball/SF/Snowball/Ext/TurkishStemmer.cs Fri Oct  5 21:22:51 2012
@@ -56,7 +56,7 @@ using Among = SF.Snowball.Among;
 using SnowballProgram = SF.Snowball.SnowballProgram;
 namespace SF.Snowball.Ext
 {
-    	/** Generated class implementing code defined by a snowball script.
+    	/* Generated class implementing code defined by a snowball script.
 	*
 	*/
     public class TurkishStemmer : SnowballProgram

Modified: lucene.net/trunk/src/contrib/Spatial/BBox/AreaSimilarity.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Spatial/BBox/AreaSimilarity.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Spatial/BBox/AreaSimilarity.cs (original)
+++ lucene.net/trunk/src/contrib/Spatial/BBox/AreaSimilarity.cs Fri Oct  5 21:22:51 2012
@@ -22,36 +22,34 @@ using Spatial4n.Core.Shapes;
 
 namespace Lucene.Net.Spatial.BBox
 {
-	/// <summary>
-	/**
-	 * The algorithm is implemented as envelope on envelope overlays rather than
-	 * complex polygon on complex polygon overlays.
-	 * <p/>
-	 * <p/>
-	 * Spatial relevance scoring algorithm:
-	 * <p/>
-	 * <br/>  queryArea = the area of the input query envelope
-	 * <br/>  targetArea = the area of the target envelope (per Lucene document)
-	 * <br/>  intersectionArea = the area of the intersection for the query/target envelopes
-	 * <br/>  queryPower = the weighting power associated with the query envelope (default = 1.0)
-	 * <br/>  targetPower =  the weighting power associated with the target envelope (default = 1.0)
-	 * <p/>
-	 * <br/>  queryRatio  = intersectionArea / queryArea;
-	 * <br/>  targetRatio = intersectionArea / targetArea;
-	 * <br/>  queryFactor  = Math.pow(queryRatio,queryPower);
-	 * <br/>  targetFactor = Math.pow(targetRatio,targetPower);
-	 * <br/>  score = queryFactor * targetFactor;
-	 * <p/>
-	 * Based on Geoportal's
-	 * <a href="http://geoportal.svn.sourceforge.net/svnroot/geoportal/Geoportal/trunk/src/com/esri/gpt/catalog/lucene/SpatialRankingValueSource.java">
-	 *   SpatialRankingValueSource</a>.
-	 *
-	 * @lucene.experimental
-	 */
+	/// <summary>
+    /// The algorithm is implemented as envelope on envelope overlays rather than
+    /// complex polygon on complex polygon overlays.
+    /// <p/>
+    /// <p/>
+    /// Spatial relevance scoring algorithm:
+    /// <p/>
+    /// <br/>  queryArea = the area of the input query envelope
+    /// <br/>  targetArea = the area of the target envelope (per Lucene document)
+    /// <br/>  intersectionArea = the area of the intersection for the query/target envelopes
+    /// <br/>  queryPower = the weighting power associated with the query envelope (default = 1.0)
+    /// <br/>  targetPower =  the weighting power associated with the target envelope (default = 1.0)
+    /// <p/>
+    /// <br/>  queryRatio  = intersectionArea / queryArea;
+    /// <br/>  targetRatio = intersectionArea / targetArea;
+    /// <br/>  queryFactor  = Math.pow(queryRatio,queryPower);
+    /// <br/>  targetFactor = Math.pow(targetRatio,targetPower);
+    /// <br/>  score = queryFactor * targetFactor;
+    /// <p/>
+    /// Based on Geoportal's
+    /// <a href="http://geoportal.svn.sourceforge.net/svnroot/geoportal/Geoportal/trunk/src/com/esri/gpt/catalog/lucene/SpatialRankingValueSource.java">
+    ///   SpatialRankingValueSource</a>.
+    ///
+    /// @lucene.experimental
 	/// </summary>
 	public class AreaSimilarity : BBoxSimilarity
 	{
-	   /**
+	   /*
 		* Properties associated with the query envelope
 		*/
 		private readonly Rectangle queryExtent;
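The scoring formula spelled out in the comment above reduces to a few lines of arithmetic. A standalone sketch (the real class derives the three areas from the query and target envelopes, which is omitted here):

    // Standalone restatement of the overlap score from the comment above.
    static class AreaSimilarityMath
    {
        public static double OverlapScore(double queryArea, double targetArea, double intersectionArea,
                                          double queryPower = 1.0, double targetPower = 1.0)
        {
            double queryRatio   = intersectionArea / queryArea;
            double targetRatio  = intersectionArea / targetArea;
            double queryFactor  = System.Math.Pow(queryRatio, queryPower);
            double targetFactor = System.Math.Pow(targetRatio, targetPower);
            return queryFactor * targetFactor;   // identical envelopes -> 1.0, disjoint -> 0.0
        }
    }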

Modified: lucene.net/trunk/src/contrib/Spatial/BBox/BBoxStrategy.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Spatial/BBox/BBoxStrategy.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Spatial/BBox/BBoxStrategy.cs (original)
+++ lucene.net/trunk/src/contrib/Spatial/BBox/BBoxStrategy.cs Fri Oct  5 21:22:51 2012
@@ -302,7 +302,7 @@ namespace Lucene.Net.Spatial.BBox
 			return this.MakeQuery(new Query[] { xConditions, yConditions }, Occur.SHOULD);
 		}
 
-		/**
+		/*
 		 * Constructs a query to retrieve documents that equal the input envelope.
 		 *
 		 * @return the spatial query
@@ -354,7 +354,7 @@ namespace Lucene.Net.Spatial.BBox
 			return qNotDisjoint;
 		}
 
-		/**
+		/*
 		 * Makes a boolean query based upon a collection of queries and a logical operator.
 		 *
 		 * @param queries the query collection
@@ -371,7 +371,7 @@ namespace Lucene.Net.Spatial.BBox
 			return bq;
 		}
 
-		/**
+		/*
 		 * Constructs a query to retrieve documents are fully within the input envelope.
 		 *
 		 * @return the spatial query
@@ -454,7 +454,7 @@ namespace Lucene.Net.Spatial.BBox
 			return this.MakeQuery(new Query[] { xConditions, yConditions }, Occur.MUST);
 		}
 
-		/**
+		/*
 		 * Constructs a query to retrieve documents that do or do not cross the date line.
 		 *
 		 *
@@ -467,7 +467,7 @@ namespace Lucene.Net.Spatial.BBox
 			return new TermQuery(new Term(field_xdl, crossedDateLine ? "T" : "F"));
 		}
 
-		/**
+		/*
 		 * Constructs a query to retrieve documents that do or do not cross the date line
 		 * and match the supplied spatial query.
 		 *

Modified: lucene.net/trunk/src/contrib/Spatial/Prefix/PrefixTreeStrategy.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Spatial/Prefix/PrefixTreeStrategy.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Spatial/Prefix/PrefixTreeStrategy.cs (original)
+++ lucene.net/trunk/src/contrib/Spatial/Prefix/PrefixTreeStrategy.cs Fri Oct  5 21:22:51 2012
@@ -53,7 +53,7 @@ namespace Lucene.Net.Spatial.Prefix
             this.grid = grid;
         }
 
-        /** Used in the in-memory ValueSource as a default ArrayList length for this field's array of values, per doc. */
+        /* Used in the in-memory ValueSource as a default ArrayList length for this field's array of values, per doc. */
 
         public void SetDefaultFieldValuesArrayLen(int defaultFieldValuesArrayLen)
         {

Modified: lucene.net/trunk/src/contrib/Spatial/Prefix/Tree/GeohashPrefixTree.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Spatial/Prefix/Tree/GeohashPrefixTree.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Spatial/Prefix/Tree/GeohashPrefixTree.cs (original)
+++ lucene.net/trunk/src/contrib/Spatial/Prefix/Tree/GeohashPrefixTree.cs Fri Oct  5 21:22:51 2012
@@ -59,7 +59,7 @@ namespace Lucene.Net.Spatial.Prefix.Tree
         }
 
         /// <summary>
-        /// Any more than this and there's no point (double lat & lon are the same).
+        /// Any more than this and there's no point (double lat and lon are the same).
         /// </summary>
         /// <returns></returns>
         public static int GetMaxLevelsPossible()

Modified: lucene.net/trunk/src/contrib/Spatial/Prefix/Tree/Node.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Spatial/Prefix/Tree/Node.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Spatial/Prefix/Tree/Node.cs (original)
+++ lucene.net/trunk/src/contrib/Spatial/Prefix/Tree/Node.cs Fri Oct  5 21:22:51 2012
@@ -87,7 +87,7 @@ namespace Lucene.Net.Spatial.Prefix.Tree
 			shapeRel = SpatialRelation.WITHIN;
 		}
 
-		/**
+		/*
 		 * Note: doesn't contain a trailing leaf byte.
 		 */
 		public String GetTokenString()
@@ -128,7 +128,7 @@ namespace Lucene.Net.Spatial.Prefix.Tree
 		//TODO add getParent() and update some algorithms to use this?
 		//public Cell getParent();
 
-		/**
+		/*
 		 * Like {@link #getSubCells()} but with the results filtered by a shape. If that shape is a {@link com.spatial4j.core.shape.Point} then it
 		 * must call {@link #getSubCell(com.spatial4j.core.shape.Point)};
 		 * Precondition: Never called when getLevel() == maxLevel.
@@ -168,7 +168,7 @@ namespace Lucene.Net.Spatial.Prefix.Tree
 			return cells;
 		}
 
-		/**
+		/*
 		 * Performant implementations are expected to implement this efficiently by considering the current
 		 * cell's boundary.
 		 * Precondition: Never called when getLevel() == maxLevel.
@@ -178,7 +178,7 @@ namespace Lucene.Net.Spatial.Prefix.Tree
 
 		//TODO Cell getSubCell(byte b)
 
-		/**
+		/*
 		 * Gets the cells at the next grid cell level that cover this cell.
 		 * Precondition: Never called when getLevel() == maxLevel.
 		 *
@@ -186,7 +186,7 @@ namespace Lucene.Net.Spatial.Prefix.Tree
 		 */
 		public abstract IList<Node> GetSubCells();
 
-		/**
+		/*
 		 * {@link #getSubCells()}.size() -- usually a constant. Should be >=2
 		 */
 		public abstract int GetSubCellsSize();

Modified: lucene.net/trunk/src/contrib/Spatial/Prefix/Tree/SpatialPrefixTree.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Spatial/Prefix/Tree/SpatialPrefixTree.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Spatial/Prefix/Tree/SpatialPrefixTree.cs (original)
+++ lucene.net/trunk/src/contrib/Spatial/Prefix/Tree/SpatialPrefixTree.cs Fri Oct  5 21:22:51 2012
@@ -75,7 +75,7 @@ namespace Lucene.Net.Spatial.Prefix.Tree
 		//[NotSerialized]
 		private Node worldNode;//cached
 
-		/**
+		/*
 		 * Returns the level 0 cell which encompasses all spatial data. Equivalent to {@link #getNode(String)} with "".
 		 * This cell is threadsafe, just like a spatial prefix grid is, although cells aren't
 		 * generally threadsafe.
@@ -90,7 +90,7 @@ namespace Lucene.Net.Spatial.Prefix.Tree
 			return worldNode;
 		}
 
-		/**
+		/*
 		 * The cell for the specified token. The empty string should be equal to {@link #getWorldNode()}.
 		 * Precondition: Never called when token length > maxLevel.
 		 */
@@ -125,7 +125,7 @@ namespace Lucene.Net.Spatial.Prefix.Tree
 			return GetNodes(p, level, false).ElementAt(0);
 		}
 
-		/**
+		/*
 		 * Gets the intersecting & including cells for the specified shape, without exceeding detail level.
 		 * The result is a set of cells (no dups), sorted. Unmodifiable.
 		 * <p/>
@@ -222,7 +222,7 @@ namespace Lucene.Net.Spatial.Prefix.Tree
 			}
 		}
 
-		/**
+		/*
 		 * Subclasses might override {@link #getNodes(com.spatial4j.core.shape.Shape, int, boolean)}
 		 * and check if the argument is a shape and if so, delegate
 		 * to this implementation, which calls {@link #getNode(com.spatial4j.core.shape.Point, int)} and
@@ -251,7 +251,7 @@ namespace Lucene.Net.Spatial.Prefix.Tree
 			return cells;
 		}
 
-		/**
+		/*
 		 * Will add the trailing leaf byte for leaves. This isn't particularly efficient.
 		 */
 		public static List<String> NodesToTokenStrings(Collection<Node> nodes)

Modified: lucene.net/trunk/src/contrib/Spatial/Prefix/Tree/SpatialPrefixTreeFactory.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Spatial/Prefix/Tree/SpatialPrefixTreeFactory.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Spatial/Prefix/Tree/SpatialPrefixTreeFactory.cs (original)
+++ lucene.net/trunk/src/contrib/Spatial/Prefix/Tree/SpatialPrefixTreeFactory.cs Fri Oct  5 21:22:51 2012
@@ -93,7 +93,7 @@ namespace Lucene.Net.Spatial.Prefix.Tree
             maxLevels = GetLevelForDistance(degrees);
         }
 
-	    /** Calls {@link SpatialPrefixTree#getLevelForDistance(double)}. */
+	    /* Calls {@link SpatialPrefixTree#getLevelForDistance(double)}. */
 		protected abstract int GetLevelForDistance(double degrees);
 
 		protected abstract SpatialPrefixTree NewSPT();

Modified: lucene.net/trunk/src/contrib/Spatial/SpatialStrategy.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Spatial/SpatialStrategy.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Spatial/SpatialStrategy.cs (original)
+++ lucene.net/trunk/src/contrib/Spatial/SpatialStrategy.cs Fri Oct  5 21:22:51 2012
@@ -30,7 +30,7 @@ namespace Lucene.Net.Spatial
 	/// The SpatialStrategy encapsulates an approach to indexing and searching based on shapes.
 	/// <p/>
 	/// Note that a SpatialStrategy is not involved with the Lucene stored field values of shapes, which is
-	/// immaterial to indexing & search.
+	/// immaterial to indexing and search.
 	/// <p/>
 	/// Thread-safe.
 	/// </summary>
@@ -39,11 +39,12 @@ namespace Lucene.Net.Spatial
 		protected readonly SpatialContext ctx;
 		protected readonly string fieldName;
 
-		/// <summary>
-		/// Constructs the spatial strategy with its mandatory arguments.
-		/// </summary>
-		/// <param name="ctx"></param>
-		protected SpatialStrategy(SpatialContext ctx, string fieldName)
+	    /// <summary>
+	    /// Constructs the spatial strategy with its mandatory arguments.
+	    /// </summary>
+	    /// <param name="ctx"></param>
+	    /// <param name="fieldName"> </param>
+	    protected SpatialStrategy(SpatialContext ctx, string fieldName)
 		{
 			if (ctx == null)
 				throw new ArgumentException("ctx is required", "ctx");
@@ -69,7 +70,7 @@ namespace Lucene.Net.Spatial
 		}
 
 		/// <summary>
-		/// Returns the IndexableField(s) from the <code>shape</code> that are to be
+		/// Returns the IndexableField(s) from the <c>shape</c> that are to be
 		/// added to the {@link org.apache.lucene.document.Document}.  These fields
 		/// are expected to be marked as indexed and not stored.
 		/// <p/>
@@ -124,7 +125,7 @@ namespace Lucene.Net.Spatial
         /// <summary>
         /// Returns a ValueSource with values ranging from 1 to 0, depending inversely
         /// on the distance from {@link #makeDistanceValueSource(com.spatial4j.core.shape.Point)}.
-        /// The formula is <code>c/(d + c)</code> where 'd' is the distance and 'c' is
+        /// The formula is <c>c/(d + c)</c> where 'd' is the distance and 'c' is
         /// one tenth the distance to the farthest edge from the center. Thus the
         /// scores will be 1 for indexed points at the center of the query shape and as
         /// low as ~0.1 at its furthest edges.
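As a quick check of that formula: with c fixed at one tenth of the distance to the farthest edge, d = 0 gives c/c = 1 at the center, and d = 10c gives c/(11c) ≈ 0.09 at the farthest edge, matching the ~0.1 figure. A minimal sketch of just the arithmetic (the actual implementation wraps this in a ValueSource):

    // Reciprocal distance score from the comment above: c = one tenth of the max distance.
    static class ReciprocalScoreMath
    {
        public static double Score(double distance, double maxDistanceToEdge)
        {
            double c = maxDistanceToEdge * 0.1;
            return c / (distance + c);   // 1.0 at the center, ~0.09 at the farthest edge
        }
    }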

Modified: lucene.net/trunk/src/contrib/Spatial/Util/CompatibilityExtensions.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Spatial/Util/CompatibilityExtensions.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Spatial/Util/CompatibilityExtensions.cs (original)
+++ lucene.net/trunk/src/contrib/Spatial/Util/CompatibilityExtensions.cs Fri Oct  5 21:22:51 2012
@@ -120,7 +120,7 @@ namespace Lucene.Net.Spatial.Util
 			return res;
 		}
 
-		/** table of number of leading zeros in a byte */
+		/* table of number of leading zeros in a byte */
 		public static readonly byte[] nlzTable = { 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
 
 		/// <summary>
@@ -178,14 +178,14 @@ namespace Lucene.Net.Spatial.Util
 		internal readonly String field;        // which Fieldable
 		internal readonly Object custom;       // which custom comparator or parser
 
-		/** Creates one of these objects for a custom comparator/parser. */
+		/* Creates one of these objects for a custom comparator/parser. */
 		public Entry(String field, Object custom)
 		{
 			this.field = field;
 			this.custom = custom;
 		}
 
-		/** Two of these are equal iff they reference the same field and type. */
+		/* Two of these are equal iff they reference the same field and type. */
 		public override bool Equals(Object o)
 		{
 			var other = o as Entry;
@@ -206,7 +206,7 @@ namespace Lucene.Net.Spatial.Util
 			return false;
 		}
 
-		/** Composes a hashcode based on the field and type. */
+		/* Composes a hashcode based on the field and type. */
 		public override int GetHashCode()
 		{
 			return field.GetHashCode() ^ (custom == null ? 0 : custom.GetHashCode());

Modified: lucene.net/trunk/src/contrib/Spatial/Util/FixedBitSet.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Spatial/Util/FixedBitSet.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Spatial/Util/FixedBitSet.cs (original)
+++ lucene.net/trunk/src/contrib/Spatial/Util/FixedBitSet.cs Fri Oct  5 21:22:51 2012
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -22,7 +22,7 @@ using Lucene.Net.Util;
 
 namespace Lucene.Net.Spatial.Util
 {
-	/** BitSet of fixed length (numBits), backed by accessible
+	/* BitSet of fixed length (numBits), backed by accessible
  *  ({@link #getBits}) long[], accessed with an int index,
  *  implementing Bits and DocIdSet.  Unlike {@link
  *  OpenBitSet} this bit set does not auto-expand, cannot
@@ -143,7 +143,7 @@ namespace Lucene.Net.Spatial.Util
 			return -1;
 		}
 
-		/** Returns the index of the last set bit before or on the index specified.
+		/* Returns the index of the last set bit before or on the index specified.
 		 *  -1 is returned if there are no more set bits.
 		 */
 		public int PrevSetBit(int index)
@@ -159,7 +159,7 @@ namespace Lucene.Net.Spatial.Util
 			return -1;
 		}
 
-		/** Does in-place OR of the bits provided by the
+		/* Does in-place OR of the bits provided by the
 		 *  iterator. */
 		//public void Or(DocIdSetIterator iter)
 		//{
@@ -181,7 +181,7 @@ namespace Lucene.Net.Spatial.Util
 		//    }
 		//}
 
-		/** this = this OR other */
+		/* this = this OR other */
 		public void Or(FixedBitSet other)
 		{
 			Or(other.bits, other.bits.Length);
@@ -197,7 +197,7 @@ namespace Lucene.Net.Spatial.Util
 			}
 		}
 
-		/** Does in-place AND of the bits provided by the
+		/* Does in-place AND of the bits provided by the
 		 *  iterator. */
 		//public void And(DocIdSetIterator iter)
 		//{
@@ -226,7 +226,7 @@ namespace Lucene.Net.Spatial.Util
 		//    }
 		//}
 
-		/** this = this AND other */
+		/* this = this AND other */
 		public void And(FixedBitSet other)
 		{
 			And(other.bits, other.bits.Length);
@@ -249,7 +249,7 @@ namespace Lucene.Net.Spatial.Util
 			}
 		}
 
-		/** Does in-place AND NOT of the bits provided by the
+		/* Does in-place AND NOT of the bits provided by the
 		 *  iterator. */
 		//public void AndNot(DocIdSetIterator iter)
 		//{
@@ -271,7 +271,7 @@ namespace Lucene.Net.Spatial.Util
 		//    }
 		//}
 
-		/** this = this AND NOT other */
+		/* this = this AND NOT other */
 		public void AndNot(FixedBitSet other)
 		{
 			AndNot(other.bits, other.bits.Length);
@@ -291,7 +291,7 @@ namespace Lucene.Net.Spatial.Util
 		// typically isEmpty is low cost, but this one wouldn't
 		// be)
 
-		/** Flips a range of bits
+		/* Flips a range of bits
 		 *
 		 * @param startIndex lower index
 		 * @param endIndex one-past the last bit to flip
@@ -306,7 +306,7 @@ namespace Lucene.Net.Spatial.Util
 		//  int startWord = startIndex >> 6;
 		//  int endWord = (endIndex-1) >> 6;
 
-		//  /*** Grrr, java shifting wraps around so -1L>>>64 == -1
+		//  /* Grrr, java shifting wraps around so -1L>>>64 == -1
 		//   * for that reason, make sure not to use endmask if the bits to flip will
 		//   * be zero in the last word (redefine endWord to be the last changed...)
 		//  long startmask = -1L << (startIndex & 0x3f);     // example: 11111...111000
@@ -330,7 +330,7 @@ namespace Lucene.Net.Spatial.Util
 		//  bits[endWord] ^= endmask;
 		//}
 
-		/** Sets a range of bits
+		/* Sets a range of bits
 		 *
 		 * @param startIndex lower index
 		 * @param endIndex one-past the last bit to set
@@ -367,7 +367,7 @@ namespace Lucene.Net.Spatial.Util
 		//  bits[endWord] |= endmask;
 		//}
 
-		/** Clears a range of bits.
+		/* Clears a range of bits.
 		 *
 		 * @param startIndex lower index
 		 * @param endIndex one-past the last bit to clear
@@ -386,7 +386,7 @@ namespace Lucene.Net.Spatial.Util
 			return new FixedBitSet(this);
 		}
 
-		/** returns true if both sets have the same bits set */
+		/* returns true if both sets have the same bits set */
 		public override bool Equals(Object o)
 		{
 			if (this == o)
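A small, hypothetical sketch of the in-place combination semantics documented above (this = this OP other); the numBits constructor and the Set/Get members are assumed to mirror the Java FixedBitSet this file was ported from:

    // Illustrative FixedBitSet combination sketch (contrib/Spatial/Util); members partly assumed.
    using Lucene.Net.Spatial.Util;

    static class FixedBitSetExample
    {
        public static void Demo()
        {
            var a = new FixedBitSet(8);   // assumed numBits constructor
            var b = new FixedBitSet(8);
            a.Set(1); a.Set(3);           // assumed Set(int)
            b.Set(3); b.Set(5);

            a.And(b);                                 // in place: a = a AND b, so only bit 3 stays set
            System.Console.WriteLine(a.Get(3));       // True: bit 3 is set in both
            // a.Or(b) and a.AndNot(b) modify 'a' in place the same way.
        }
    }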

Modified: lucene.net/trunk/src/contrib/Spatial/Util/FunctionQuery.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Spatial/Util/FunctionQuery.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Spatial/Util/FunctionQuery.cs (original)
+++ lucene.net/trunk/src/contrib/Spatial/Util/FunctionQuery.cs Fri Oct  5 21:22:51 2012
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

Modified: lucene.net/trunk/src/contrib/Spatial/Util/ValueSourceFilter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Spatial/Util/ValueSourceFilter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Spatial/Util/ValueSourceFilter.cs (original)
+++ lucene.net/trunk/src/contrib/Spatial/Util/ValueSourceFilter.cs Fri Oct  5 21:22:51 2012
@@ -23,7 +23,7 @@ namespace Lucene.Net.Spatial.Util
 {
     /// <summary>
     /// Filter that matches all documents where a valuesource is
-    /// in between a range of <code>min</code> and <code>max</code> inclusive.
+    /// in between a range of <c>min</c> and <c>max</c> inclusive.
     /// </summary>
 	public class ValueSourceFilter : Filter
 	{

Modified: lucene.net/trunk/src/contrib/SpellChecker/Spell/SpellChecker.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/SpellChecker/Spell/SpellChecker.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/SpellChecker/Spell/SpellChecker.cs (original)
+++ lucene.net/trunk/src/contrib/SpellChecker/Spell/SpellChecker.cs Fri Oct  5 21:22:51 2012
@@ -117,7 +117,7 @@ namespace SpellChecker.Net.Search.Spell
 
         /// <summary>
         /// Use a different index as the spell checker index or re-open
-        /// the existing index if <code>spellIndex</code> is the same value
+        /// the existing index if <c>spellIndex</c> is the same value
         /// as given in the constructor.
         /// </summary>
         /// <param name="spellIndexDir">spellIndexDir the spell directory to use </param>
@@ -580,11 +580,11 @@ namespace SpellChecker.Net.Search.Spell
         }
 
         /// <summary>
-        /// Returns <code>true</code> if and only if the <see cref="SpellChecker"/> is
-        /// closed, otherwise <code>false</code>.
+        /// Returns <c>true</c> if and only if the <see cref="SpellChecker"/> is
+        /// closed, otherwise <c>false</c>.
         /// </summary>
-        /// <returns><code>true</code> if and only if the <see cref="SpellChecker"/> is
-        ///         closed, otherwise <code>false</code>.
+        /// <returns><c>true</c> if and only if the <see cref="SpellChecker"/> is
+        ///         closed, otherwise <c>false</c>.
         ///</returns>
         bool IsClosed()
         {

Modified: lucene.net/trunk/src/contrib/SpellChecker/Spell/TRStringDistance.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/SpellChecker/Spell/TRStringDistance.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/SpellChecker/Spell/TRStringDistance.cs (original)
+++ lucene.net/trunk/src/contrib/SpellChecker/Spell/TRStringDistance.cs Fri Oct  5 21:22:51 2012
@@ -38,9 +38,9 @@ namespace SpellChecker.Net.Search.Spell
         }
 		
 		
-        //*****************************
+        //***************************
         // Compute Levenshtein distance
-        //*****************************
+        //***************************
         public int GetDistance(System.String other)
         {
             int[][] d; // matrix
@@ -115,9 +115,9 @@ namespace SpellChecker.Net.Search.Spell
         }
 		
 		
-        //****************************
+        //**************************
         // Get minimum of three values
-        //****************************
+        //**************************
         private static int Min3(int a, int b, int c)
         {
             int mi = a;

Modified: lucene.net/trunk/src/core/Analysis/Analyzer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/core/Analysis/Analyzer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/core/Analysis/Analyzer.cs (original)
+++ lucene.net/trunk/src/core/Analysis/Analyzer.cs Fri Oct  5 21:22:51 2012
@@ -84,7 +84,7 @@ namespace Lucene.Net.Analysis
 		/// and override tokenStream but not reusableTokenStream 
 		/// </deprecated>
 		/// <summary>
-        /// Java uses Class&lt;? extends Analyer&gt; to contrain <paramref="baseClass"/> to
+        /// Java uses Class&lt;? extends Analyzer&gt; to constrain <typeparamref name="TClass"/> to
         /// only Types that inherit from Analyzer.  C# does not have a generic type class,
         /// ie Type&lt;t&gt;.  The method signature stays the same, and an exception may
         /// still be thrown, if the method doesn't exist.

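The corrected comment above is describing the C# idiom that stands in for Java's Class&lt;? extends Analyzer&gt;: a generic type parameter constrained to Analyzer. A rough, illustrative sketch of that pattern (the method and class names here are not the codebase's actual members):

    using System;
    using Lucene.Net.Analysis;

    public static class AnalyzerReflectionSketch
    {
        // The where-clause gives the compile-time guarantee that Java encodes
        // in the Class<? extends Analyzer> parameter type.
        public static bool OverridesTokenStream<TClass>() where TClass : Analyzer
        {
            var m = typeof(TClass).GetMethod("TokenStream",
                new[] { typeof(string), typeof(System.IO.TextReader) });
            return m != null && m.DeclaringType != typeof(Analyzer);
        }
    }
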
Modified: lucene.net/trunk/src/core/Analysis/BaseCharFilter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/core/Analysis/BaseCharFilter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/core/Analysis/BaseCharFilter.cs (original)
+++ lucene.net/trunk/src/core/Analysis/BaseCharFilter.cs Fri Oct  5 21:22:51 2012
@@ -39,7 +39,7 @@ namespace Lucene.Net.Analysis
         {
         }
 
-        /** Retrieve the corrected offset. */
+        /* Retrieve the corrected offset. */
         //@Override
         protected internal override int Correct(int currentOff)
         {

Modified: lucene.net/trunk/src/core/Analysis/CharArraySet.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/core/Analysis/CharArraySet.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/core/Analysis/CharArraySet.cs (original)
+++ lucene.net/trunk/src/core/Analysis/CharArraySet.cs Fri Oct  5 21:22:51 2012
@@ -98,7 +98,7 @@ namespace Lucene.Net.Analysis
             this._Count = count;
         }
 
-        /// <summary>true if the <code>len</code> chars of <code>text</code> starting at <code>off</code>
+        /// <summary>true if the <c>len</c> chars of <c>text</c> starting at <c>off</c>
         /// are in the set 
         /// </summary>
         public virtual bool Contains(char[] text, int off, int len)

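A short usage sketch of the offset-based Contains overload documented above; the start-size constructor and Add call are assumed from this class's other overloads, and the sample words are illustrative.

    using Lucene.Net.Analysis;

    var stopSet = new CharArraySet(8, /*ignoreCase*/ true);
    stopSet.Add("and");
    stopSet.Add("the");

    char[] buffer = "and then".ToCharArray();

    // Tests the 3 chars of buffer starting at offset 0 ("and") without
    // allocating an intermediate string.
    bool isStop = stopSet.Contains(buffer, 0, 3);   // true
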
Modified: lucene.net/trunk/src/core/Analysis/Standard/StandardAnalyzer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/core/Analysis/Standard/StandardAnalyzer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/core/Analysis/Standard/StandardAnalyzer.cs (original)
+++ lucene.net/trunk/src/core/Analysis/Standard/StandardAnalyzer.cs Fri Oct  5 21:22:51 2012
@@ -54,7 +54,7 @@ namespace Lucene.Net.Analysis.Standard
 		public static readonly ISet<string> STOP_WORDS_SET;
 		private Version matchVersion;
 		
-		/// <summary>Builds an analyzer with the default stop words (<see cref="STOP_WORDS" />).
+		/// <summary>Builds an analyzer with the default stop words (<see cref="STOP_WORDS_SET" />).
 		/// </summary>
 		/// <param name="matchVersion">Lucene version to match see <see cref="Version">above</see></param>
 		public StandardAnalyzer(Version matchVersion)

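For reference, the version-aware constructor referenced above in its usual indexing context; the directory path and document content are illustrative, and the overloads are assumed from the 3.x API this trunk tracks.

    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Documents;
    using Lucene.Net.Index;
    using Lucene.Net.Store;
    using Version = Lucene.Net.Util.Version;

    // Analyzer built with the default stop word set (STOP_WORDS_SET).
    var analyzer = new StandardAnalyzer(Version.LUCENE_30);

    var dir = FSDirectory.Open(new System.IO.DirectoryInfo("example-index"));
    var writer = new IndexWriter(dir, analyzer, IndexWriter.MaxFieldLength.UNLIMITED);

    var doc = new Document();
    doc.Add(new Field("body", "the quick brown fox", Field.Store.YES, Field.Index.ANALYZED));
    writer.AddDocument(doc);
    writer.Close();
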
Modified: lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizer.cs (original)
+++ lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizer.cs Fri Oct  5 21:22:51 2012
@@ -154,8 +154,7 @@ namespace Lucene.Net.Analysis.Standard
 		
 		///<summary>
 		/// (non-Javadoc)
-		///
-		/// <see cref="Lucene.Net.Analysis.TokenStream.Next()" />
+		/// <see cref="Lucene.Net.Analysis.TokenStream.IncrementToken()" />
         ///</summary>
 		public override bool IncrementToken()
 		{

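The IncrementToken contract the corrected cref now points at is consumed with the usual attribute-based loop. A minimal sketch, with attribute interface names assumed from the 3.x attribute API and the input text illustrative:

    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Analysis.Tokenattributes;
    using Version = Lucene.Net.Util.Version;

    var tokenizer = new StandardTokenizer(Version.LUCENE_30,
        new System.IO.StringReader("Lucene.Net tokenizes text"));

    var termAtt = tokenizer.AddAttribute<ITermAttribute>();
    var offsetAtt = tokenizer.AddAttribute<IOffsetAttribute>();

    // Advance token by token until the stream is exhausted.
    while (tokenizer.IncrementToken())
    {
        System.Console.WriteLine("{0} [{1},{2}]",
            termAtt.Term, offsetAtt.StartOffset, offsetAtt.EndOffset);
    }
    tokenizer.End();
    tokenizer.Close();
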
Modified: lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizerImpl.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizerImpl.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizerImpl.cs (original)
+++ lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizerImpl.cs Fri Oct  5 21:22:51 2012
@@ -258,7 +258,7 @@ namespace Lucene.Net.Analysis.Standard
 			return yychar;
 		}
 
-        /**
+        /*
         * Resets the Tokenizer to a new Reader.
         */
         internal void Reset(System.IO.TextReader r)

Modified: lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizerImpl.jflex
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizerImpl.jflex?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizerImpl.jflex (original)
+++ lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizerImpl.jflex Fri Oct  5 21:22:51 2012
@@ -1,6 +1,6 @@
 package org.apache.lucene.analysis.standard;
 
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -51,7 +51,7 @@ public static final int EMAIL           
 public static final int HOST              = StandardTokenizer.HOST;
 public static final int NUM               = StandardTokenizer.NUM;
 public static final int CJ                = StandardTokenizer.CJ;
-/**
+/*
  * @deprecated this solves a bug where HOSTs that end with '.' are identified
  *             as ACRONYMs.
  */
@@ -64,7 +64,7 @@ public final int yychar()
     return yychar;
 }
 
-/**
+/*
  * Resets the Tokenizer to a new Reader.
  */
 final void reset(java.io.Reader r) {
@@ -75,14 +75,14 @@ final void reset(java.io.Reader r) {
   yyreset(r);
 }
 
-/**
+/*
  * Fills Lucene token with the current token text.
  */
 final void getText(Token t) {
   t.setTermBuffer(zzBuffer, zzStartRead, zzMarkedPos-zzStartRead);
 }
 
-/**
+/*
  * Fills TermAttribute with the current token text.
  */
 final void getText(TermAttribute t) {
@@ -152,5 +152,5 @@ WHITESPACE = \r\n | [ \r\n\t\f]
 {CJ}                                                           { return CJ; }
 {ACRONYM_DEP}                                                  { return ACRONYM_DEP; }
 
-/** Ignore the rest */
+/* Ignore the rest */
 . | {WHITESPACE}                                               { /* ignore */ }

Modified: lucene.net/trunk/src/core/Analysis/StopFilter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/core/Analysis/StopFilter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/core/Analysis/StopFilter.cs (original)
+++ lucene.net/trunk/src/core/Analysis/StopFilter.cs Fri Oct  5 21:22:51 2012
@@ -70,7 +70,7 @@ namespace Lucene.Net.Analysis
 		/// TokenStream that are named in the Set.
 		/// </summary>
 		/// <param name="enablePositionIncrements">true if token positions should record the removed stop words</param>
-		///  <param name="in_Renamed">Input stream</param>
+		///  <param name="in">Input stream</param>
 		/// <param name="stopWords">A Set of strings or char[] or any other ToString()-able set representing the stopwords</param>
 		/// <seealso cref="MakeStopSet(String[])"/>
 		public StopFilter(bool enablePositionIncrements, TokenStream @in, ISet<string> stopWords)
@@ -83,8 +83,7 @@ namespace Lucene.Net.Analysis
 		/// an Analyzer is constructed.
 		/// 
 		/// </summary>
-		/// <seealso cref="MakeStopSet(String[], bool)"> passing false to ignoreCase
-		/// </seealso>
+		/// <seealso cref="MakeStopSet(String[], bool)">passing false to ignoreCase</seealso>
 		public static ISet<string> MakeStopSet(params string[] stopWords)
 		{
 			return MakeStopSet(stopWords, false);

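The constructor and helper touched above, strung together the usual way (stop words and sample text are illustrative):

    using System.Collections.Generic;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Standard;
    using Version = Lucene.Net.Util.Version;

    ISet<string> stopWords = StopFilter.MakeStopSet("the", "and", "of");

    TokenStream ts = new StandardTokenizer(Version.LUCENE_30,
        new System.IO.StringReader("the rest of the text"));

    // enablePositionIncrements = true leaves a positional gap where a stop word was removed.
    ts = new StopFilter(true, ts, stopWords);
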
Modified: lucene.net/trunk/src/core/Analysis/Token.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/core/Analysis/Token.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/core/Analysis/Token.cs (original)
+++ lucene.net/trunk/src/core/Analysis/Token.cs Fri Oct  5 21:22:51 2012
@@ -42,7 +42,7 @@ namespace Lucene.Net.Analysis
 	/// with type "eos".  The default token type is "word".  
 	/// <p/>
 	/// A Token can optionally have metadata (a.k.a. Payload) in the form of a variable
-	/// length byte array. Use <see cref="TermPositions.GetPayloadLength()" /> and 
+	/// length byte array. Use <see cref="TermPositions.PayloadLength" /> and 
 	/// <see cref="TermPositions.GetPayload(byte[], int)" /> to retrieve the payloads from the index.
 	/// </summary>
 	/// <summary><br/><br/>
@@ -618,9 +618,9 @@ namespace Lucene.Net.Analysis
 		
 		/// <summary>Shorthand for calling <see cref="Clear" />,
 		/// <see cref="SetTermBuffer(char[], int, int)" />,
-		/// <see cref="SetStartOffset" />,
-		/// <see cref="SetEndOffset" />,
-		/// <see cref="SetType" />
+		/// <see cref="StartOffset" />,
+		/// <see cref="EndOffset" />,
+		/// <see cref="Type" />
 		/// </summary>
 		/// <returns> this Token instance 
 		/// </returns>
@@ -638,9 +638,9 @@ namespace Lucene.Net.Analysis
 		
 		/// <summary>Shorthand for calling <see cref="Clear" />,
 		/// <see cref="SetTermBuffer(char[], int, int)" />,
-		/// <see cref="SetStartOffset" />,
-		/// <see cref="SetEndOffset" />
-		/// <see cref="SetType" /> on Token.DEFAULT_TYPE
+		/// <see cref="StartOffset" />,
+		/// <see cref="EndOffset" />
+		/// <see cref="Type" /> on Token.DEFAULT_TYPE
 		/// </summary>
 		/// <returns> this Token instance 
 		/// </returns>
@@ -656,9 +656,9 @@ namespace Lucene.Net.Analysis
 		
 		/// <summary>Shorthand for calling <see cref="Clear" />,
 		/// <see cref="SetTermBuffer(String)" />,
-		/// <see cref="SetStartOffset" />,
-		/// <see cref="SetEndOffset" />
-		/// <see cref="SetType" />
+		/// <see cref="StartOffset" />,
+		/// <see cref="EndOffset" />
+		/// <see cref="Type" />
 		/// </summary>
 		/// <returns> this Token instance 
 		/// </returns>
@@ -674,9 +674,9 @@ namespace Lucene.Net.Analysis
 		
 		/// <summary>Shorthand for calling <see cref="Clear" />,
 		/// <see cref="SetTermBuffer(String, int, int)" />,
-		/// <see cref="SetStartOffset" />,
-		/// <see cref="SetEndOffset" />
-		/// <see cref="SetType" />
+		/// <see cref="StartOffset" />,
+		/// <see cref="EndOffset" />
+		/// <see cref="Type" />
 		/// </summary>
 		/// <returns> this Token instance 
 		/// </returns>
@@ -692,9 +692,9 @@ namespace Lucene.Net.Analysis
 		
 		/// <summary>Shorthand for calling <see cref="Clear" />,
 		/// <see cref="SetTermBuffer(String)" />,
-		/// <see cref="SetStartOffset" />,
-		/// <see cref="SetEndOffset" />
-		/// <see cref="SetType" /> on Token.DEFAULT_TYPE
+		/// <see cref="StartOffset" />,
+		/// <see cref="EndOffset" />
+		/// <see cref="Type" /> on Token.DEFAULT_TYPE
 		/// </summary>
 		/// <returns> this Token instance 
 		/// </returns>
@@ -710,9 +710,9 @@ namespace Lucene.Net.Analysis
 		
 		/// <summary>Shorthand for calling <see cref="Clear" />,
 		/// <see cref="SetTermBuffer(String, int, int)" />,
-		/// <see cref="SetStartOffset" />,
-		/// <see cref="SetEndOffset" />
-		/// <see cref="SetType" /> on Token.DEFAULT_TYPE
+		/// <see cref="StartOffset" />,
+		/// <see cref="EndOffset" />
+		/// <see cref="Type" /> on Token.DEFAULT_TYPE
 		/// </summary>
 		/// <returns> this Token instance 
 		/// </returns>
@@ -802,7 +802,7 @@ namespace Lucene.Net.Analysis
 		}
        
         ///<summary>
-        /// Convenience factory that returns <code>Token</code> as implementation for the basic
+        /// Convenience factory that returns <c>Token</c> as implementation for the basic
         /// attributes and return the default impl (with &quot;Impl&quot; appended) for all other
         /// attributes.
         /// @since 3.0

Modified: lucene.net/trunk/src/core/Analysis/TokenStream.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/core/Analysis/TokenStream.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/core/Analysis/TokenStream.cs (original)
+++ lucene.net/trunk/src/core/Analysis/TokenStream.cs Fri Oct  5 21:22:51 2012
@@ -102,7 +102,7 @@ namespace Lucene.Net.Analysis
 	    /// 
 	    /// This method is called for every token of a document, so an efficient
 	    /// implementation is crucial for good performance. To avoid calls to
-	    /// <see cref="AttributeSource.AddAttribute(Type)" /> and <see cref="AttributeSource.GetAttribute(Type)" />,
+	    /// <see cref="AttributeSource.AddAttribute{T}()" /> and <see cref="AttributeSource.GetAttribute{T}()" />,
 	    /// references to all <see cref="Util.Attribute" />s that this stream uses should be
 	    /// retrieved during instantiation.
 	    /// 

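The performance note above — grab attribute references once, during instantiation, not on every call — is the standard filter pattern. An illustrative custom filter (not part of this codebase), with attribute names assumed from the 3.x attribute API:

    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Tokenattributes;

    public sealed class LowerCasingFilter : TokenFilter
    {
        // Retrieved once here; IncrementToken never has to call
        // AddAttribute/GetAttribute again.
        private readonly ITermAttribute termAtt;

        public LowerCasingFilter(TokenStream input) : base(input)
        {
            termAtt = AddAttribute<ITermAttribute>();
        }

        public override bool IncrementToken()
        {
            if (!input.IncrementToken()) return false;
            termAtt.SetTermBuffer(termAtt.Term.ToLowerInvariant());
            return true;
        }
    }
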
Modified: lucene.net/trunk/src/core/Document/AbstractField.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/core/Document/AbstractField.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/core/Document/AbstractField.cs (original)
+++ lucene.net/trunk/src/core/Document/AbstractField.cs Fri Oct  5 21:22:51 2012
@@ -72,7 +72,7 @@ namespace Lucene.Net.Documents
 			SetStoreTermVector(termVector);
 		}
 
-	    /// <summary>Returns the boost factor for hits for this field.
+	    /// <summary>Gets or sets the boost factor for hits for this field.
 	    /// 
 	    /// <p/>The default value is 1.0.
 	    /// 
@@ -80,10 +80,7 @@ namespace Lucene.Net.Documents
 	    /// Documents returned from <see cref="Lucene.Net.Index.IndexReader.Document(int)" /> and
 	    /// <see cref="Lucene.Net.Search.Searcher.Doc(int)" /> may thus not have the same value present as when
 	    /// this field was indexed.
-	    /// 
 	    /// </summary>
-	    /// <seealso cref="SetBoost(float)">
-	    /// </seealso>
 	    public virtual float Boost
 	    {
 	        get { return internalBoost; }
@@ -167,7 +164,7 @@ namespace Lucene.Net.Documents
 
 
 	    /// <summary> Return the raw byte[] for the binary field.  Note that
-	    /// you must also call <see cref="GetBinaryLength" /> and <see cref="GetBinaryOffset" />
+	    /// you must also call <see cref="BinaryLength" /> and <see cref="BinaryOffset" />
 	    /// to know which range of bytes in this
 	    /// returned array belong to the field.
 	    /// </summary>

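Reading a binary field per the corrected crefs above — the returned array can be shared, so the offset/length pair must be honoured. The field name and bytes are illustrative, and the member forms are assumed from the property conversion these comments now reference:

    using Lucene.Net.Documents;

    byte[] bytes = { 1, 2, 3, 4 };
    var doc = new Document();
    doc.Add(new Field("thumbnail", bytes, Field.Store.YES));   // stored binary field

    var field = doc.GetFieldable("thumbnail");
    byte[] raw = field.GetBinaryValue();

    // Only [BinaryOffset, BinaryOffset + BinaryLength) belongs to this field.
    var copy = new byte[field.BinaryLength];
    System.Array.Copy(raw, field.BinaryOffset, copy, 0, field.BinaryLength);
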
Modified: lucene.net/trunk/src/core/Document/Field.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/core/Document/Field.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/core/Document/Field.cs (original)
+++ lucene.net/trunk/src/core/Document/Field.cs Fri Oct  5 21:22:51 2012
@@ -72,8 +72,8 @@ namespace Lucene.Net.Documents
             
             /// <summary>Expert: Index the field's value without an Analyzer,
             /// and also disable the storing of norms.  Note that you
-            /// can also separately enable/disable norms by calling
-            /// <see cref="AbstractField.SetOmitNorms" />.  No norms means that
+            /// can also separately enable/disable norms by setting
+            /// <see cref="AbstractField.OmitNorms" />.  No norms means that
             /// index-time field and document boosting and field
             /// length normalization are disabled.  The benefit is
             /// less memory usage as norms take up one byte of RAM

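The two equivalent routes to disabling norms that the reworded comment describes, side by side (field names and values are illustrative):

    using Lucene.Net.Documents;

    // Route 1: choose the index mode that disables norms up front.
    var id = new Field("id", "42", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);

    // Route 2: keep the field analyzed and switch norms off via the property
    // the comment now points to.
    var title = new Field("title", "A Call to Arms", Field.Store.YES, Field.Index.ANALYZED);
    title.OmitNorms = true;
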
Modified: lucene.net/trunk/src/core/Document/Fieldable.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/core/Document/Fieldable.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/core/Document/Fieldable.cs (original)
+++ lucene.net/trunk/src/core/Document/Fieldable.cs Fri Oct  5 21:22:51 2012
@@ -57,7 +57,7 @@ namespace Lucene.Net.Documents
         /// this field was indexed.
         /// 
 	    /// </summary>
-	    /// <seealso cref="Lucene.Net.Documents.Document.SetBoost(float)">
+	    /// <seealso cref="Lucene.Net.Documents.Document.Boost">
 	    /// </seealso>
 	    /// <seealso cref="Lucene.Net.Search.Similarity.ComputeNorm(String, FieldInvertState)">
 	    /// </seealso>

Modified: lucene.net/trunk/src/core/Index/CheckIndex.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/core/Index/CheckIndex.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/core/Index/CheckIndex.cs (original)
+++ lucene.net/trunk/src/core/Index/CheckIndex.cs Fri Oct  5 21:22:51 2012
@@ -103,7 +103,7 @@ namespace Lucene.Net.Index
 			/// <summary>How many bad segments were found. </summary>
 			public int numBadSegments;
 			
-			/// <summary>True if we checked only specific segments (<see cref="CheckIndex_Renamed_Method(System.Collections.IList)" />)
+			/// <summary>True if we checked only specific segments (<see cref="CheckIndex.CheckIndex_Renamed_Method(List{string})" />)
 			/// was called with non-null
 			/// argument). 
 			/// </summary>
@@ -173,7 +173,7 @@ namespace Lucene.Net.Index
 				/// <summary>True if at least one of the fields in this segment
 				/// does not omitTermFreqAndPositions.
 				/// </summary>
-				/// <seealso cref="AbstractField.SetOmitTermFreqAndPositions">
+				/// <seealso cref="AbstractField.OmitTermFreqAndPositions">
 				/// </seealso>
 				public bool hasProx;
 

Modified: lucene.net/trunk/src/core/Index/ConcurrentMergeScheduler.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/core/Index/ConcurrentMergeScheduler.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/core/Index/ConcurrentMergeScheduler.cs (original)
+++ lucene.net/trunk/src/core/Index/ConcurrentMergeScheduler.cs Fri Oct  5 21:22:51 2012
@@ -24,7 +24,7 @@ namespace Lucene.Net.Index
 	
 	/// <summary>A <see cref="MergeScheduler" /> that runs each merge using a
 	/// separate thread, up until a maximum number of threads
-	/// (<see cref="SetMaxThreadCount" />) at which when a merge is
+	/// (<see cref="MaxThreadCount" />) at which when a merge is
 	/// needed, the thread(s) that are updating the index will
 	/// pause until one or more merges completes.  This is a
 	/// simple way to use concurrency in the indexing process

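Attaching the scheduler described above to a writer, with the thread cap set through the property the cref now names (assuming it is settable, as its Java setter counterpart was; paths and counts are illustrative):

    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Index;
    using Lucene.Net.Store;
    using Version = Lucene.Net.Util.Version;

    var dir = FSDirectory.Open(new System.IO.DirectoryInfo("example-index"));
    var writer = new IndexWriter(dir, new StandardAnalyzer(Version.LUCENE_30),
                                 IndexWriter.MaxFieldLength.UNLIMITED);

    var scheduler = new ConcurrentMergeScheduler();
    scheduler.MaxThreadCount = 2;          // at most two concurrent merge threads
    writer.SetMergeScheduler(scheduler);   // indexing threads pause when merges back up
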
Modified: lucene.net/trunk/src/core/Index/DocumentsWriter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/core/Index/DocumentsWriter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/core/Index/DocumentsWriter.cs (original)
+++ lucene.net/trunk/src/core/Index/DocumentsWriter.cs Fri Oct  5 21:22:51 2012
@@ -227,7 +227,7 @@ namespace Lucene.Net.Index
 			}
 		}
 		
-        /**
+        /*
         * Create and return a new DocWriterBuffer.
         */
         internal PerDocBuffer NewPerDocBuffer()
@@ -235,7 +235,7 @@ namespace Lucene.Net.Index
             return new PerDocBuffer(this);
         }
 
-        /**
+        /*
         * RAMFile buffer for DocWriters.
         */
         internal class PerDocBuffer : Lucene.Net.Store.RAMFile
@@ -245,7 +245,7 @@ namespace Lucene.Net.Index
             {
                 this.enclosingInstance = enclosingInstance;
             }
-            /**
+            /*
             * Allocate bytes used from shared pool.
             */
             public override byte[] NewBuffer(int size)
@@ -254,7 +254,7 @@ namespace Lucene.Net.Index
                 return enclosingInstance.perDocAllocator.GetByteBlock(false);
             }
 
-            /**
+            /*
             * Recycle the bytes used.
             */
             internal void Recycle()

Modified: lucene.net/trunk/src/core/Index/IndexCommit.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/core/Index/IndexCommit.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/core/Index/IndexCommit.cs (original)
+++ lucene.net/trunk/src/core/Index/IndexCommit.cs Fri Oct  5 21:22:51 2012
@@ -62,8 +62,8 @@ namespace Lucene.Net.Index
 		/// point should be deleted. 
 		/// <p/>
 		/// Decision that a commit-point should be deleted is taken by the <see cref="IndexDeletionPolicy" /> in effect
-        /// and therefore this should only be called by its <see cref="IndexDeletionPolicy.OnInit(System.Collections.IList)" /> or 
-        /// <see cref="IndexDeletionPolicy.OnCommit(System.Collections.IList)" /> methods.
+        /// and therefore this should only be called by its <see cref="IndexDeletionPolicy.OnInit{T}(IList{T})" /> or 
+        /// <see cref="IndexDeletionPolicy.OnCommit{T}(IList{T})" /> methods.
 		/// </summary>
         public abstract void Delete();
 
@@ -90,7 +90,7 @@ namespace Lucene.Net.Index
 		}
 
 	    /// <summary>Returns the version for this IndexCommit.  This is the
-	    /// same value that <see cref="IndexReader.GetVersion" /> would
+	    /// same value that <see cref="IndexReader.Version" /> would
 	    /// return if it were opened on this commit. 
 	    /// </summary>
 	    public abstract long Version { get; }

Modified: lucene.net/trunk/src/core/Index/IndexDeletionPolicy.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/core/Index/IndexDeletionPolicy.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/core/Index/IndexDeletionPolicy.cs (original)
+++ lucene.net/trunk/src/core/Index/IndexDeletionPolicy.cs Fri Oct  5 21:22:51 2012
@@ -72,32 +72,28 @@ namespace Lucene.Net.Index
 		/// sorted by age (the 0th one is the oldest commit).
 		/// </param>
 		void  OnInit<T>(IList<T> commits) where T : IndexCommit;
-		
-		/// <summary> <p/>This is called each time the writer completed a commit.
-		/// This gives the policy a chance to remove old commit points
-		/// with each commit.<p/>
-		/// 
-		/// <p/>The policy may now choose to delete old commit points 
-		/// by calling method <see cref="IndexCommit.Delete()" /> 
-		/// of <see cref="IndexCommit" />.<p/>
-		/// 
-		/// <p/>If writer has <c>autoCommit = true</c> then
-		/// this method will in general be called many times during
-		/// one instance of <see cref="IndexWriter" />.  If
-		/// <c>autoCommit = false</c> then this method is
-		/// only called once when <see cref="IndexWriter.Close()" /> is
-		/// called, or not at all if the <see cref="IndexWriter.Abort" />
-		/// is called. 
-		/// 
-		/// <p/><u>Note:</u> the last CommitPoint is the most recent one,
-		/// i.e. the "front index state". Be careful not to delete it,
-		/// unless you know for sure what you are doing, and unless 
-		/// you can afford to lose the index content while doing that.
-		/// 
-		/// </summary>
-		/// <param name="commits">List of <see cref="IndexCommit" />,
-		/// sorted by age (the 0th one is the oldest commit).
-		/// </param>
+
+        /// <summary>
+        /// <p>This is called each time the writer completed a commit.
+        /// This gives the policy a chance to remove old commit points
+        /// with each commit.</p>
+        ///
+        /// <p>The policy may now choose to delete old commit points 
+        /// by calling method <see cref="IndexCommit.Delete()"/>
+        /// of <see cref="IndexCommit" />.</p>
+        /// 
+        /// <p>This method is only called when <see cref="IndexWriter.Commit()"/>
+        /// or <see cref="IndexWriter.Close()"/> is called, or possibly not at 
+        /// all if the <see cref="IndexWriter.Rollback()"/> is called.</p>
+        ///
+        /// <p><u>Note:</u> the last CommitPoint is the most recent one,
+        /// i.e. the "front index state". Be careful not to delete it,
+        /// unless you know for sure what you are doing, and unless 
+        /// you can afford to lose the index content while doing that.</p>
+        /// </summary>
+        /// <param name="commits">
+        /// List of <see cref="IndexCommit" />, sorted by age (the 0th one is the oldest commit).
+        /// </param>
 		void  OnCommit<T>(IList<T> commits) where T : IndexCommit;
 	}
 }
\ No newline at end of file

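For reference, a minimal policy that honours the contract restated above — it keeps only the newest commit point and never deletes the "front index state":

    using System.Collections.Generic;
    using Lucene.Net.Index;

    // Equivalent in spirit to the default keep-only-last-commit policy.
    public class KeepLatestOnlyPolicy : IndexDeletionPolicy
    {
        public void OnInit<T>(IList<T> commits) where T : IndexCommit
        {
            OnCommit(commits);
        }

        public void OnCommit<T>(IList<T> commits) where T : IndexCommit
        {
            // The last entry is the newest ("front") commit; delete everything older.
            for (int i = 0; i < commits.Count - 1; i++)
                commits[i].Delete();
        }
    }
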

