lucenenet-commits mailing list archives

From nightowl...@apache.org
Subject [3/9] lucenenet git commit: SWEEP: Changed <item></item> to <item><description></description></item> in documentation comments
Date Thu, 01 Jun 2017 22:48:59 GMT
SWEEP: Changed <item></item> to <item><description></description></item> in documentation comments
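
For context: the C# XML documentation specification defines each <item> inside a <list> as containing <term> and/or <description> elements, which is why this sweep wraps every item's text. A minimal before/after sketch of the pattern, adapted from the ArabicNormalizer hunk later in this commit:

    // Before the sweep: item text sits directly inside <item>,
    // which documentation tooling may not render as a proper list entry.
    /// <list type="bullet">
    ///     <item>Removal of tatweel (stretching character).</item>
    /// </list>

    // After the sweep: item text is wrapped in <description>,
    // the structure the XML doc comment specification defines.
    /// <list type="bullet">
    ///     <item><description>Removal of tatweel (stretching character).</description></item>
    /// </list>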


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/7099a846
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/7099a846
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/7099a846

Branch: refs/heads/master
Commit: 7099a8465f2dcd9f76c5ee8c2eff11d0a36774d3
Parents: cfeaf28
Author: Shad Storhaug <shad@shadstorhaug.com>
Authored: Thu Jun 1 21:32:26 2017 +0700
Committer: Shad Storhaug <shad@shadstorhaug.com>
Committed: Thu Jun 1 21:32:26 2017 +0700

----------------------------------------------------------------------
 .../Analysis/Ar/ArabicAnalyzer.cs               |  6 +--
 .../Analysis/Ar/ArabicLetterTokenizer.cs        |  4 +-
 .../Analysis/Ar/ArabicNormalizer.cs             | 10 ++---
 .../Analysis/Ar/ArabicStemmer.cs                |  4 +-
 .../Analysis/Ca/CatalanAnalyzer.cs              |  4 +-
 .../Analysis/Cjk/CJKTokenizer.cs                |  6 +--
 .../Analysis/Cjk/CJKWidthFilter.cs              |  4 +-
 .../Analysis/Ckb/SoraniNormalizer.cs            | 12 +++---
 .../Analysis/Cn/ChineseFilter.cs                | 12 +++---
 .../Analysis/Cn/ChineseTokenizer.cs             |  4 +-
 .../Analysis/CommonGrams/CommonGramsFilter.cs   |  8 ++--
 .../CommonGrams/CommonGramsQueryFilter.cs       | 14 +++----
 .../Compound/CompoundWordTokenFilterBase.cs     |  6 +--
 .../DictionaryCompoundWordTokenFilter.cs        |  4 +-
 .../Compound/Hyphenation/TernaryTree.cs         |  4 +-
 .../HyphenationCompoundWordTokenFilter.cs       |  4 +-
 ...HyphenationCompoundWordTokenFilterFactory.cs | 18 ++++----
 .../Analysis/Core/LetterTokenizer.cs            |  4 +-
 .../Analysis/Core/LowerCaseFilter.cs            |  2 +-
 .../Analysis/Core/LowerCaseTokenizer.cs         |  4 +-
 .../Analysis/Core/SimpleAnalyzer.cs             |  4 +-
 .../Analysis/Core/StopAnalyzer.cs               |  6 +--
 .../Analysis/Core/StopFilter.cs                 |  4 +-
 .../Analysis/Core/StopFilterFactory.cs          | 18 ++++----
 .../Analysis/Core/WhitespaceAnalyzer.cs         |  4 +-
 .../Analysis/Core/WhitespaceTokenizer.cs        |  4 +-
 .../Analysis/Cz/CzechAnalyzer.cs                |  8 ++--
 .../Analysis/De/GermanAnalyzer.cs               | 10 ++---
 .../Analysis/De/GermanNormalizationFilter.cs    |  8 ++--
 .../Analysis/De/GermanStemmer.cs                | 12 +++---
 .../Analysis/El/GreekAnalyzer.cs                |  6 +--
 .../Analysis/El/GreekLowerCaseFilter.cs         |  2 +-
 .../Analysis/En/EnglishPossessiveFilter.cs      |  4 +-
 .../Analysis/Es/SpanishAnalyzer.cs              |  2 +-
 .../Analysis/Fa/PersianNormalizer.cs            |  6 +--
 .../Analysis/Fr/FrenchAnalyzer.cs               | 10 ++---
 .../Analysis/Hi/HindiAnalyzer.cs                |  2 +-
 .../Analysis/Hi/HindiNormalizer.cs              |  4 +-
 .../Analysis/It/ItalianAnalyzer.cs              |  6 +--
 .../Analysis/Lv/LatvianStemmer.cs               | 18 ++++----
 .../Miscellaneous/ASCIIFoldingFilter.cs         | 32 +++++++-------
 .../Miscellaneous/WordDelimiterFilter.cs        | 44 ++++++++++----------
 .../Analysis/NGram/EdgeNGramTokenizer.cs        | 12 +++---
 .../Analysis/NGram/NGramTokenFilter.cs          | 10 ++---
 .../Analysis/NGram/NGramTokenizer.cs            |  6 +--
 .../Analysis/Nl/DutchAnalyzer.cs                | 12 +++---
 .../Analysis/Pattern/PatternTokenizer.cs        |  4 +-
 .../Analysis/Pattern/PatternTokenizerFactory.cs |  4 +-
 .../Analysis/Pt/PortugueseAnalyzer.cs           |  2 +-
 .../Analysis/Pt/RSLPStemmerBase.cs              | 28 ++++++-------
 .../Analysis/Reverse/ReverseStringFilter.cs     |  2 +-
 .../Analysis/Ru/RussianAnalyzer.cs              |  4 +-
 .../Analysis/Snowball/SnowballAnalyzer.cs       |  2 +-
 .../Analysis/Snowball/SnowballFilter.cs         |  4 +-
 .../Analysis/Standard/ClassicAnalyzer.cs        | 12 +++---
 .../Analysis/Standard/ClassicTokenizer.cs       | 10 ++---
 .../Analysis/Standard/StandardAnalyzer.cs       | 14 +++----
 .../Analysis/Standard/StandardTokenizer.cs      |  8 ++--
 .../Analysis/Standard/StandardTokenizerImpl.cs  | 16 +++----
 .../Analysis/Standard/UAX29URLEmailTokenizer.cs | 20 ++++-----
 .../Standard/UAX29URLEmailTokenizerImpl.cs      | 20 ++++-----
 .../Analysis/Synonym/SolrSynonymParser.cs       | 14 +++----
 .../Analysis/Synonym/SynonymFilterFactory.cs    |  6 +--
 .../Analysis/Th/ThaiAnalyzer.cs                 |  2 +-
 .../Analysis/Util/AbstractAnalysisFactory.cs    |  8 ++--
 .../Analysis/Util/CharArrayMap.cs               |  4 +-
 .../Analysis/Util/CharTokenizer.cs              |  4 +-
 .../Analysis/Util/WordlistLoader.cs             | 12 +++---
 .../Collation/CollationAttributeFactory.cs      | 12 +++---
 .../Collation/CollationKeyAnalyzer.cs           | 16 +++----
 .../Collation/CollationKeyFilter.cs             | 12 +++---
 .../Collation/CollationKeyFilterFactory.cs      | 20 ++++-----
 .../Taxonomy/ParallelTaxonomyArrays.cs          | 12 +++---
 src/Lucene.Net.Facet/Taxonomy/TaxonomyReader.cs |  8 ++--
 .../Highlight/TokenSources.cs                   | 24 +++++------
 .../PostingsHighlight/PassageScorer.cs          |  6 +--
 .../PostingsHighlight/PostingsHighlighter.cs    |  8 ++--
 .../Index/Sorter/BlockJoinComparatorSource.cs   |  6 +--
 src/Lucene.Net.Queries/CustomScoreQuery.cs      |  4 +-
 src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs      | 40 +++++++++---------
 .../Classic/QueryParser.cs                      | 16 +++----
 .../Flexible/Core/QueryParserHelper.cs          | 12 +++---
 .../Flexible/Standard/StandardQueryParser.cs    | 16 +++----
 .../Simple/SimpleQueryParser.cs                 | 32 +++++++-------
 .../Surround/Query/SpanNearClauseFactory.cs     |  8 ++--
 .../Queries/SortedSetSortField.cs               |  8 ++--
 .../Prefix/PrefixTreeStrategy.cs                | 20 ++++-----
 src/Lucene.Net.Spatial/SpatialStrategy.cs       | 10 ++---
 .../Vector/PointVectorStrategy.cs               | 10 ++---
 .../Spell/LuceneLevenshteinDistance.cs          | 14 +++----
 .../Suggest/Analyzing/AnalyzingSuggester.cs     | 12 +++---
 .../Suggest/DocumentDictionary.cs               | 12 +++---
 .../Suggest/DocumentValueSourceDictionary.cs    |  8 ++--
 .../Suggest/FileDictionary.cs                   | 18 ++++----
 .../Suggest/Fst/FSTCompletionBuilder.cs         | 26 ++++++------
 src/Lucene.Net/Analysis/Analyzer.cs             | 32 +++++++-------
 src/Lucene.Net/Analysis/Token.cs                | 28 ++++++-------
 .../IPositionIncrementAttribute.cs              |  8 ++--
 src/Lucene.Net/Analysis/TokenStream.cs          | 28 ++++++-------
 src/Lucene.Net/Codecs/Codec.cs                  |  8 ++--
 src/Lucene.Net/Codecs/DocValuesFormat.cs        | 10 ++---
 src/Lucene.Net/Codecs/PostingsFormat.cs         | 10 ++---
 src/Lucene.Net/Document/Field.cs                |  4 +-
 src/Lucene.Net/Index/AutomatonTermsEnum.cs      |  6 +--
 src/Lucene.Net/Index/DocTermOrds.cs             | 22 +++++-----
 .../Index/DocumentsWriterDeleteQueue.cs         | 12 +++---
 .../Index/FlushByRamOrCountsPolicy.cs           | 12 +++---
 src/Lucene.Net/Index/FlushPolicy.cs             |  8 ++--
 src/Lucene.Net/Index/IndexReader.cs             |  8 ++--
 src/Lucene.Net/Store/CompoundFileDirectory.cs   | 34 +++++++--------
 src/Lucene.Net/Store/Directory.cs               | 12 +++---
 src/Lucene.Net/Store/FSDirectory.cs             | 12 +++---
 src/Lucene.Net/Support/C5.Support.cs            | 30 ++++++-------
 .../Support/Codecs/DefaultCodecFactory.cs       | 16 +++----
 .../Codecs/DefaultDocValuesFormatFactory.cs     | 16 +++----
 .../Codecs/DefaultPostingsFormatFactory.cs      | 16 +++----
 src/Lucene.Net/Support/HashMap.cs               | 12 +++---
 src/Lucene.Net/Support/IO/Buffer.cs             | 24 +++++------
 src/Lucene.Net/Support/IO/ByteBuffer.cs         | 10 ++---
 src/Lucene.Net/Support/IO/FileSupport.cs        |  4 +-
 src/Lucene.Net/Support/IO/LongBuffer.cs         |  8 ++--
 .../Support/IO/LongToByteBufferAdapter.cs       |  8 ++--
 src/Lucene.Net/Support/LinkedHashMap.cs         | 12 +++---
 src/Lucene.Net/Support/StringExtensions.cs      |  4 +-
 src/Lucene.Net/Util/ArrayUtil.cs                | 10 ++---
 125 files changed, 701 insertions(+), 701 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicAnalyzer.cs
index c9a3495..095d92f 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicAnalyzer.cs
@@ -37,9 +37,9 @@ namespace Lucene.Net.Analysis.Ar
     /// <para/>
     /// The analysis package contains three primary components:
     /// <list type="bullet">
-    ///     <item><see cref="ArabicNormalizationFilter"/>: Arabic orthographic normalization.</item>
-    ///     <item><see cref="ArabicStemFilter"/>: Arabic light stemming</item>
-    ///     <item>Arabic stop words file: a set of default Arabic stop words.</item>
+    ///     <item><description><see cref="ArabicNormalizationFilter"/>: Arabic orthographic normalization.</description></item>
+    ///     <item><description><see cref="ArabicStemFilter"/>: Arabic light stemming</description></item>
+    ///     <item><description>Arabic stop words file: a set of default Arabic stop words.</description></item>
     /// </list>
     /// </summary>
     public sealed class ArabicAnalyzer : StopwordAnalyzerBase

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicLetterTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicLetterTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicLetterTokenizer.cs
index ae875e4..84ccc30 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicLetterTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicLetterTokenizer.cs
@@ -34,9 +34,9 @@ namespace Lucene.Net.Analysis.Ar
     /// You must specify the required <see cref="LuceneVersion"/> compatibility when creating
     /// <see cref="ArabicLetterTokenizer"/>:
     /// <list type="bullet">
-    /// <item>As of 3.1, <see cref="Util.CharTokenizer"/> uses an int based API to normalize and
+    /// <item><description>As of 3.1, <see cref="Util.CharTokenizer"/> uses an int based API to normalize and
     /// detect token characters. See <see cref="IsTokenChar(int)"/> and
-    /// <see cref="Util.CharTokenizer.Normalize(int)"/> for details.</item>
+    /// <see cref="Util.CharTokenizer.Normalize(int)"/> for details.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicNormalizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicNormalizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicNormalizer.cs
index 9733198..7556a43 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicNormalizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicNormalizer.cs
@@ -26,11 +26,11 @@ namespace Lucene.Net.Analysis.Ar
     /// <para/>
     /// Normalization is defined as:
     /// <list type="bullet">
-    ///     <item> Normalization of hamza with alef seat to a bare alef.</item>
-    ///     <item> Normalization of teh marbuta to heh</item>
-    ///     <item> Normalization of dotless yeh (alef maksura) to yeh.</item>
-    ///     <item> Removal of Arabic diacritics (the harakat)</item>
-    ///     <item> Removal of tatweel (stretching character).</item>
+    ///     <item><description> Normalization of hamza with alef seat to a bare alef.</description></item>
+    ///     <item><description> Normalization of teh marbuta to heh</description></item>
+    ///     <item><description> Normalization of dotless yeh (alef maksura) to yeh.</description></item>
+    ///     <item><description> Removal of Arabic diacritics (the harakat)</description></item>
+    ///     <item><description> Removal of tatweel (stretching character).</description></item>
     /// </list>
     /// </summary>
     public class ArabicNormalizer

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicStemmer.cs
index 444b5d3..a7de3af 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicStemmer.cs
@@ -26,8 +26,8 @@ namespace Lucene.Net.Analysis.Ar
     /// <para/>
     /// Stemming is defined as:
     /// <list type="bullet">
-    ///     <item> Removal of attached definite article, conjunction, and prepositions.</item>
-    ///     <item> Stemming of common suffixes.</item>
+    ///     <item><description> Removal of attached definite article, conjunction, and prepositions.</description></item>
+    ///     <item><description> Stemming of common suffixes.</description></item>
     /// </list>
     /// </summary>
     public class ArabicStemmer

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs
index ab39999..ba84523 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs
@@ -33,8 +33,8 @@ namespace Lucene.Net.Analysis.Ca
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating CatalanAnalyzer:
     /// <list>
-    ///   <item> As of 3.6, <see cref="ElisionFilter"/> with a set of Catalan 
-    ///        contractions is used by default.</item>
+    ///   <item><description> As of 3.6, <see cref="ElisionFilter"/> with a set of Catalan 
+    ///        contractions is used by default.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKTokenizer.cs
index 901320b..babbee1 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKTokenizer.cs
@@ -33,9 +33,9 @@ namespace Lucene.Net.Analysis.Cjk
     /// </para>
     /// Additionally, the following is applied to Latin text (such as English):
     /// <list type="bullet">
-    ///     <item>Text is converted to lowercase.</item>
-    ///     <item>Numeric digits, '+', '#', and '_' are tokenized as letters.</item>
-    ///     <item>Full-width forms are converted to half-width forms.</item>
+    ///     <item><description>Text is converted to lowercase.</description></item>
+    ///     <item><description>Numeric digits, '+', '#', and '_' are tokenized as letters.</description></item>
+    ///     <item><description>Full-width forms are converted to half-width forms.</description></item>
     /// </list>
     /// For more info on Asian language (Chinese, Japanese, and Korean) text segmentation:
     /// please search  <a

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKWidthFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKWidthFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKWidthFilter.cs
index 64018e2..b109aac 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKWidthFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKWidthFilter.cs
@@ -24,8 +24,8 @@ namespace Lucene.Net.Analysis.Cjk
     /// <summary>
     /// A <see cref="TokenFilter"/> that normalizes CJK width differences:
     /// <list type="bullet">
-    ///   <item>Folds fullwidth ASCII variants into the equivalent basic latin</item>
-    ///   <item>Folds halfwidth Katakana variants into the equivalent kana</item>
+    ///   <item><description>Folds fullwidth ASCII variants into the equivalent basic latin</description></item>
+    ///   <item><description>Folds halfwidth Katakana variants into the equivalent kana</description></item>
     /// </list>
     /// <para>
     /// NOTE: this filter can be viewed as a (practical) subset of NFKC/NFKD

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Ckb/SoraniNormalizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ckb/SoraniNormalizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ckb/SoraniNormalizer.cs
index 19135d9..78e6750 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ckb/SoraniNormalizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ckb/SoraniNormalizer.cs
@@ -26,12 +26,12 @@ namespace Lucene.Net.Analysis.Ckb
     /// <para/>
     /// Normalization consists of:
     /// <list type="bullet">
-    ///   <item>Alternate forms of 'y' (0064, 0649) are converted to 06CC (FARSI YEH)</item>
-    ///   <item>Alternate form of 'k' (0643) is converted to 06A9 (KEHEH)</item>
-    ///   <item>Alternate forms of vowel 'e' (0647+200C, word-final 0647, 0629) are converted to 06D5 (AE)</item>
-    ///   <item>Alternate (joining) form of 'h' (06BE) is converted to 0647</item>
-    ///   <item>Alternate forms of 'rr' (0692, word-initial 0631) are converted to 0695 (REH WITH SMALL V BELOW)</item>
-    ///   <item>Harakat, tatweel, and formatting characters such as directional controls are removed.</item>
+    ///   <item><description>Alternate forms of 'y' (0064, 0649) are converted to 06CC (FARSI YEH)</description></item>
+    ///   <item><description>Alternate form of 'k' (0643) is converted to 06A9 (KEHEH)</description></item>
+    ///   <item><description>Alternate forms of vowel 'e' (0647+200C, word-final 0647, 0629) are converted to 06D5 (AE)</description></item>
+    ///   <item><description>Alternate (joining) form of 'h' (06BE) is converted to 0647</description></item>
+    ///   <item><description>Alternate forms of 'rr' (0692, word-initial 0631) are converted to 0695 (REH WITH SMALL V BELOW)</description></item>
+    ///   <item><description>Harakat, tatweel, and formatting characters such as directional controls are removed.</description></item>
     /// </list>
     /// </summary>
     public class SoraniNormalizer

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseFilter.cs
index 61e6576..47ff4a5 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseFilter.cs
@@ -27,15 +27,15 @@ namespace Lucene.Net.Analysis.Cn
     /// <summary>
     /// A <see cref="TokenFilter"/> with a stop word table.  
     /// <list type="bullet">
-    ///     <item>Numeric tokens are removed.</item>
-    ///     <item>English tokens must be larger than 1 character.</item>
-    ///     <item>One Chinese character as one Chinese word.</item>
+    ///     <item><description>Numeric tokens are removed.</description></item>
+    ///     <item><description>English tokens must be larger than 1 character.</description></item>
+    ///     <item><description>One Chinese character as one Chinese word.</description></item>
     /// </list>
     /// TO DO:
     /// <list type="number">
-    ///     <item>Add Chinese stop words, such as \ue400</item>
-    ///     <item>Dictionary based Chinese word extraction</item>
-    ///     <item>Intelligent Chinese word extraction</item>
+    ///     <item><description>Add Chinese stop words, such as \ue400</description></item>
+    ///     <item><description>Dictionary based Chinese word extraction</description></item>
+    ///     <item><description>Intelligent Chinese word extraction</description></item>
     /// </list>
     /// </summary>
     /// @deprecated (3.1) Use <see cref="Core.StopFilter"/> instead, which has the same functionality.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseTokenizer.cs
index cd98aca..9b127df 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseTokenizer.cs
@@ -34,8 +34,8 @@ namespace Lucene.Net.Analysis.Cn
     /// For example, if the Chinese text
     /// "C1C2C3C4" is to be indexed:
     /// <list type="bullet">
-    ///     <item>The tokens returned from ChineseTokenizer are C1, C2, C3, C4.</item>
-    ///     <item>The tokens returned from the CJKTokenizer are C1C2, C2C3, C3C4.</item>
+    ///     <item><description>The tokens returned from ChineseTokenizer are C1, C2, C3, C4.</description></item>
+    ///     <item><description>The tokens returned from the CJKTokenizer are C1C2, C2C3, C3C4.</description></item>
     /// </list>
     /// </para>
     /// <para>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsFilter.cs
index fcd9b7a..b4bd0fd 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsFilter.cs
@@ -34,10 +34,10 @@ namespace Lucene.Net.Analysis.CommonGrams
     /// use of <see cref="PositionIncrementAttribute.PositionIncrement"/>. Bigrams have a type
     /// of <see cref="GRAM_TYPE"/> Example:
     /// <list type="bullet">
-    ///     <item>input:"the quick brown fox"</item>
-    ///     <item>output:|"the","the-quick"|"brown"|"fox"|</item>
-    ///     <item>"the-quick" has a position increment of 0 so it is in the same position
-    /// as "the" "the-quick" has a term.type() of "gram"</item>
+    ///     <item><description>input:"the quick brown fox"</description></item>
+    ///     <item><description>output:|"the","the-quick"|"brown"|"fox"|</description></item>
+    ///     <item><description>"the-quick" has a position increment of 0 so it is in the same position
+    /// as "the" "the-quick" has a term.type() of "gram"</description></item>
     /// </list>
     /// </summary>
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsQueryFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsQueryFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsQueryFilter.cs
index 07e7b53..2b59887 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsQueryFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsQueryFilter.cs
@@ -25,11 +25,11 @@ namespace Lucene.Net.Analysis.CommonGrams
     /// <para/>
     /// Example:
     /// <list type="bullet">
-    ///     <item>query input to CommonGramsFilter: "the rain in spain falls mainly"</item>
-    ///     <item>output of CommomGramsFilter/input to CommonGramsQueryFilter:
-    ///     |"the, "the-rain"|"rain" "rain-in"|"in, "in-spain"|"spain"|"falls"|"mainly"</item>
-    ///     <item>output of CommonGramsQueryFilter:"the-rain", "rain-in" ,"in-spain",
-    ///     "falls", "mainly"</item>
+    ///     <item><description>query input to CommonGramsFilter: "the rain in spain falls mainly"</description></item>
+    ///     <item><description>output of CommomGramsFilter/input to CommonGramsQueryFilter:
+    ///     |"the, "the-rain"|"rain" "rain-in"|"in, "in-spain"|"spain"|"falls"|"mainly"</description></item>
+    ///     <item><description>output of CommonGramsQueryFilter:"the-rain", "rain-in" ,"in-spain",
+    ///     "falls", "mainly"</description></item>
     /// </list>
     /// </summary>
     /// <remarks>
@@ -84,8 +84,8 @@ namespace Lucene.Net.Analysis.CommonGrams
         /// Output bigrams whenever possible to optimize queries. Only output unigrams
         /// when they are not a member of a bigram. Example:
         /// <list type="bullet">
-        ///     <item>input: "the rain in spain falls mainly"</item>
-        ///     <item>output:"the-rain", "rain-in" ,"in-spain", "falls", "mainly"</item>
+        ///     <item><description>input: "the rain in spain falls mainly"</description></item>
+        ///     <item><description>output:"the-rain", "rain-in" ,"in-spain", "falls", "mainly"</description></item>
         /// </list>
         /// </summary>
         public override bool IncrementToken()

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Compound/CompoundWordTokenFilterBase.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Compound/CompoundWordTokenFilterBase.cs b/src/Lucene.Net.Analysis.Common/Analysis/Compound/CompoundWordTokenFilterBase.cs
index f479951..5e176af 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Compound/CompoundWordTokenFilterBase.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Compound/CompoundWordTokenFilterBase.cs
@@ -30,10 +30,10 @@ namespace Lucene.Net.Analysis.Compound
     /// You must specify the required <see cref="LuceneVersion"/> compatibility when creating
     /// <see cref="CompoundWordTokenFilterBase"/>:
     /// <list type="bullet">
-    ///     <item>As of 3.1, CompoundWordTokenFilterBase correctly handles Unicode 4.0
+    ///     <item><description>As of 3.1, CompoundWordTokenFilterBase correctly handles Unicode 4.0
     ///     supplementary characters in strings and char arrays provided as compound word
-    ///     dictionaries.</item>
-    ///     <item>As of 4.4, <see cref="CompoundWordTokenFilterBase"/> doesn't update offsets.</item>
+    ///     dictionaries.</description></item>
+    ///     <item><description>As of 4.4, <see cref="CompoundWordTokenFilterBase"/> doesn't update offsets.</description></item>
     /// </list>
     /// </summary>
     public abstract class CompoundWordTokenFilterBase : TokenFilter

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Compound/DictionaryCompoundWordTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Compound/DictionaryCompoundWordTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Compound/DictionaryCompoundWordTokenFilter.cs
index 12ce070..063c731 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Compound/DictionaryCompoundWordTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Compound/DictionaryCompoundWordTokenFilter.cs
@@ -31,9 +31,9 @@ namespace Lucene.Net.Analysis.Compound
     /// You must specify the required <see cref="LuceneVersion"/> compatibility when creating
     /// <see cref="CompoundWordTokenFilterBase"/>:
     /// <list type="bullet">
-    ///     <item>As of 3.1, CompoundWordTokenFilterBase correctly handles Unicode 4.0
+    ///     <item><description>As of 3.1, CompoundWordTokenFilterBase correctly handles Unicode 4.0
     ///     supplementary characters in strings and char arrays provided as compound word
-    ///     dictionaries.</item>
+    ///     dictionaries.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/TernaryTree.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/TernaryTree.cs b/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/TernaryTree.cs
index 82feaec..d3758df 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/TernaryTree.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/TernaryTree.cs
@@ -98,8 +98,8 @@ namespace Lucene.Net.Analysis.Compound.Hyphenation
         /// reserved:
         /// </para>
         /// <list type="bullet">
-        ///     <item>0x0000 as string terminator</item>
-        ///     <item>0xFFFF to indicate that the branch starting at this node is compressed</item>
+        ///     <item><description>0x0000 as string terminator</description></item>
+        ///     <item><description>0xFFFF to indicate that the branch starting at this node is compressed</description></item>
         /// </list>
         /// <para>
         /// This shouldn't be a problem if we give the usual semantics to strings since

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilter.cs
index 533b76e..0e263ed 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilter.cs
@@ -34,9 +34,9 @@ namespace Lucene.Net.Analysis.Compound
     /// You must specify the required <see cref="LuceneVersion"/> compatibility when creating
     /// <see cref="CompoundWordTokenFilterBase"/>:
     /// <list type="bullet">
-    ///     <item>As of 3.1, CompoundWordTokenFilterBase correctly handles Unicode 4.0
+    ///     <item><description>As of 3.1, CompoundWordTokenFilterBase correctly handles Unicode 4.0
     ///     supplementary characters in strings and char arrays provided as compound word
-    ///     dictionaries.</item>
+    ///     dictionaries.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilterFactory.cs
index c2f69c4..8c53368 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilterFactory.cs
@@ -29,15 +29,15 @@ namespace Lucene.Net.Analysis.Compound
     /// <para/>
     /// This factory accepts the following parameters:
     /// <list type="bullet">
-    ///     <item><code>hyphenator</code> (mandatory): path to the FOP xml hyphenation pattern. 
-    ///     See <a href="http://offo.sourceforge.net/hyphenation/">http://offo.sourceforge.net/hyphenation/</a>.</item>
-    ///     <item><code>encoding</code> (optional): encoding of the xml hyphenation file. defaults to UTF-8.</item>
-    ///     <item><code>dictionary</code> (optional): dictionary of words. defaults to no dictionary.</item>
-    ///     <item><code>minWordSize</code> (optional): minimal word length that gets decomposed. defaults to 5.</item>
-    ///     <item><code>minSubwordSize</code> (optional): minimum length of subwords. defaults to 2.</item>
-    ///     <item><code>maxSubwordSize</code> (optional): maximum length of subwords. defaults to 15.</item>
-    ///     <item><code>onlyLongestMatch</code> (optional): if true, adds only the longest matching subword 
-    ///     to the stream. defaults to false.</item>
+    ///     <item><description><code>hyphenator</code> (mandatory): path to the FOP xml hyphenation pattern. 
+    ///     See <a href="http://offo.sourceforge.net/hyphenation/">http://offo.sourceforge.net/hyphenation/</a>.</description></item>
+    ///     <item><description><code>encoding</code> (optional): encoding of the xml hyphenation file. defaults to UTF-8.</description></item>
+    ///     <item><description><code>dictionary</code> (optional): dictionary of words. defaults to no dictionary.</description></item>
+    ///     <item><description><code>minWordSize</code> (optional): minimal word length that gets decomposed. defaults to 5.</description></item>
+    ///     <item><description><code>minSubwordSize</code> (optional): minimum length of subwords. defaults to 2.</description></item>
+    ///     <item><description><code>maxSubwordSize</code> (optional): maximum length of subwords. defaults to 15.</description></item>
+    ///     <item><description><code>onlyLongestMatch</code> (optional): if true, adds only the longest matching subword 
+    ///     to the stream. defaults to false.</description></item>
     /// </list>
     /// <para>
     /// <code>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Core/LetterTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/LetterTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/LetterTokenizer.cs
index 1be2e65..4b45693 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/LetterTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/LetterTokenizer.cs
@@ -34,9 +34,9 @@ namespace Lucene.Net.Analysis.Core
     /// You must specify the required <see cref="LuceneVersion"/> compatibility when creating
     /// <see cref="LetterTokenizer"/>:
     /// <list type="bullet">
-    ///     <item>As of 3.1, <see cref="CharTokenizer"/> uses an <see cref="int"/> based API to normalize and
+    ///     <item><description>As of 3.1, <see cref="CharTokenizer"/> uses an <see cref="int"/> based API to normalize and
     ///     detect token characters. See <see cref="CharTokenizer.IsTokenChar(int)"/> and
-    ///     <see cref="CharTokenizer.Normalize(int)"/> for details.</item>
+    ///     <see cref="CharTokenizer.Normalize(int)"/> for details.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Core/LowerCaseFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/LowerCaseFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/LowerCaseFilter.cs
index 36bde21..5f9ee42 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/LowerCaseFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/LowerCaseFilter.cs
@@ -26,7 +26,7 @@ namespace Lucene.Net.Analysis.Core
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating LowerCaseFilter:
     /// <list type="bullet">
-    ///     <item> As of 3.1, supplementary characters are properly lowercased.</item>
+    ///     <item><description> As of 3.1, supplementary characters are properly lowercased.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Core/LowerCaseTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/LowerCaseTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/LowerCaseTokenizer.cs
index 6db79e8..a3408b2 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/LowerCaseTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/LowerCaseTokenizer.cs
@@ -35,9 +35,9 @@ namespace Lucene.Net.Analysis.Core
     /// You must specify the required <see cref="LuceneVersion"/> compatibility when creating
     /// <see cref="LowerCaseTokenizer"/>:
     /// <list type="bullet">
-    ///     <item>As of 3.1, <see cref="Util.CharTokenizer"/> uses an int based API to normalize and
+    ///     <item><description>As of 3.1, <see cref="Util.CharTokenizer"/> uses an int based API to normalize and
     ///     detect token characters. See <see cref="Util.CharTokenizer.IsTokenChar(int)"/> and
-    ///     <see cref="Util.CharTokenizer.Normalize(int)"/> for details.</item>
+    ///     <see cref="Util.CharTokenizer.Normalize(int)"/> for details.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Core/SimpleAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/SimpleAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/SimpleAnalyzer.cs
index 80586d0..0d49f35 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/SimpleAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/SimpleAnalyzer.cs
@@ -27,9 +27,9 @@ namespace Lucene.Net.Analysis.Core
     /// You must specify the required <see cref="LuceneVersion"/> compatibility
     /// when creating <see cref="Util.CharTokenizer"/>:
     /// <list type="bullet">
-    ///     <item>As of 3.1, <see cref="LowerCaseTokenizer"/> uses an int based API to normalize and
+    ///     <item><description>As of 3.1, <see cref="LowerCaseTokenizer"/> uses an int based API to normalize and
     ///     detect token codepoints. See <see cref="Util.CharTokenizer.IsTokenChar(int)"/> and
-    ///     <see cref="Util.CharTokenizer.Normalize(int)"/> for details.</item>
+    ///     <see cref="Util.CharTokenizer.Normalize(int)"/> for details.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Core/StopAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/StopAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/StopAnalyzer.cs
index e91072e..0a4d34c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/StopAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/StopAnalyzer.cs
@@ -29,9 +29,9 @@ namespace Lucene.Net.Analysis.Core
     /// You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="StopAnalyzer"/>:
     /// <list type="bullet">
-    ///     <item> As of 3.1, StopFilter correctly handles Unicode 4.0
-    ///         supplementary characters in stopwords</item>
-    ///     <item> As of 2.9, position increments are preserved</item>
+    ///     <item><description> As of 3.1, StopFilter correctly handles Unicode 4.0
+    ///         supplementary characters in stopwords</description></item>
+    ///     <item><description> As of 2.9, position increments are preserved</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilter.cs
index 2515426..e8ae3b7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilter.cs
@@ -28,9 +28,9 @@ namespace Lucene.Net.Analysis.Core
     /// You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="StopFilter"/>:
     /// <list type="bullet">
-    ///     <item>As of 3.1, StopFilter correctly handles Unicode 4.0
+    ///     <item><description>As of 3.1, StopFilter correctly handles Unicode 4.0
     ///         supplementary characters in stopwords and position
-    ///         increments are preserved</item>
+    ///         increments are preserved</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilterFactory.cs
index 0b5feb8..9ff7e7b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilterFactory.cs
@@ -39,30 +39,30 @@ namespace Lucene.Net.Analysis.Core
     /// All attributes are optional:
     /// </para>
     /// <list type="bullet">
-    ///     <item><c>ignoreCase</c> defaults to <c>false</c></item>
-    ///     <item><c>words</c> should be the name of a stopwords file to parse, if not 
+    ///     <item><description><c>ignoreCase</c> defaults to <c>false</c></description></item>
+    ///     <item><description><c>words</c> should be the name of a stopwords file to parse, if not 
     ///      specified the factory will use <see cref="StopAnalyzer.ENGLISH_STOP_WORDS_SET"/>
-    ///     </item>
-    ///     <item><c>format</c> defines how the <c>words</c> file will be parsed, 
+    ///     </description></item>
+    ///     <item><description><c>format</c> defines how the <c>words</c> file will be parsed, 
     ///      and defaults to <c>wordset</c>.  If <c>words</c> is not specified, 
     ///      then <c>format</c> must not be specified.
-    ///     </item>
+    ///     </description></item>
     /// </list>
     /// <para>
     /// The valid values for the <c>format</c> option are:
     /// </para>
     /// <list type="bullet">
-    ///  <item><c>wordset</c> - This is the default format, which supports one word per 
+    ///  <item><description><c>wordset</c> - This is the default format, which supports one word per 
     ///      line (including any intra-word whitespace) and allows whole line comments 
     ///      begining with the "#" character.  Blank lines are ignored.  See 
     ///      <see cref="WordlistLoader.GetLines"/> for details.
-    ///  </item>
-    ///  <item><c>snowball</c> - This format allows for multiple words specified on each 
+    ///  </description></item>
+    ///  <item><description><c>snowball</c> - This format allows for multiple words specified on each 
     ///      line, and trailing comments may be specified using the vertical line ("&#124;"). 
     ///      Blank lines are ignored.  See 
     ///      <see cref="WordlistLoader.GetSnowballWordSet(System.IO.TextReader, Net.Util.LuceneVersion)"/> 
     ///      for details.
-    ///  </item>
+    ///  </description></item>
     /// </list>
     /// </summary>
     public class StopFilterFactory : TokenFilterFactory, IResourceLoaderAware

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceAnalyzer.cs
index 6becd82..09e8028 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceAnalyzer.cs
@@ -28,9 +28,9 @@ namespace Lucene.Net.Analysis.Core
     /// You must specify the required <see cref="LuceneVersion"/> compatibility
     /// when creating <see cref="CharTokenizer"/>:
     /// <list type="bullet">
-    ///     <item>As of 3.1, <see cref="WhitespaceTokenizer"/> uses an int based API to normalize and
+    ///     <item><description>As of 3.1, <see cref="WhitespaceTokenizer"/> uses an int based API to normalize and
     ///     detect token codepoints. See <see cref="Util.CharTokenizer.IsTokenChar(int)"/> and
-    ///     <see cref="Util.CharTokenizer.Normalize(int)"/> for details.</item>
+    ///     <see cref="Util.CharTokenizer.Normalize(int)"/> for details.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceTokenizer.cs
index 98db5e7..cee9568 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceTokenizer.cs
@@ -29,9 +29,9 @@ namespace Lucene.Net.Analysis.Core
     /// You must specify the required <see cref="LuceneVersion"/> compatibility when creating
     /// <see cref="WhitespaceTokenizer"/>:
     /// <list type="bullet">
-    ///     <item>As of 3.1, <see cref="CharTokenizer"/> uses an int based API to normalize and
+    ///     <item><description>As of 3.1, <see cref="CharTokenizer"/> uses an int based API to normalize and
     ///     detect token characters. See <see cref="CharTokenizer.IsTokenChar(int)"/> and
-    ///     <see cref="CharTokenizer.Normalize(int)"/> for details.</item>
+    ///     <see cref="CharTokenizer.Normalize(int)"/> for details.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Cz/CzechAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Cz/CzechAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Cz/CzechAnalyzer.cs
index 5efdf4b..cffbe49 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Cz/CzechAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Cz/CzechAnalyzer.cs
@@ -38,10 +38,10 @@ namespace Lucene.Net.Analysis.Cz
     /// You must specify the required <see cref="LuceneVersion"/> compatibility when creating
     /// <see cref="CzechAnalyzer"/>:
     /// <list type="bullet">
-    ///     <item>As of 3.1, words are stemmed with <see cref="CzechStemFilter"/></item>
-    ///     <item>As of 2.9, StopFilter preserves position increments</item>
-    ///     <item>As of 2.4, Tokens incorrectly identified as acronyms are corrected (see
-    ///     <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1068</a>)</item>
+    ///     <item><description>As of 3.1, words are stemmed with <see cref="CzechStemFilter"/></description></item>
+    ///     <item><description>As of 2.9, StopFilter preserves position increments</description></item>
+    ///     <item><description>As of 2.4, Tokens incorrectly identified as acronyms are corrected (see
+    ///     <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1068</a>)</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
index 19a46d4..1a6a350 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
@@ -45,11 +45,11 @@ namespace Lucene.Net.Analysis.De
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating GermanAnalyzer:
     /// <list>
-    ///   <item> As of 3.6, GermanLightStemFilter is used for less aggressive stemming.</item>
-    ///   <item> As of 3.1, Snowball stemming is done with SnowballFilter, and 
-    ///        Snowball stopwords are used by default.</item>
-    ///   <item> As of 2.9, StopFilter preserves position
-    ///        increments</item>
+    ///   <item><description> As of 3.6, GermanLightStemFilter is used for less aggressive stemming.</description></item>
+    ///   <item><description> As of 3.1, Snowball stemming is done with SnowballFilter, and 
+    ///        Snowball stopwords are used by default.</description></item>
+    ///   <item><description> As of 2.9, StopFilter preserves position
+    ///        increments</description></item>
     /// </list>
     /// 
     /// </para>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilter.cs
index 7160e1c..fc4073a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilter.cs
@@ -28,10 +28,10 @@ namespace Lucene.Net.Analysis.De
     /// It allows for the fact that ä, ö and ü are sometimes written as ae, oe and ue.
     /// <para>
     /// <list>
-    ///     <item> 'ß' is replaced by 'ss'</item>
-    ///     <item> 'ä', 'ö', 'ü' are replaced by 'a', 'o', 'u', respectively.</item>
-    ///     <item> 'ae' and 'oe' are replaced by 'a', and 'o', respectively.</item>
-    ///     <item> 'ue' is replaced by 'u', when not following a vowel or q.</item>
+    ///     <item><description> 'ß' is replaced by 'ss'</description></item>
+    ///     <item><description> 'ä', 'ö', 'ü' are replaced by 'a', 'o', 'u', respectively.</description></item>
+    ///     <item><description> 'ae' and 'oe' are replaced by 'a', and 'o', respectively.</description></item>
+    ///     <item><description> 'ue' is replaced by 'u', when not following a vowel or q.</description></item>
     /// </list>
     /// </para>
     /// <para>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemmer.cs
index 99f2455..47e9074 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemmer.cs
@@ -176,12 +176,12 @@ namespace Lucene.Net.Analysis.De
         /// Do some substitutions for the term to reduce overstemming:
         /// 
         /// <list type="bullet">
-        /// <item>Substitute Umlauts with their corresponding vowel: äöü -> aou,
-        ///   "ß" is substituted by "ss"</item>
-        /// <item>Substitute a second char of a pair of equal characters with
-        ///   an asterisk: ?? -> ?*</item>
-        /// <item>Substitute some common character combinations with a token:
-        ///   sch/ch/ei/ie/ig/st -> $/§/%/&amp;/#/!</item>
+        /// <item><description>Substitute Umlauts with their corresponding vowel: äöü -> aou,
+        ///   "ß" is substituted by "ss"</description></item>
+        /// <item><description>Substitute a second char of a pair of equal characters with
+        ///   an asterisk: ?? -> ?*</description></item>
+        /// <item><description>Substitute some common character combinations with a token:
+        ///   sch/ch/ei/ie/ig/st -> $/§/%/&amp;/#/!</description></item>
         /// </list>
         /// </summary>
         private void Substitute(StringBuilder buffer)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
index db0e978..061ed9e 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
@@ -34,9 +34,9 @@ namespace Lucene.Net.Analysis.El
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="GreekAnalyzer"/>:
     /// <list type="bullet">
-    ///   <item> As of 3.1, StandardFilter and GreekStemmer are used by default.</item>
-    ///   <item> As of 2.9, StopFilter preserves position
-    ///        increments</item>
+    ///   <item><description> As of 3.1, StandardFilter and GreekStemmer are used by default.</description></item>
+    ///   <item><description> As of 2.9, StopFilter preserves position
+    ///        increments</description></item>
     /// </list>
     /// </para>
     /// <para><c>NOTE</c>: This class uses the same <see cref="LuceneVersion"/>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilter.cs
index e501475..85f4bd3 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilter.cs
@@ -28,7 +28,7 @@ namespace Lucene.Net.Analysis.El
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="GreekLowerCaseFilter"/>:
     /// <list type="bullet">
-    ///     <item> As of 3.1, supplementary characters are properly lowercased.</item>
+    ///     <item><description> As of 3.1, supplementary characters are properly lowercased.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs
index 888e7a8..5e8ac98 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs
@@ -26,9 +26,9 @@ namespace Lucene.Net.Analysis.En
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="EnglishPossessiveFilter"/>:
     /// <list type="bullet">
-    ///    <item> As of 3.6, U+2019 RIGHT SINGLE QUOTATION MARK and 
+    ///    <item><description> As of 3.6, U+2019 RIGHT SINGLE QUOTATION MARK and 
     ///         U+FF07 FULLWIDTH APOSTROPHE are also treated as
-    ///         quotation marks.</item>
+    ///         quotation marks.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
index de05df7..b537856 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
@@ -35,7 +35,7 @@ namespace Lucene.Net.Analysis.Es
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="SpanishAnalyzer"/>:
     /// <list type="bullet">
-    ///     <item> As of 3.6, <see cref="SpanishLightStemFilter"/> is used for less aggressive stemming.</item>
+    ///     <item><description> As of 3.6, <see cref="SpanishLightStemFilter"/> is used for less aggressive stemming.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizer.cs
index 81a2cb2..7840ab1 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizer.cs
@@ -28,9 +28,9 @@ namespace Lucene.Net.Analysis.Fa
     /// <para>
     /// Normalization is defined as:
     /// <list type="bullet">
-    ///     <item>Normalization of various heh + hamza forms and heh goal to heh.</item>
-    ///     <item>Normalization of farsi yeh and yeh barree to arabic yeh</item>
-    ///     <item>Normalization of persian keheh to arabic kaf</item>
+    ///     <item><description>Normalization of various heh + hamza forms and heh goal to heh.</description></item>
+    ///     <item><description>Normalization of farsi yeh and yeh barree to arabic yeh</description></item>
+    ///     <item><description>Normalization of persian keheh to arabic kaf</description></item>
     /// </list>
     /// </para>
     /// </summary>

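The three normalization rules above can be exercised directly; the normalizer rewrites a char buffer in place and returns the new length. A rough sketch, assuming the port keeps the Normalize(char[], int) signature of the Java original:

    using System;
    using Lucene.Net.Analysis.Fa;

    public static class PersianNormalizerDemo
    {
        public static void Main()
        {
            var normalizer = new PersianNormalizer();
            // Persian keheh (U+06A9) followed by heh goal (U+06C1); per the rules
            // above these should come out as arabic kaf (U+0643) and heh (U+0647).
            char[] buffer = { '\u06A9', '\u06C1' };
            int newLen = normalizer.Normalize(buffer, buffer.Length);
            Console.WriteLine(new string(buffer, 0, newLen));
        }
    }
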
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs
index a2a99ac..1d117a8 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs
@@ -40,12 +40,12 @@ namespace Lucene.Net.Analysis.Fr
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating FrenchAnalyzer:
     /// <list type="bullet">
-    ///   <item> As of 3.6, <see cref="FrenchLightStemFilter"/> is used for less aggressive stemming.</item>
-    ///   <item> As of 3.1, Snowball stemming is done with <see cref="SnowballFilter"/>, 
+    ///   <item><description> As of 3.6, <see cref="FrenchLightStemFilter"/> is used for less aggressive stemming.</description></item>
+    ///   <item><description> As of 3.1, Snowball stemming is done with <see cref="SnowballFilter"/>, 
     ///        <see cref="LowerCaseFilter"/> is used prior to <see cref="StopFilter"/>, and <see cref="ElisionFilter"/> and 
-    ///        Snowball stopwords are used by default.</item>
-    ///   <item> As of 2.9, <see cref="StopFilter"/> preserves position
-    ///        increments</item>
+    ///        Snowball stopwords are used by default.</description></item>
+    ///   <item><description> As of 2.9, <see cref="StopFilter"/> preserves position
+    ///        increments</description></item>
     /// </list>
     /// 
     /// </para>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs
index 9ee40ac..28198f2 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs
@@ -31,7 +31,7 @@ namespace Lucene.Net.Analysis.Hi
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating HindiAnalyzer:
     /// <list type="bullet">
-    ///     <item> As of 3.6, StandardTokenizer is used for tokenization</item>
+    ///     <item><description> As of 3.6, StandardTokenizer is used for tokenization</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizer.cs
index 45144a6..96bb1f9 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizer.cs
@@ -36,8 +36,8 @@ namespace Lucene.Net.Analysis.Hi
     /// Leah S. Larkey, Margaret E. Connell, and Nasreen AbdulJaleel.
     /// http://maroo.cs.umass.edu/pub/web/getpdf.php?id=454:
     /// <list type="bullet">
-    ///     <item>Internal Zero-width joiner and Zero-width non-joiners are removed</item>
-    ///     <item>In addition to chandrabindu, NA+halant is normalized to anusvara</item>
+    ///     <item><description>Internal Zero-width joiner and Zero-width non-joiners are removed</description></item>
+    ///     <item><description>In addition to chandrabindu, NA+halant is normalized to anusvara</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs
index 6ef83dc..d428e63 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs
@@ -34,9 +34,9 @@ namespace Lucene.Net.Analysis.It
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="ItalianAnalyzer"/>:
     /// <list type="bullet">
-    ///     <item> As of 3.6, <see cref="ItalianLightStemFilter"/> is used for less aggressive stemming.</item>
-    ///     <item> As of 3.2, <see cref="ElisionFilter"/> with a set of Italian 
-    ///        contractions is used by default.</item>
+    ///     <item><description> As of 3.6, <see cref="ItalianLightStemFilter"/> is used for less aggressive stemming.</description></item>
+    ///     <item><description> As of 3.2, <see cref="ElisionFilter"/> with a set of Italian 
+    ///        contractions is used by default.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemmer.cs
index cb75bef..6b37b79 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemmer.cs
@@ -25,11 +25,11 @@ namespace Lucene.Net.Analysis.Lv
     /// This is a light version of the algorithm in Karlis Kreslin's PhD thesis
     /// <c>A stemming algorithm for Latvian</c> with the following modifications:
     /// <list type="bullet">
-    ///   <item>Only explicitly stems noun and adjective morphology</item>
-    ///   <item>Stricter length/vowel checks for the resulting stems (verb etc suffix stripping is removed)</item>
-    ///   <item>Removes only the primary inflectional suffixes: case and number for nouns ; 
-    ///       case, number, gender, and definitiveness for adjectives.</item>
-    ///   <item>Palatalization is only handled when a declension II,V,VI noun suffix is removed.</item>
+    ///   <item><description>Only explicitly stems noun and adjective morphology</description></item>
+    ///   <item><description>Stricter length/vowel checks for the resulting stems (verb etc suffix stripping is removed)</description></item>
+    ///   <item><description>Removes only the primary inflectional suffixes: case and number for nouns ; 
+    ///       case, number, gender, and definitiveness for adjectives.</description></item>
+    ///   <item><description>Palatalization is only handled when a declension II,V,VI noun suffix is removed.</description></item>
     /// </list>
     /// </para>
     /// </summary>
@@ -94,10 +94,10 @@ namespace Lucene.Net.Analysis.Lv
         /// <summary>
         /// Most cases are handled except for the ambiguous ones:
         /// <list type="bullet">
-        ///     <item> s -> š</item>
-        ///     <item> t -> š</item>
-        ///     <item> d -> ž</item>
-        ///     <item> z -> ž</item>
+        ///     <item><description> s -> š</description></item>
+        ///     <item><description> t -> š</description></item>
+        ///     <item><description> d -> ž</description></item>
+        ///     <item><description> z -> ž</description></item>
         /// </list>
         /// </summary>
         private int Unpalatalize(char[] s, int len)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilter.cs
index 582a461..f735ef7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilter.cs
@@ -30,22 +30,22 @@ namespace Lucene.Net.Analysis.Miscellaneous
     /// those characters with reasonable ASCII alternatives are converted:
     /// 
     /// <ul>
-    ///   <item>C1 Controls and Latin-1 Supplement: <a href="http://www.unicode.org/charts/PDF/U0080.pdf">http://www.unicode.org/charts/PDF/U0080.pdf</a></item>
-    ///   <item>Latin Extended-A: <a href="http://www.unicode.org/charts/PDF/U0100.pdf">http://www.unicode.org/charts/PDF/U0100.pdf</a></item>
-    ///   <item>Latin Extended-B: <a href="http://www.unicode.org/charts/PDF/U0180.pdf">http://www.unicode.org/charts/PDF/U0180.pdf</a></item>
-    ///   <item>Latin Extended Additional: <a href="http://www.unicode.org/charts/PDF/U1E00.pdf">http://www.unicode.org/charts/PDF/U1E00.pdf</a></item>
-    ///   <item>Latin Extended-C: <a href="http://www.unicode.org/charts/PDF/U2C60.pdf">http://www.unicode.org/charts/PDF/U2C60.pdf</a></item>
-    ///   <item>Latin Extended-D: <a href="http://www.unicode.org/charts/PDF/UA720.pdf">http://www.unicode.org/charts/PDF/UA720.pdf</a></item>
-    ///   <item>IPA Extensions: <a href="http://www.unicode.org/charts/PDF/U0250.pdf">http://www.unicode.org/charts/PDF/U0250.pdf</a></item>
-    ///   <item>Phonetic Extensions: <a href="http://www.unicode.org/charts/PDF/U1D00.pdf">http://www.unicode.org/charts/PDF/U1D00.pdf</a></item>
-    ///   <item>Phonetic Extensions Supplement: <a href="http://www.unicode.org/charts/PDF/U1D80.pdf">http://www.unicode.org/charts/PDF/U1D80.pdf</a></item>
-    ///   <item>General Punctuation: <a href="http://www.unicode.org/charts/PDF/U2000.pdf">http://www.unicode.org/charts/PDF/U2000.pdf</a></item>
-    ///   <item>Superscripts and Subscripts: <a href="http://www.unicode.org/charts/PDF/U2070.pdf">http://www.unicode.org/charts/PDF/U2070.pdf</a></item>
-    ///   <item>Enclosed Alphanumerics: <a href="http://www.unicode.org/charts/PDF/U2460.pdf">http://www.unicode.org/charts/PDF/U2460.pdf</a></item>
-    ///   <item>Dingbats: <a href="http://www.unicode.org/charts/PDF/U2700.pdf">http://www.unicode.org/charts/PDF/U2700.pdf</a></item>
-    ///   <item>Supplemental Punctuation: <a href="http://www.unicode.org/charts/PDF/U2E00.pdf">http://www.unicode.org/charts/PDF/U2E00.pdf</a></item>
-    ///   <item>Alphabetic Presentation Forms: <a href="http://www.unicode.org/charts/PDF/UFB00.pdf">http://www.unicode.org/charts/PDF/UFB00.pdf</a></item>
-    ///   <item>Halfwidth and Fullwidth Forms: <a href="http://www.unicode.org/charts/PDF/UFF00.pdf">http://www.unicode.org/charts/PDF/UFF00.pdf</a></item>
+    ///   <item><description>C1 Controls and Latin-1 Supplement: <a href="http://www.unicode.org/charts/PDF/U0080.pdf">http://www.unicode.org/charts/PDF/U0080.pdf</a></description></item>
+    ///   <item><description>Latin Extended-A: <a href="http://www.unicode.org/charts/PDF/U0100.pdf">http://www.unicode.org/charts/PDF/U0100.pdf</a></description></item>
+    ///   <item><description>Latin Extended-B: <a href="http://www.unicode.org/charts/PDF/U0180.pdf">http://www.unicode.org/charts/PDF/U0180.pdf</a></description></item>
+    ///   <item><description>Latin Extended Additional: <a href="http://www.unicode.org/charts/PDF/U1E00.pdf">http://www.unicode.org/charts/PDF/U1E00.pdf</a></description></item>
+    ///   <item><description>Latin Extended-C: <a href="http://www.unicode.org/charts/PDF/U2C60.pdf">http://www.unicode.org/charts/PDF/U2C60.pdf</a></description></item>
+    ///   <item><description>Latin Extended-D: <a href="http://www.unicode.org/charts/PDF/UA720.pdf">http://www.unicode.org/charts/PDF/UA720.pdf</a></description></item>
+    ///   <item><description>IPA Extensions: <a href="http://www.unicode.org/charts/PDF/U0250.pdf">http://www.unicode.org/charts/PDF/U0250.pdf</a></description></item>
+    ///   <item><description>Phonetic Extensions: <a href="http://www.unicode.org/charts/PDF/U1D00.pdf">http://www.unicode.org/charts/PDF/U1D00.pdf</a></description></item>
+    ///   <item><description>Phonetic Extensions Supplement: <a href="http://www.unicode.org/charts/PDF/U1D80.pdf">http://www.unicode.org/charts/PDF/U1D80.pdf</a></description></item>
+    ///   <item><description>General Punctuation: <a href="http://www.unicode.org/charts/PDF/U2000.pdf">http://www.unicode.org/charts/PDF/U2000.pdf</a></description></item>
+    ///   <item><description>Superscripts and Subscripts: <a href="http://www.unicode.org/charts/PDF/U2070.pdf">http://www.unicode.org/charts/PDF/U2070.pdf</a></description></item>
+    ///   <item><description>Enclosed Alphanumerics: <a href="http://www.unicode.org/charts/PDF/U2460.pdf">http://www.unicode.org/charts/PDF/U2460.pdf</a></description></item>
+    ///   <item><description>Dingbats: <a href="http://www.unicode.org/charts/PDF/U2700.pdf">http://www.unicode.org/charts/PDF/U2700.pdf</a></description></item>
+    ///   <item><description>Supplemental Punctuation: <a href="http://www.unicode.org/charts/PDF/U2E00.pdf">http://www.unicode.org/charts/PDF/U2E00.pdf</a></description></item>
+    ///   <item><description>Alphabetic Presentation Forms: <a href="http://www.unicode.org/charts/PDF/UFB00.pdf">http://www.unicode.org/charts/PDF/UFB00.pdf</a></description></item>
+    ///   <item><description>Halfwidth and Fullwidth Forms: <a href="http://www.unicode.org/charts/PDF/UFF00.pdf">http://www.unicode.org/charts/PDF/UFF00.pdf</a></description></item>
     /// </ul>
     /// <para/>
     /// See: <a href="http://en.wikipedia.org/wiki/Latin_characters_in_Unicode">http://en.wikipedia.org/wiki/Latin_characters_in_Unicode</a>

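A quick way to see the folding in action is to wrap any tokenizer with the filter and read the terms back. A rough sketch, assuming the 4.8-style constructor and attribute API:

    using System;
    using System.IO;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.Miscellaneous;
    using Lucene.Net.Analysis.TokenAttributes;
    using Lucene.Net.Util;

    public static class AsciiFoldingDemo
    {
        public static void Main()
        {
            using (var reader = new StringReader("résumé naïve Über"))
            using (var tokenizer = new WhitespaceTokenizer(LuceneVersion.LUCENE_48, reader))
            using (var stream = new ASCIIFoldingFilter(tokenizer))
            {
                ICharTermAttribute term = stream.AddAttribute<ICharTermAttribute>();
                stream.Reset();
                while (stream.IncrementToken())
                    Console.WriteLine(term.ToString()); // resume, naive, Uber
                stream.End();
            }
        }
    }
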
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilter.cs
index f79ef5e..1e8ac7d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilter.cs
@@ -98,41 +98,41 @@ namespace Lucene.Net.Analysis.Miscellaneous
     /// Splits words into subwords and performs optional transformations on subword
     /// groups. Words are split into subwords with the following rules:
     /// <list type="bullet">
-    ///     <item>split on intra-word delimiters (by default, all non alpha-numeric
-    ///         characters): <c>"Wi-Fi"</c> &#8594; <c>"Wi", "Fi"</c></item>
-    ///     <item>split on case transitions: <c>"PowerShot"</c> &#8594;
-    ///         <c>"Power", "Shot"</c></item>
-    ///     <item>split on letter-number transitions: <c>"SD500"</c> &#8594;
-    ///         <c>"SD", "500"</c></item>
-    ///     <item>leading and trailing intra-word delimiters on each subword are ignored:
+    ///     <item><description>split on intra-word delimiters (by default, all non alpha-numeric
+    ///         characters): <c>"Wi-Fi"</c> &#8594; <c>"Wi", "Fi"</c></description></item>
+    ///     <item><description>split on case transitions: <c>"PowerShot"</c> &#8594;
+    ///         <c>"Power", "Shot"</c></description></item>
+    ///     <item><description>split on letter-number transitions: <c>"SD500"</c> &#8594;
+    ///         <c>"SD", "500"</c></description></item>
+    ///     <item><description>leading and trailing intra-word delimiters on each subword are ignored:
     ///         <c>"//hello---there, 'dude'"</c> &#8594;
-    ///         <c>"hello", "there", "dude"</c></item>
-    ///     <item>trailing "'s" are removed for each subword: <c>"O'Neil's"</c>
+    ///         <c>"hello", "there", "dude"</c></description></item>
+    ///     <item><description>trailing "'s" are removed for each subword: <c>"O'Neil's"</c>
     ///         &#8594; <c>"O", "Neil"</c>
     ///         <ul>
-    ///             <item>Note: this step isn't performed in a separate filter because of possible
-    ///                 subword combinations.</item>
+    ///             <item><description>Note: this step isn't performed in a separate filter because of possible
+    ///                 subword combinations.</description></item>
     ///         </ul>
-    ///     </item>
+    ///     </description></item>
     /// </list>
     /// <para/>
     /// The <b>combinations</b> parameter affects how subwords are combined:
     /// <list type="bullet">
-    ///     <item>combinations="0" causes no subword combinations: <code>"PowerShot"</code>
-    ///         &#8594; <c>0:"Power", 1:"Shot"</c> (0 and 1 are the token positions)</item>
-    ///     <item>combinations="1" means that in addition to the subwords, maximum runs of
+    ///     <item><description>combinations="0" causes no subword combinations: <code>"PowerShot"</code>
+    ///         &#8594; <c>0:"Power", 1:"Shot"</c> (0 and 1 are the token positions)</description></item>
+    ///     <item><description>combinations="1" means that in addition to the subwords, maximum runs of
     ///         non-numeric subwords are catenated and produced at the same position of the
     ///         last subword in the run:
     ///         <ul>
-    ///             <item><c>"PowerShot"</c> &#8594;
-    ///                 <c>0:"Power", 1:"Shot" 1:"PowerShot"</c></item>
-    ///             <item><c>"A's+B's&amp;C's"</c> -gt; <c>0:"A", 1:"B", 2:"C", 2:"ABC"</c>
-    ///             </item>
-    ///             <item><c>"Super-Duper-XL500-42-AutoCoder!"</c> &#8594;
+    ///             <item><description><c>"PowerShot"</c> &#8594;
+    ///                 <c>0:"Power", 1:"Shot" 1:"PowerShot"</c></description></item>
+    ///             <item><description><c>"A's+B's&amp;C's"</c> -gt; <c>0:"A", 1:"B", 2:"C", 2:"ABC"</c>
+    ///             </description></item>
+    ///             <item><description><c>"Super-Duper-XL500-42-AutoCoder!"</c> &#8594;
     ///                 <c>0:"Super", 1:"Duper", 2:"XL", 2:"SuperDuperXL", 3:"500" 4:"42", 5:"Auto", 6:"Coder", 6:"AutoCoder"</c>
-    ///             </item>
+    ///             </description></item>
     ///         </ul>
-    ///     </item>
+    ///     </description></item>
     /// </list>
     /// <para/>
     /// One use for <see cref="WordDelimiterFilter"/> is to help match words with different

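The split and combination rules above are all driven by flag bits handed to the constructor. A rough sketch, assuming the WordDelimiterFlags names of the current Lucene.NET surface (the commit-era code may still expose these as int constants on the filter itself):

    using System;
    using System.IO;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.Miscellaneous;
    using Lucene.Net.Analysis.TokenAttributes;
    using Lucene.Net.Analysis.Util;
    using Lucene.Net.Util;

    public static class WordDelimiterDemo
    {
        public static void Main()
        {
            // Split on case transitions and also emit the catenated run
            // (the combinations="1" behavior described in the docs above).
            var flags = WordDelimiterFlags.GENERATE_WORD_PARTS
                      | WordDelimiterFlags.SPLIT_ON_CASE_CHANGE
                      | WordDelimiterFlags.CATENATE_WORDS;
            using (var reader = new StringReader("PowerShot"))
            using (var tokenizer = new WhitespaceTokenizer(LuceneVersion.LUCENE_48, reader))
            using (var stream = new WordDelimiterFilter(LuceneVersion.LUCENE_48, tokenizer, flags, (CharArraySet)null))
            {
                ICharTermAttribute term = stream.AddAttribute<ICharTermAttribute>();
                stream.Reset();
                while (stream.IncrementToken())
                    Console.WriteLine(term.ToString()); // 0:"Power", 1:"Shot", 1:"PowerShot"
                stream.End();
            }
        }
    }
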
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/NGram/EdgeNGramTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/NGram/EdgeNGramTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/NGram/EdgeNGramTokenizer.cs
index ed2cb3d..7ecb1e5 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/NGram/EdgeNGramTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/NGram/EdgeNGramTokenizer.cs
@@ -27,12 +27,12 @@ namespace Lucene.Net.Analysis.NGram
     /// </para>
     /// <para>As of Lucene 4.4, this tokenizer
     /// <list type="bullet">
-    ///     <item>can handle <code>maxGram</code> larger than 1024 chars, but beware that this will result in increased memory usage</item>
-    ///     <item>doesn't trim the input,</item>
-    ///     <item>sets position increments equal to 1 instead of 1 for the first token and 0 for all other ones</item>
-    ///     <item>doesn't support backward n-grams anymore.</item>
-    ///     <item>supports <see cref="Util.CharTokenizer.IsTokenChar(int)"/> pre-tokenization,</item>
-    ///     <item>correctly handles supplementary characters.</item>
+    ///     <item><description>can handle <code>maxGram</code> larger than 1024 chars, but beware that this will result in increased memory usage</description></item>
+    ///     <item><description>doesn't trim the input,</description></item>
+    ///     <item><description>sets position increments equal to 1 instead of 1 for the first token and 0 for all other ones</description></item>
+    ///     <item><description>doesn't support backward n-grams anymore.</description></item>
+    ///     <item><description>supports <see cref="Util.CharTokenizer.IsTokenChar(int)"/> pre-tokenization,</description></item>
+    ///     <item><description>correctly handles supplementary characters.</description></item>
     /// </list>
     /// </para>
     /// <para>Although <b style="color:red">highly</b> discouraged, it is still possible

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenFilter.cs
index 2b0af35..416c96f 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenFilter.cs
@@ -27,12 +27,12 @@ namespace Lucene.Net.Analysis.NGram
     /// <para>You must specify the required <see cref="LuceneVersion"/> compatibility when
     /// creating a <see cref="NGramTokenFilter"/>. As of Lucene 4.4, this token filters:
     /// <list type="bullet">
-    ///     <item>handles supplementary characters correctly,</item>
-    ///     <item>emits all n-grams for the same token at the same position,</item>
-    ///     <item>does not modify offsets,</item>
-    ///     <item>sorts n-grams by their offset in the original token first, then
+    ///     <item><description>handles supplementary characters correctly,</description></item>
+    ///     <item><description>emits all n-grams for the same token at the same position,</description></item>
+    ///     <item><description>does not modify offsets,</description></item>
+    ///     <item><description>sorts n-grams by their offset in the original token first, then
     ///         increasing length (meaning that "abc" will give "a", "ab", "abc", "b", "bc",
-    ///         "c").</item>
+    ///         "c").</description></item>
     /// </list>
     /// </para>
     /// <para>You can make this filter use the old behavior by providing a version &lt;

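The sort order in the last bullet is easy to verify by feeding a single keyword token through the filter. A rough sketch under the 4.4+ semantics:

    using System;
    using System.IO;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.NGram;
    using Lucene.Net.Analysis.TokenAttributes;
    using Lucene.Net.Util;

    public static class NGramDemo
    {
        public static void Main()
        {
            using (var reader = new StringReader("abc"))
            using (var tokenizer = new KeywordTokenizer(reader))
            using (var stream = new NGramTokenFilter(LuceneVersion.LUCENE_48, tokenizer, 1, 3)) // minGram=1, maxGram=3
            {
                ICharTermAttribute term = stream.AddAttribute<ICharTermAttribute>();
                stream.Reset();
                while (stream.IncrementToken())
                    Console.Write(term.ToString() + " "); // a ab abc b bc c
                stream.End();
            }
        }
    }
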
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenizer.cs
index 0fe3792..bd62835 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenizer.cs
@@ -85,11 +85,11 @@ namespace Lucene.Net.Analysis.NGram
     /// </para>
     /// <para>This tokenizer changed a lot in Lucene 4.4 in order to:
     /// <list type="bullet">
-    ///     <item>tokenize in a streaming fashion to support streams which are larger
-    ///         than 1024 chars (limit of the previous version),</item>
-    ///     <item>count grams based on unicode code points instead of java chars (and
-    ///         never split in the middle of surrogate pairs),</item>
-    ///     <item>give the ability to pre-tokenize the stream (<see cref="IsTokenChar(int)"/>)
-    ///         before computing n-grams.</item>
+    ///     <item><description>tokenize in a streaming fashion to support streams which are larger
+    ///         than 1024 chars (limit of the previous version),</description></item>
+    ///     <item><description>count grams based on unicode code points instead of java chars (and
+    ///         never split in the middle of surrogate pairs),</description></item>
+    ///     <item><description>give the ability to pre-tokenize the stream (<see cref="IsTokenChar(int)"/>)
+    ///         before computing n-grams.</description></item>
     /// </list>
     /// </para>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchAnalyzer.cs
index 0718e8d..07ce34a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchAnalyzer.cs
@@ -41,14 +41,14 @@ namespace Lucene.Net.Analysis.Nl
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="DutchAnalyzer"/>:
     /// <list type="bullet">
-    ///   <item> As of 3.6, <see cref="DutchAnalyzer(LuceneVersion, CharArraySet)"/> and
+    ///   <item><description> As of 3.6, <see cref="DutchAnalyzer(LuceneVersion, CharArraySet)"/> and
     ///        <see cref="DutchAnalyzer(LuceneVersion, CharArraySet, CharArraySet)"/> also populate
-    ///        the default entries for the stem override dictionary</item>
-    ///   <item> As of 3.1, Snowball stemming is done with SnowballFilter, 
+    ///        the default entries for the stem override dictionary</description></item>
+    ///   <item><description> As of 3.1, Snowball stemming is done with SnowballFilter, 
     ///        LowerCaseFilter is used prior to StopFilter, and Snowball 
-    ///        stopwords are used by default.</item>
-    ///   <item> As of 2.9, StopFilter preserves position
-    ///        increments</item>
+    ///        stopwords are used by default.</description></item>
+    ///   <item><description> As of 2.9, StopFilter preserves position
+    ///        increments</description></item>
     /// </list>
     /// 
     /// </para>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizer.cs
index 53f58bd..8717692 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizer.cs
@@ -27,8 +27,8 @@ namespace Lucene.Net.Analysis.Pattern
     /// for the input stream.  It takes two arguments:  "pattern" and "group".
     /// <para/>
     /// <list type="bullet">
-    ///     <item>"pattern" is the regular expression.</item>
-    ///     <item>"group" says which group to extract into tokens.</item>
+    ///     <item><description>"pattern" is the regular expression.</description></item>
+    ///     <item><description>"group" says which group to extract into tokens.</description></item>
     /// </list>
     /// <para>
     /// group=-1 (the default) is equivalent to "split".  In this case, the tokens will

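Both arguments map one-to-one onto the tokenizer's constructor. A rough sketch, assuming the Regex-based constructor of the .NET port:

    using System;
    using System.IO;
    using System.Text.RegularExpressions;
    using Lucene.Net.Analysis.Pattern;
    using Lucene.Net.Analysis.TokenAttributes;

    public static class PatternTokenizerDemo
    {
        public static void Main()
        {
            // group = -1: the pattern acts as the delimiter ("split" semantics).
            using (var reader = new StringReader("one, two,three"))
            using (var stream = new PatternTokenizer(reader, new Regex(@",\s*"), -1))
            {
                ICharTermAttribute term = stream.AddAttribute<ICharTermAttribute>();
                stream.Reset();
                while (stream.IncrementToken())
                    Console.WriteLine(term.ToString()); // one, two, three
                stream.End();
            }
        }
    }
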
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizerFactory.cs
index 20e7897..c62521a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizerFactory.cs
@@ -29,8 +29,8 @@ namespace Lucene.Net.Analysis.Pattern
     /// for the input stream.  It takes two arguments:  "pattern" and "group".
     /// <para/>
     /// <list type="bullet">
-    ///     <item>"pattern" is the regular expression.</item>
-    ///     <item>"group" says which group to extract into tokens.</item>
+    ///     <item><description>"pattern" is the regular expression.</description></item>
+    ///     <item><description>"group" says which group to extract into tokens.</description></item>
     /// </list>
     /// <para>
     /// group=-1 (the default) is equivalent to "split".  In this case, the tokens will

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs
index c31fccd..5f09576 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs
@@ -32,7 +32,7 @@ namespace Lucene.Net.Analysis.Pt
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="PortugueseAnalyzer"/>:
     /// <list type="bullet">
-    ///     <item> As of 3.6, PortugueseLightStemFilter is used for less aggressive stemming.</item>
+    ///     <item><description> As of 3.6, PortugueseLightStemFilter is used for less aggressive stemming.</description></item>
     /// </list>
     /// </para>
     /// </summary>

