lucenenet-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From d...@apache.org
Subject [Lucene.Net] svn commit: r1082321 [3/4] - in /incubator/lucene.net/trunk/C#/src: Lucene.Net/Analysis/ Lucene.Net/Analysis/Standard/ Lucene.Net/Index/ Lucene.Net/Search/ Lucene.Net/Store/ Lucene.Net/Util/ Test/Index/ Test/Search/ Test/Util/
Date Wed, 16 Mar 2011 22:14:43 GMT
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/TermVectorsTermsWriterPerField.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/TermVectorsTermsWriterPerField.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/TermVectorsTermsWriterPerField.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/TermVectorsTermsWriterPerField.cs Wed Mar 16 22:14:41 2011
@@ -82,18 +82,18 @@ namespace Lucene.Net.Index
 					perThread.doc.docID = docState.docID;
 					System.Diagnostics.Debug.Assert(perThread.doc.numVectorFields == 0);
 					System.Diagnostics.Debug.Assert(0 == perThread.doc.perDocTvf.Length());
-                    System.Diagnostics.Debug.Assert(0 == perThread.doc.perDocTvf.GetFilePointer());
-				}
-				else
-				{
-					System.Diagnostics.Debug.Assert(perThread.doc.docID == docState.docID);
-					
-					if (termsHashPerField.numPostings != 0)
-					// Only necessary if previous doc hit a
-					// non-aborting exception while writing vectors in
-					// this field:
-						termsHashPerField.Reset();
+					System.Diagnostics.Debug.Assert(0 == perThread.doc.perDocTvf.GetFilePointer());
 				}
+
+                System.Diagnostics.Debug.Assert(perThread.doc.docID == docState.docID);
+                if (termsHashPerField.numPostings != 0)
+                {
+                    // Only necessary if previous doc hit a
+                    // non-aborting exception while writing vectors in
+                    // this field:
+                    termsHashPerField.Reset();
+                    perThread.termsHashPerThread.Reset(false);
+                }
 			}
 			
 			// TODO: only if needed for performance
@@ -109,7 +109,7 @@ namespace Lucene.Net.Index
 		/// <summary>Called once per field per document if term vectors
 		/// are enabled, to write the vectors to
 		/// RAMOutputStream, which is then quickly flushed to
-		/// * the real term vectors files in the Directory. 
+		/// the real term vectors files in the Directory. 
 		/// </summary>
 		internal override void  Finish()
 		{
@@ -125,8 +125,8 @@ namespace Lucene.Net.Index
 			
 			if (numPostings > maxNumPostings)
 				maxNumPostings = numPostings;
-
-            IndexOutput tvf = perThread.doc.perDocTvf;
+			
+			IndexOutput tvf = perThread.doc.perDocTvf;
 			
 			// This is called once, after inverting all occurences
 			// of a given field in the doc.  At this point we flush
@@ -206,6 +206,12 @@ namespace Lucene.Net.Index
 			}
 			
 			termsHashPerField.Reset();
+
+            // NOTE: we clear, per-field, at the thread level,
+            // because term vectors fully write themselves on each
+            // field; this saves RAM (eg if large doc has two large
+            // fields w/ term vectors on) because we recycle/reuse
+            // all RAM after each field:
 			perThread.termsHashPerThread.Reset(false);
 		}
 		

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/TermsHash.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/TermsHash.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/TermsHash.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/TermsHash.cs Wed Mar 16 22:14:41 2011
@@ -39,9 +39,7 @@ namespace Lucene.Net.Index
 		internal int bytesPerPosting;
 		internal int postingsFreeChunk;
 		internal DocumentsWriter docWriter;
-		
-		private TermsHash primaryTermsHash;
-		
+						
 		private RawPostingList[] postingsFreeList = new RawPostingList[1];
 		private int postingsFreeCount;
 		private int postingsAllocCount;
@@ -78,25 +76,35 @@ namespace Lucene.Net.Index
 			this.fieldInfos = fieldInfos;
 			consumer.SetFieldInfos(fieldInfos);
 		}
-		
+
+        // NOTE: do not make this sync'd; it's not necessary (DW
+        // ensures all other threads are idle), and it leads to
+        // deadlock
 		public override void  Abort()
 		{
-			lock (this)
-			{
-				consumer.Abort();
-				if (nextTermsHash != null)
-					nextTermsHash.Abort();
-			}
+			consumer.Abort();
+			if (nextTermsHash != null)
+				nextTermsHash.Abort();
 		}
 		
 		internal void  ShrinkFreePostings(System.Collections.IDictionary threadsAndFields, SegmentWriteState state)
 		{
 			
 			System.Diagnostics.Debug.Assert(postingsFreeCount == postingsAllocCount, "Thread.currentThread().getName()" + ": postingsFreeCount=" + postingsFreeCount + " postingsAllocCount=" + postingsAllocCount + " consumer=" + consumer);
-			
-			int newSize = ArrayUtil.GetShrinkSize(postingsFreeList.Length, postingsAllocCount);
+
+            int newSize = 1;
 			if (newSize != postingsFreeList.Length)
 			{
+                if (postingsFreeCount > newSize)
+                {
+                    if (trackAllocations)
+                    {
+                        docWriter.BytesAllocated(-(postingsFreeCount - newSize) * bytesPerPosting);
+                    }
+                    postingsFreeCount = newSize;
+                    postingsAllocCount = newSize;
+                }
+
 				RawPostingList[] newArray = new RawPostingList[newSize];
 				Array.Copy(postingsFreeList, 0, newArray, 0, postingsFreeCount);
 				postingsFreeList = newArray;
@@ -172,36 +180,42 @@ namespace Lucene.Net.Index
 		
 		public override bool FreeRAM()
 		{
-			lock (this)
-			{
+			if (!trackAllocations)
+				return false;
 				
-				if (!trackAllocations)
-					return false;
-				
-				bool any;
-				int numToFree;
-				if (postingsFreeCount >= postingsFreeChunk)
-					numToFree = postingsFreeChunk;
-				else
-					numToFree = postingsFreeCount;
-				any = numToFree > 0;
-				if (any)
-				{
+			bool any;
+			long bytesFreed = 0;
+            lock (this)
+            {
+                int numToFree;
+                if (postingsFreeCount >= postingsFreeChunk)
+                    numToFree = postingsFreeChunk;
+                else
+                    numToFree = postingsFreeCount;
+                any = numToFree > 0;
+                if (any)
+                {
                     for (int i = postingsFreeCount - numToFree; i < postingsFreeCount; i++)
                     {
                         postingsFreeList[i] = null;
                     }
-					postingsFreeCount -= numToFree;
-					postingsAllocCount -= numToFree;
-					docWriter.BytesAllocated((- numToFree) * bytesPerPosting);
-					any = true;
-				}
+                    //Arrays.fill(postingsFreeList, postingsFreeCount - numToFree, postingsFreeCount, null);
+                    postingsFreeCount -= numToFree;
+                    postingsAllocCount -= numToFree;
+                    bytesFreed = -numToFree * bytesPerPosting;
+                    any = true;
+                }
+            }
+
+			if (any)
+			{
+                docWriter.BytesAllocated(bytesFreed);
+			}
 				
-				if (nextTermsHash != null)
-					any |= nextTermsHash.FreeRAM();
+			if (nextTermsHash != null)
+				any |= nextTermsHash.FreeRAM();
 				
-				return any;
-			}
+			return any;
 		}
 		
 		public void  RecyclePostings(RawPostingList[] postings, int numPostings)

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/TermsHashPerField.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/TermsHashPerField.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/TermsHashPerField.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/TermsHashPerField.cs Wed Mar 16 22:14:41 2011
@@ -80,14 +80,8 @@ namespace Lucene.Net.Index
 		internal void  ShrinkHash(int targetSize)
 		{
 			System.Diagnostics.Debug.Assert(postingsCompacted || numPostings == 0);
-			
-			// Cannot use ArrayUtil.shrink because we require power
-			// of 2:
-			int newSize = postingsHash.Length;
-			while (newSize >= 8 && newSize / 4 > targetSize)
-			{
-				newSize /= 2;
-			}
+
+            int newSize = 4;
 			
 			if (newSize != postingsHash.Length)
 			{
@@ -96,6 +90,7 @@ namespace Lucene.Net.Index
 				postingsHashHalfSize = newSize / 2;
 				postingsHashMask = newSize - 1;
 			}
+            System.Array.Clear(postingsHash,0,postingsHash.Length);
 		}
 		
 		public void  Reset()

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/CachingSpanFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Search/CachingSpanFilter.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/CachingSpanFilter.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/CachingSpanFilter.cs Wed Mar 16 22:14:41 2011
@@ -33,15 +33,45 @@ namespace Lucene.Net.Search
 		
 		/// <summary> A transient Filter cache.</summary>
 		[NonSerialized]
-		protected internal System.Collections.IDictionary cache;
-		
-		/// <param name="filter">Filter to cache results of
+        internal CachingWrapperFilter.FilterCache cache;
+
+        /// <summary>
+        /// New deletions always result in a cache miss, by default
+        /// ({@link CachingWrapperFilter.DeletesMode#RECACHE}.
+        /// <param name="filter">Filter to cache results of
 		/// </param>
-		public CachingSpanFilter(SpanFilter filter)
+        /// </summary>
+        public CachingSpanFilter(SpanFilter filter): this(filter, CachingWrapperFilter.DeletesMode.RECACHE)
 		{
-			this.filter = filter;
+			
 		}
-		
+
+        /**
+        * @param filter Filter to cache results of
+        * @param deletesMode See {@link CachingWrapperFilter.DeletesMode}
+        */
+        public CachingSpanFilter(SpanFilter filter, CachingWrapperFilter.DeletesMode deletesMode)
+        {
+            this.filter = filter;
+            if (deletesMode == CachingWrapperFilter.DeletesMode.DYNAMIC)
+            {
+                throw new System.ArgumentException("DeletesMode.DYNAMIC is not supported");
+            }
+            this.cache = new AnonymousFilterCache(deletesMode);
+        }
+
+        class AnonymousFilterCache : CachingWrapperFilter.FilterCache
+        {
+            public AnonymousFilterCache(CachingWrapperFilter.DeletesMode deletesMode) : base(deletesMode)
+            {
+            }
+
+            protected override object MergeDeletes(IndexReader reader, object docIdSet)
+            {
+                throw new System.ArgumentException("DeletesMode.DYNAMIC is not supported");
+            }
+        }
+
 		/// <deprecated> Use {@link #GetDocIdSet(IndexReader)} instead.
 		/// </deprecated>
         [Obsolete("Use GetDocIdSet(IndexReader) instead.")]
@@ -56,26 +86,26 @@ namespace Lucene.Net.Search
 			SpanFilterResult result = GetCachedResult(reader);
 			return result != null?result.GetDocIdSet():null;
 		}
-		
+
+        // for testing
+        public int hitCount, missCount;
+
 		private SpanFilterResult GetCachedResult(IndexReader reader)
 		{
-			SpanFilterResult result = null;
-			if (cache == null)
-			{
-                cache = new SupportClass.WeakHashTable();
-			}
-			
-			lock (cache.SyncRoot)
-			{
-				// check cache
-				result = (SpanFilterResult) cache[reader];
-				if (result == null)
-				{
-					result = filter.BitSpans(reader);
-					cache[reader] = result;
-				}
-			}
-			return result;
+            object coreKey = reader.GetFieldCacheKey();
+            object delCoreKey = reader.HasDeletions() ? reader.GetDeletesCacheKey() : coreKey;
+
+            SpanFilterResult result = (SpanFilterResult) cache.Get(reader, coreKey, delCoreKey);
+            if (result != null) {
+                hitCount++;
+                return result;
+            }
+
+            missCount++;
+            result = filter.BitSpans(reader);
+
+            cache.Put(coreKey, delCoreKey, result);
+            return result;
 		}
 		
 		

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/CachingWrapperFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Search/CachingWrapperFilter.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/CachingWrapperFilter.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/CachingWrapperFilter.cs Wed Mar 16 22:14:41 2011
@@ -16,11 +16,13 @@
  */
 
 using System;
-
+using System.Collections;
+using System.Collections.Generic;
 using System.Runtime.InteropServices;
 using IndexReader = Lucene.Net.Index.IndexReader;
 using DocIdBitSet = Lucene.Net.Util.DocIdBitSet;
 using OpenBitSetDISI = Lucene.Net.Util.OpenBitSetDISI;
+using Lucene.Net.Util;
 
 namespace Lucene.Net.Search
 {
@@ -33,33 +35,195 @@ namespace Lucene.Net.Search
 	{
 		protected internal Filter filter;
 		
-		/// <summary> A transient Filter cache.</summary>
-		[NonSerialized]
-		protected internal System.Collections.IDictionary cache;
-		
-		/// <param name="filter">Filter to cache results of
-		/// </param>
-		public CachingWrapperFilter(Filter filter)
+	    /**
+       * Expert: Specifies how new deletions against a reopened
+       * reader should be handled.
+       *
+       * <p>The default is IGNORE, which means the cache entry
+       * will be re-used for a given segment, even when that
+       * segment has been reopened due to changes in deletions.
+       * This is a big performance gain, especially with
+       * near-real-timer readers, since you don't hit a cache
+       * miss on every reopened reader for prior segments.</p>
+       *
+       * <p>However, in some cases this can cause invalid query
+       * results, allowing deleted documents to be returned.
+       * This only happens if the main query does not rule out
+       * deleted documents on its own, such as a toplevel
+       * ConstantScoreQuery.  To fix this, use RECACHE to
+       * re-create the cached filter (at a higher per-reopen
+       * cost, but at faster subsequent search performance), or
+       * use DYNAMIC to dynamically intersect deleted docs (fast
+       * reopen time but some hit to search performance).</p>
+       */
+        [Serializable]
+        public class DeletesMode : Parameter
+        {
+            private DeletesMode(String name) : base(name)
+            {
+            }
+            public static DeletesMode IGNORE = new DeletesMode("IGNORE");
+            public static DeletesMode RECACHE = new DeletesMode("RECACHE");
+            public static DeletesMode DYNAMIC = new DeletesMode("DYNAMIC");
+        }
+
+		internal FilterCache cache;
+
+        [Serializable]
+        abstract internal class FilterCache 
+        {
+            /**
+             * A transient Filter cache (package private because of test)
+             */
+            // NOTE: not final so that we can dynamically re-init
+            // after de-serialize
+            volatile IDictionary cache;
+
+            private DeletesMode deletesMode;
+
+            public FilterCache(DeletesMode deletesMode)
+            {
+                this.deletesMode = deletesMode;
+            }
+
+            public Object Get(IndexReader reader, object coreKey, object delCoreKey)
+            {
+                lock (this)
+                {
+                    object value;
+
+                    if (cache == null)
+                    {
+                        cache = new SupportClass.WeakHashTable();
+                    }
+
+                    if (deletesMode == DeletesMode.IGNORE)
+                    {
+                        // key on core
+                        value = cache[coreKey];
+                    }
+                    else if (deletesMode == DeletesMode.RECACHE)
+                    {
+                        // key on deletes, if any, else core
+                        value = cache[delCoreKey];
+                    }
+                    else
+                    {
+
+                        System.Diagnostics.Debug.Assert(deletesMode == DeletesMode.DYNAMIC);
+
+                        // first try for exact match
+                        value = cache[delCoreKey];
+
+                        if (value == null)
+                        {
+                            // now for core match, but dynamically AND NOT
+                            // deletions
+                            value = cache[coreKey];
+                            if (value != null && reader.HasDeletions())
+                            {
+                                value = MergeDeletes(reader, value);
+                            }
+                        }
+                    }
+                    return value;
+                }
+
+            }
+       
+            protected abstract object MergeDeletes(IndexReader reader, object value);
+
+            public void Put(object coreKey, object delCoreKey, object value)
+            {
+                if (deletesMode == DeletesMode.IGNORE)
+                {
+                    cache[coreKey]= value;
+                }
+                else if (deletesMode == DeletesMode.RECACHE)
+                {
+                    cache[delCoreKey]=value;
+                }
+                else
+                {
+                    cache[coreKey]= value;
+                    cache[delCoreKey]= value;
+                }
+            }
+        }
+
+        /**
+          * New deletes are ignored by default, which gives higher
+          * cache hit rate on reopened readers.  Most of the time
+          * this is safe, because the filter will be AND'd with a
+          * Query that fully enforces deletions.  If instead you
+          * need this filter to always enforce deletions, pass
+          * either {@link DeletesMode#RECACHE} or {@link
+          * DeletesMode#DYNAMIC}.
+          * @param filter Filter to cache results of
+          */
+        public CachingWrapperFilter(Filter filter) : this(filter, DeletesMode.IGNORE)
 		{
-			this.filter = filter;
 		}
-		
+
+         /**
+   * Expert: by default, the cached filter will be shared
+   * across reopened segments that only had changes to their
+   * deletions.  
+   *
+   * @param filter Filter to cache results of
+   * @param deletesMode See {@link DeletesMode}
+   */
+        public CachingWrapperFilter(Filter filter, DeletesMode deletesMode)
+        {
+            this.filter = filter;
+            cache = new AnonymousFilterCache(deletesMode);
+            
+            //cache = new FilterCache(deletesMode) 
+            // {
+            //  public Object mergeDeletes(final IndexReader r, final Object docIdSet) {
+            //    return new FilteredDocIdSet((DocIdSet) docIdSet) {
+            //      protected boolean match(int docID) {
+            //        return !r.isDeleted(docID);
+            //      }
+            //    };
+            //  }
+            //};
+        }
+
+        class AnonymousFilterCache : FilterCache
+        {
+            class AnonymousFilteredDocIdSet : FilteredDocIdSet
+            {
+                IndexReader r;
+                public AnonymousFilteredDocIdSet(DocIdSet innerSet, IndexReader r) : base(innerSet)
+                {
+                    this.r = r;
+                }
+                public override bool Match(int docid)
+                {
+                    return !r.IsDeleted(docid);
+                }
+            }
+
+            public AnonymousFilterCache(DeletesMode deletesMode) : base(deletesMode)
+            {
+            }
+
+            protected  override object MergeDeletes(IndexReader reader, object docIdSet)
+            {
+                return new AnonymousFilteredDocIdSet((DocIdSet)docIdSet, reader);
+            }
+        }
+
 		/// <deprecated> Use {@link #GetDocIdSet(IndexReader)} instead.
 		/// </deprecated>
         [Obsolete("Use GetDocIdSet(IndexReader) instead.")]
 		public override System.Collections.BitArray Bits(IndexReader reader)
 		{
-			if (cache == null)
-			{
-                cache = new SupportClass.WeakHashTable();
-			}
-			
-			System.Object cached = null;
-			lock (cache.SyncRoot)
-			{
-				// check cache
-				cached = cache[reader];
-			}
+			object coreKey = reader.GetFieldCacheKey();
+            object delCoreKey = reader.HasDeletions() ? reader.GetDeletesCacheKey() : coreKey;
+
+            object cached = cache.Get(reader, coreKey, delCoreKey);
 			
 			if (cached != null)
 			{
@@ -73,12 +237,11 @@ namespace Lucene.Net.Search
 			}
 			
 			System.Collections.BitArray bits = filter.Bits(reader);
-			
-			lock (cache.SyncRoot)
-			{
-				// update cache
-				cache[reader] = bits;
-			}
+
+            if (bits != null)
+            {
+                cache.Put(coreKey, delCoreKey, bits);
+            }
 			
 			return bits;
 		}
@@ -89,8 +252,12 @@ namespace Lucene.Net.Search
 		/// </summary>
 		protected internal virtual DocIdSet DocIdSetToCache(DocIdSet docIdSet, IndexReader reader)
 		{
-			if (docIdSet.IsCacheable())
-			{
+            if (docIdSet == null)
+            {
+                // this is better than returning null, as the nonnull result can be cached
+                return DocIdSet.EMPTY_DOCIDSET;
+            }
+            else if (docIdSet.IsCacheable()) {
 				return docIdSet;
 			}
 			else
@@ -102,38 +269,32 @@ namespace Lucene.Net.Search
 				return (it == null) ? DocIdSet.EMPTY_DOCIDSET : new OpenBitSetDISI(it, reader.MaxDoc());
 			}
 		}
+
+        // for testing
+        public int hitCount, missCount;
 		
 		public override DocIdSet GetDocIdSet(IndexReader reader)
 		{
-			if (cache == null)
-			{
-                cache = new SupportClass.WeakHashTable();
-			}
-			
-			System.Object cached = null;
-			lock (cache.SyncRoot)
-			{
-				// check cache
-				cached = cache[reader];
-			}
+			object coreKey = reader.GetFieldCacheKey();
+            object delCoreKey = reader.HasDeletions() ? reader.GetDeletesCacheKey() : coreKey;
+
+            object cached = cache.Get(reader, coreKey, delCoreKey);
 			
 			if (cached != null)
 			{
+                hitCount++;
 				if (cached is DocIdSet)
 					return (DocIdSet) cached;
 				else
 					return new DocIdBitSet((System.Collections.BitArray) cached);
 			}
-			
+            missCount++;
+            // cache miss
 			DocIdSet docIdSet = DocIdSetToCache(filter.GetDocIdSet(reader), reader);
 			
 			if (docIdSet != null)
 			{
-				lock (cache.SyncRoot)
-				{
-					// update cache
-					cache[reader] = docIdSet;
-				}
+                cache.Put(coreKey, delCoreKey, docIdSet);
 			}
 			
 			return docIdSet;

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/FieldCache.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Search/FieldCache.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/FieldCache.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/FieldCache.cs Wed Mar 16 22:14:41 2011
@@ -194,6 +194,7 @@ namespace Lucene.Net.Search
 			NUMERIC_UTILS_DOUBLE_PARSER = new AnonymousClassDoubleParser1();
 		}
 	}
+    
 	[Serializable]
 	class AnonymousClassByteParser : ByteParser
 	{
@@ -664,6 +665,15 @@ namespace Lucene.Net.Search
 		/// <p/>
 		/// </summary>
 		void  PurgeAllCaches();
+
+        /// <summary>
+        /// Expert: drops all cache entries associated with this
+        /// reader.  NOTE: this reader must precisely match the
+        /// reader that the cache entry is keyed on. If you pass a
+        /// top-level reader, it usually will have no effect as
+        /// Lucene now caches at the segment reader level.
+        /// </summary>
+        void Purge(IndexReader r);
 		
 		/// <summary> If non-null, FieldCacheImpl will warn whenever
 		/// entries are created that are not sane according to

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/FieldCacheImpl.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Search/FieldCacheImpl.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/FieldCacheImpl.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/FieldCacheImpl.cs Wed Mar 16 22:14:41 2011
@@ -70,6 +70,14 @@ namespace Lucene.Net.Search
 		{
 			Init();
 		}
+
+        public void Purge(IndexReader r)
+        {
+            foreach (Cache c in caches.Values)
+            {
+                c.Purge(r);
+            }
+        }
 		
 		public virtual CacheEntry[] GetCacheEntries()
 		{
@@ -205,6 +213,16 @@ namespace Lucene.Net.Search
             internal System.Collections.IDictionary readerCache = new SupportClass.WeakHashTable();
 			
 			protected internal abstract System.Object CreateValue(IndexReader reader, Entry key);
+
+            /** Remove this reader from the cache, if present. */
+            public void Purge(IndexReader r)
+            {
+                object readerKey = r.GetFieldCacheKey();
+                lock (readerCache)
+                {
+                    readerCache.Remove(readerKey);
+                }
+            }
 			
 			public virtual System.Object Get(IndexReader reader, Entry key)
 			{
@@ -863,15 +881,9 @@ namespace Lucene.Net.Search
 					do 
 					{
 						Term term = termEnum.Term();
-						if (term == null || (System.Object) term.Field() != (System.Object) field)
-							break;
+                        if (term == null || term.Field() != field || t >= mterms.Length) break;
 						
 						// store term text
-						// we expect that there is at most one term per document
-						if (t >= mterms.Length)
-							//throw new System.SystemException("there are more terms than " + "documents in field \"" + field + "\", but it's impossible to sort on " + "tokenized fields");
-                            //LUCENENET-388
-                            throw new System.IO.IOException("there are more terms than " + "documents in field \"" + field + "\", but it's impossible to sort on " + "tokenized fields");
 						mterms[t] = term.Text();
 						
 						termDocs.Seek(termEnum);

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/FieldDoc.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Search/FieldDoc.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/FieldDoc.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/FieldDoc.cs Wed Mar 16 22:14:41 2011
@@ -83,7 +83,7 @@ namespace Lucene.Net.Search
 			}
 			sb.Length -= 2; // discard last ", "
 			sb.Append("]");
-			return base.ToString();
+			return sb.ToString();
 		}
 
         #region SERIALIZATION

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/IndexSearcher.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Search/IndexSearcher.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/IndexSearcher.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/IndexSearcher.cs Wed Mar 16 22:14:41 2011
@@ -185,7 +185,8 @@ namespace Lucene.Net.Search
 			{
 				throw new System.ArgumentException("nDocs must be > 0");
 			}
-			
+            nDocs = Math.Min(nDocs, reader.MaxDoc());
+
 			TopScoreDocCollector collector = TopScoreDocCollector.create(nDocs, !weight.ScoresDocsOutOfOrder());
 			Search(weight, filter, collector);
 			return collector.TopDocs();
@@ -209,7 +210,8 @@ namespace Lucene.Net.Search
 		/// </summary>
 		public virtual TopFieldDocs Search(Weight weight, Filter filter, int nDocs, Sort sort, bool fillFields)
 		{
-			
+            nDocs = Math.Min(nDocs, reader.MaxDoc());
+
 			SortField[] fields = sort.fields;
 			bool legacy = false;
 			for (int i = 0; i < fields.Length; i++)

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/TimeLimitingCollector.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Search/TimeLimitingCollector.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/TimeLimitingCollector.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/TimeLimitingCollector.cs Wed Mar 16 22:14:41 2011
@@ -121,7 +121,7 @@ namespace Lucene.Net.Search
 			{
 				return timeElapsed;
 			}
-			/// <summary>Returns last doc that was collected when the search time exceeded. </summary>
+            /// <summary>Returns last doc(absolute doc id) that was collected when the search time exceeded. </summary>
 			public virtual int GetLastDocCollected()
 			{
 				return lastDocCollected;
@@ -136,6 +136,8 @@ namespace Lucene.Net.Search
 		private long t0;
 		private long timeout;
 		private Collector collector;
+
+        private int docBase;
 		
 		/// <summary> Create a TimeLimitedCollector wrapper over another {@link Collector} with a specified timeout.</summary>
 		/// <param name="collector">the wrapped {@link Collector}
@@ -217,7 +219,7 @@ namespace Lucene.Net.Search
 					collector.Collect(doc);
 				}
 				//System.out.println(this+"  failing on:  "+doc+"  "+(time-t0));
-				throw new TimeExceededException(timeout - t0, time - t0, doc);
+                throw new TimeExceededException(timeout - t0, time - t0, docBase + doc);
 			}
 			//System.out.println(this+"  collecting: "+doc+"  "+(time-t0));
 			collector.Collect(doc);
@@ -226,6 +228,7 @@ namespace Lucene.Net.Search
 		public override void  SetNextReader(IndexReader reader, int base_Renamed)
 		{
 			collector.SetNextReader(reader, base_Renamed);
+            this.docBase = base_Renamed;
 		}
 		
 		public override void  SetScorer(Scorer scorer)

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/WildcardTermEnum.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Search/WildcardTermEnum.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/WildcardTermEnum.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/WildcardTermEnum.cs Wed Mar 16 22:14:41 2011
@@ -175,8 +175,9 @@ namespace Lucene.Net.Search
 				//
 				if (pattern[p] == WILDCARD_STRING)
 				{
-					// Look at the character beyond the '*'.
-					++p;
+                    // Look at the character beyond the '*' characters.
+                    while (p < pattern.Length && pattern[p] == WILDCARD_STRING)
+                        ++p;
 					// Examine the string, starting at the last character.
 					for (int i = string_Renamed.Length; i >= s; --i)
 					{

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/Directory.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Store/Directory.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/Directory.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/Directory.cs Wed Mar 16 22:14:41 2011
@@ -187,6 +187,11 @@ namespace Lucene.Net.Store
 		{
 			return this.ToString();
 		}
+
+        public override string ToString()
+        {
+            return base.ToString() + " lockFactory=" + GetLockFactory();
+        }
 		
 		/// <summary> Copy contents of a directory src to a directory dest.
 		/// If a file in src already exists in dest then the

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/FSDirectory.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Store/FSDirectory.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/FSDirectory.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/FSDirectory.cs Wed Mar 16 22:14:41 2011
@@ -47,7 +47,12 @@ namespace Lucene.Net.Store
 	/// href="http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6265734">Sun
 	/// JRE bug</a> this is a poor choice for Windows, but
 	/// on all other platforms this is the preferred
-	/// choice.</li>
+	/// choice. Applications using {@link Thread#interrupt()} or
+    /// <code>Future#cancel(boolean)</code> (on Java 1.5) should use
+    /// {@link SimpleFSDirectory} instead. See {@link NIOFSDirectory} java doc
+    /// for details.
+    ///        
+    ///        
 	/// 
 	/// <li> {@link MMapDirectory} uses memory-mapped IO when
 	/// reading. This is a good choice if you have plenty
@@ -73,6 +78,11 @@ namespace Lucene.Net.Store
 	/// an important limitation to be aware of. This class supplies a
 	/// (possibly dangerous) workaround mentioned in the bug report,
 	/// which may fail on non-Sun JVMs.</li>
+    ///       
+    /// Applications using {@link Thread#interrupt()} or
+    /// <code>Future#cancel(boolean)</code> (on Java 1.5) should use
+    /// {@link SimpleFSDirectory} instead. See {@link MMapDirectory}
+    /// java doc for details.
 	/// </ul>
 	/// 
 	/// Unfortunately, because of system peculiarities, there is
@@ -945,7 +955,7 @@ namespace Lucene.Net.Store
 		/// <summary>For debug output. </summary>
 		public override System.String ToString()
 		{
-			return this.GetType().FullName + "@" + directory;
+            return this.GetType().FullName + "@" + directory + " lockFactory=" + GetLockFactory();
 		}
 		
 		/// <summary> Default read chunk size.  This is a conditional

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/FileSwitchDirectory.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Store/FileSwitchDirectory.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/FileSwitchDirectory.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/FileSwitchDirectory.cs Wed Mar 16 22:14:41 2011
@@ -78,12 +78,10 @@ namespace Lucene.Net.Store
 		
 		public override System.String[] ListAll()
 		{
-			System.String[] primaryFiles = primaryDir.ListAll();
-			System.String[] secondaryFiles = secondaryDir.ListAll();
-			System.String[] files = new System.String[primaryFiles.Length + secondaryFiles.Length];
-			Array.Copy(primaryFiles, 0, files, 0, primaryFiles.Length);
-			Array.Copy(secondaryFiles, 0, files, primaryFiles.Length, secondaryFiles.Length);
-			return files;
+            System.Collections.Generic.List<string> files = new System.Collections.Generic.List<string>();
+            files.AddRange(primaryDir.ListAll());
+            files.AddRange(secondaryDir.ListAll());
+            return files.ToArray();
 		}
 
         [Obsolete("Lucene.Net-2.9.1. This method overrides obsolete member Lucene.Net.Store.Directory.List()")]

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/IndexOutput.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Store/IndexOutput.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/IndexOutput.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/IndexOutput.cs Wed Mar 16 22:14:41 2011
@@ -31,7 +31,6 @@ namespace Lucene.Net.Store
 	/// </seealso>
 	public abstract class IndexOutput
 	{
-		
 		/// <summary>Writes a single byte.</summary>
 		/// <seealso cref="IndexInput.ReadByte()">
 		/// </seealso>
@@ -117,7 +116,7 @@ namespace Lucene.Net.Store
 		/// </seealso>
 		public virtual void  WriteString(System.String s)
 		{
-			UnicodeUtil.UTF8Result utf8Result = new UnicodeUtil.UTF8Result();
+            UnicodeUtil.UTF8Result utf8Result = new UnicodeUtil.UTF8Result();
 			UnicodeUtil.UTF16toUTF8(s, 0, s.Length, utf8Result);
 			WriteVInt(utf8Result.length);
 			WriteBytes(utf8Result.result, 0, utf8Result.length);

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/MMapDirectory.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Store/MMapDirectory.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/MMapDirectory.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/MMapDirectory.cs Wed Mar 16 22:14:41 2011
@@ -317,6 +317,8 @@ namespace Lucene.Net.Store
 			
 			public override System.Object Clone()
 			{
+                if (buffer == null)
+                    throw new AlreadyClosedException("MMapIndexInput already closed");
 				MMapIndexInput clone = (MMapIndexInput) base.Clone();
 				clone.isClone = true;
 				// clone.buffer = buffer.duplicate();   // {{Aroush-1.9}}

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/NativeFSLockFactory.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Store/NativeFSLockFactory.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/NativeFSLockFactory.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/NativeFSLockFactory.cs Wed Mar 16 22:14:41 2011
@@ -52,60 +52,6 @@ namespace Lucene.Net.Store
 	
 	public class NativeFSLockFactory:FSLockFactory
 	{
-		
-		private volatile bool tested = false;
-		
-		// Simple test to verify locking system is "working".  On
-		// NFS, if it's misconfigured, you can hit long (35
-		// second) timeouts which cause Lock.obtain to take far
-		// too long (it assumes the obtain() call takes zero
-		// time). 
-		private void  AcquireTestLock()
-		{
-			lock (this)
-			{
-				if (tested)
-					return ;
-				tested = true;
-				
-				// Ensure that lockDir exists and is a directory.
-				bool tmpBool;
-				if (System.IO.File.Exists(lockDir.FullName))
-					tmpBool = true;
-				else
-					tmpBool = System.IO.Directory.Exists(lockDir.FullName);
-				if (!tmpBool)
-				{
-					try
-                    {
-                        System.IO.Directory.CreateDirectory(lockDir.FullName);
-                    }
-                    catch
-                    {
-						throw new System.SystemException("Cannot create directory: " + lockDir.FullName);
-                    }
-				}
-				else if (!System.IO.Directory.Exists(lockDir.FullName))
-				{
-					throw new System.SystemException("Found regular file where directory expected: " + lockDir.FullName);
-				}
-				
-				System.String randomLockName = "lucene-" + System.Convert.ToString(new System.Random().Next(), 16) + "-test.lock";
-				
-				Lock l = MakeLock(randomLockName);
-				try
-				{
-					l.Obtain();
-					l.Release();
-				}
-				catch (System.IO.IOException e)
-				{
-					System.SystemException e2 = new System.SystemException("Failed to acquire random test lock; please verify filesystem for lock directory '" + lockDir + "' supports locking", e);
-					throw e2;
-				}
-			}
-		}
-		
 		/// <summary> Create a NativeFSLockFactory instance, with null (unset)
 		/// lock directory. When you pass this factory to a {@link FSDirectory}
 		/// subclass, the lock directory is automatically set to the
@@ -152,7 +98,6 @@ namespace Lucene.Net.Store
 		{
 			lock (this)
 			{
-				AcquireTestLock();
 				if (lockPrefix != null)
 					lockName = lockPrefix + "-" + lockName;
 				return new NativeFSLock(lockDir, lockName);
@@ -441,7 +386,31 @@ namespace Lucene.Net.Store
 					if (!tmpBool)
 						throw new LockReleaseFailedException("failed to delete " + path);
 				}
-			}
+                /* From Java 2.9.4 {{DIGY}}
+                      // LUCENE-2421: we don't care anymore if the file cannot be deleted
+                      // because it's held up by another process (e.g. AntiVirus). NativeFSLock
+                      // does not depend on the existence/absence of the lock file
+                      path.delete();
+                    } else {
+                      // if we don't hold the lock, and somebody still called release(), for
+                      // example as a result of calling IndexWriter.unlock(), we should attempt
+                      // to obtain the lock and release it. If the obtain fails, it means the
+                      // lock cannot be released, and we should throw a proper exception rather
+                      // than silently failing/not doing anything.
+                      boolean obtained = false;
+                      try {
+                        if (!(obtained = obtain())) {
+                          throw new LockReleaseFailedException(
+                              "Cannot forcefully unlock a NativeFSLock which is held by another indexer component: "
+                                  + path);
+                        }
+                      } finally {
+                        if (obtained) {
+                          release();
+                        }
+                      }
+                */
+            }
 		}
 		
 		public override bool IsLocked()

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/RAMFile.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Store/RAMFile.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/RAMFile.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/RAMFile.cs Wed Mar 16 22:14:41 2011
@@ -19,8 +19,7 @@ using System;
 
 namespace Lucene.Net.Store
 {
-
-    /** For Lucene internal use */
+	
 	[Serializable]
 	public class RAMFile
 	{
@@ -30,13 +29,13 @@ namespace Lucene.Net.Store
 		protected System.Collections.ArrayList buffers = new System.Collections.ArrayList();
 		internal long length;
 		internal RAMDirectory directory;
-		protected internal long sizeInBytes; 
+		internal long sizeInBytes; 
 		
 		// This is publicly modifiable via Directory.touchFile(), so direct access not supported
 		private long lastModified = (DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond);
 		
 		// File used as buffer, in no RAMDirectory
-        protected internal RAMFile()
+		public /*internal*/ RAMFile()
 		{
 		}
 		
@@ -46,15 +45,15 @@ namespace Lucene.Net.Store
 		}
 		
 		// For non-stream access from thread that might be concurrent with writing
-		public virtual long GetLength()
+		public /*internal*/ virtual long GetLength()
 		{
 			lock (this)
 			{
 				return length;
 			}
 		}
-
-        public /*internal*/ virtual void SetLength(long length)
+		
+		public /*internal*/ virtual void  SetLength(long length)
 		{
 			lock (this)
 			{
@@ -63,7 +62,7 @@ namespace Lucene.Net.Store
 		}
 		
 		// For non-stream access from thread that might be concurrent with writing
-		public virtual long GetLastModified()
+		internal virtual long GetLastModified()
 		{
 			lock (this)
 			{
@@ -71,7 +70,7 @@ namespace Lucene.Net.Store
 			}
 		}
 		
-		protected internal virtual void  SetLastModified(long lastModified)
+		internal virtual void  SetLastModified(long lastModified)
 		{
 			lock (this)
 			{
@@ -79,7 +78,7 @@ namespace Lucene.Net.Store
 			}
 		}
 		
-		protected internal byte[] AddBuffer(int size)
+		internal byte[] AddBuffer(int size)
 		{
             byte[] buffer = NewBuffer(size);
             lock (this)
@@ -98,16 +97,16 @@ namespace Lucene.Net.Store
 
             return buffer;
 		}
-
-        public /*internal*/ byte[] GetBuffer(int index)
+		
+		public /*internal*/ byte[] GetBuffer(int index)
 		{
 			lock (this)
 			{
 				return (byte[]) buffers[index];
 			}
 		}
-
-        public /*internal*/ int NumBuffers()
+		
+		public /*internal*/ int NumBuffers()
 		{
 			lock (this)
 			{
@@ -128,7 +127,7 @@ namespace Lucene.Net.Store
 		}
 		
 		
-		public virtual long GetSizeInBytes()
+		public /*internal*/ virtual long GetSizeInBytes()
 		{
             lock (this)
             {

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/RAMOutputStream.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Store/RAMOutputStream.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/RAMOutputStream.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/RAMOutputStream.cs Wed Mar 16 22:14:41 2011
@@ -23,7 +23,7 @@ namespace Lucene.Net.Store
 	/// <summary> A memory-resident {@link IndexOutput} implementation.
 	/// 
 	/// </summary>
-    /// <version>  $Id: RAMOutputStream.java 941125 2010-05-05 00:44:15Z mikemccand $
+	/// <version>  $Id: RAMOutputStream.java 691694 2008-09-03 17:34:29Z mikemccand $
 	/// </version>
 	
 	public class RAMOutputStream:IndexOutput
@@ -44,7 +44,7 @@ namespace Lucene.Net.Store
 		{
 		}
 		
-		public RAMOutputStream(RAMFile f)
+		public /*internal*/ RAMOutputStream(RAMFile f)
 		{
 			file = f;
 			
@@ -75,14 +75,15 @@ namespace Lucene.Net.Store
 			}
 		}
 		
-		/// <summary>Resets this to an empty file. </summary>
+		/// <summary>Resets this to an empty buffer. </summary>
 		public virtual void  Reset()
-        {
+		{
             currentBuffer = null;
             currentBufferIndex = -1;
             bufferPosition = 0;
             bufferStart = 0;
             bufferLength = 0;
+			
 			file.SetLength(0);
 		}
 		

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/BitVector.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Util/BitVector.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/BitVector.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/BitVector.cs Wed Mar 16 22:14:41 2011
@@ -40,26 +40,30 @@ namespace Lucene.Net.Util
 		
 		private byte[] bits;
 		private int size;
-		private int count = - 1;
+		private int count;
 		
 		/// <summary>Constructs a vector capable of holding <code>n</code> bits. </summary>
 		public BitVector(int n)
 		{
 			size = n;
 			bits = new byte[(size >> 3) + 1];
+            count = 0;
 		}
 		
 		internal BitVector(byte[] bits, int size)
 		{
 			this.bits = bits;
 			this.size = size;
+            count = -1;
 		}
 		
 		public System.Object Clone()
 		{
 			byte[] copyBits = new byte[bits.Length];
 			Array.Copy(bits, 0, copyBits, 0, bits.Length);
-			return new BitVector(copyBits, size);
+            BitVector clone = new BitVector(copyBits, size);
+            clone.count = count;
+            return clone;
 		}
 		
 		/// <summary>Sets the value of <code>bit</code> to one. </summary>
@@ -141,6 +145,18 @@ namespace Lucene.Net.Util
 			}
 			return count;
 		}
+
+        /// <summary>
+        /// For testing 
+        /// </summary>
+        public int GetRecomputedCount()
+        {
+            int c = 0;
+            int end = bits.Length;
+            for (int i = 0; i < end; i++)
+                c += BYTE_COUNTS[bits[i] & 0xFF];	  // sum bits per byte
+            return c;
+        }
 		
 		private static readonly byte[] BYTE_COUNTS = new byte[]{0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8};
 		

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/Constants.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Util/Constants.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/Constants.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/Constants.cs Wed Mar 16 22:14:41 2011
@@ -70,7 +70,7 @@ namespace Lucene.Net.Util
             return s.ToString();
         }
 
-		public static readonly System.String LUCENE_MAIN_VERSION = Ident("2.9.2");
+		public static readonly System.String LUCENE_MAIN_VERSION = Ident("2.9.4");
 		
 		public static System.String LUCENE_VERSION;
 		static Constants()

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/NumericUtils.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Util/NumericUtils.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/NumericUtils.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/NumericUtils.cs Wed Mar 16 22:14:41 2011
@@ -387,8 +387,10 @@ namespace Lucene.Net.Util
 				bool hasUpper = (maxBound & mask) != mask;
 				long nextMinBound = (hasLower?(minBound + diff):minBound) & ~ mask;
 				long nextMaxBound = (hasUpper?(maxBound - diff):maxBound) & ~ mask;
-				
-				if (shift + precisionStep >= valSize || nextMinBound > nextMaxBound)
+				bool lowerWrapped = nextMinBound < minBound,
+                     upperWrapped = nextMaxBound > maxBound;
+      
+                if (shift+precisionStep>=valSize || nextMinBound>nextMaxBound || lowerWrapped || upperWrapped) 
 				{
 					// We are in the lowest precision or the next precision is not available.
 					AddRange(builder, valSize, minBound, maxBound, shift);

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/OpenBitSet.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Util/OpenBitSet.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/OpenBitSet.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/OpenBitSet.cs Wed Mar 16 22:14:41 2011
@@ -933,17 +933,22 @@ namespace Lucene.Net.Util
 			
 			return true;
 		}
+
+        public override int GetHashCode()
+        {
+            // Start with a zero hash and use a mix that results in zero if the input is zero.
+            // This effectively truncates trailing zeros without an explicit check.
+            long h = 0;
+            for (int i = bits.Length; --i >= 0; )
+            {
+                h ^= bits[i];
+                h = (h << 1) | (SupportClass.Number.URShift(h, 63)); // rotate left
+            }
+            // fold leftmost bits into right and add a constant to prevent
+            // empty sets from returning 0, which is too common.
+            return (int)(((h >> 32) ^ h) + 0x98761234);
+        }
+
 		
-		
-		public override int GetHashCode()
-		{
-			long h = 0x98761234; // something non-zero for length==0
-			for (int i = bits.Length; --i >= 0; )
-			{
-				h ^= bits[i];
-				h = (h << 1) | ((long) ((ulong) h >> 63)); // rotate left
-			}
-			return (int) ((h >> 32) ^ h); // fold leftmost bits into right
-		}
 	}
 }
\ No newline at end of file

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/PriorityQueue.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Util/PriorityQueue.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/PriorityQueue.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/PriorityQueue.cs Wed Mar 16 22:14:41 2011
@@ -88,11 +88,30 @@ namespace Lucene.Net.Util
 		{
 			size = 0;
 			int heapSize;
-			if (0 == maxSize)
-			// We allocate 1 extra to avoid if statement in top()
-				heapSize = 2;
-			else
-				heapSize = maxSize + 1;
+            if (0 == maxSize)
+                // We allocate 1 extra to avoid if statement in top()
+                heapSize = 2;
+            else
+            {
+                if (maxSize == Int32.MaxValue)
+                {
+                    // Don't wrap heapSize to -1, in this case, which
+                    // causes a confusing NegativeArraySizeException.
+                    // Note that very likely this will simply then hit
+                    // an OOME, but at least that's more indicative to
+                    // caller that this values is too big.  We don't +1
+                    // in this case, but it's very unlikely in practice
+                    // one will actually insert this many objects into
+                    // the PQ:
+                    heapSize = Int32.MaxValue;
+                }
+                else
+                {
+                    // NOTE: we add +1 because all access to heap is
+                    // 1-based not 0-based.  heap[0] is unused.
+                    heapSize = maxSize + 1;
+                }
+            }
 			heap = new System.Object[heapSize];
 			this.maxSize = maxSize;
 			

Modified: incubator/lucene.net/trunk/C#/src/Test/Index/TestBackwardsCompatibility.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestBackwardsCompatibility.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestBackwardsCompatibility.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestBackwardsCompatibility.cs Wed Mar 16 22:14:41 2011
@@ -115,8 +115,8 @@ namespace Lucene.Net.Index
 			CreateIndex(dirName, true);
 			RmDir(dirName);
 		}
-		
-		internal System.String[] oldNames = new System.String[]{"19.cfs", "19.nocfs", "20.cfs", "20.nocfs", "21.cfs", "21.nocfs", "22.cfs", "22.nocfs", "23.cfs", "23.nocfs", "24.cfs", "24.nocfs"};
+
+        internal System.String[] oldNames = new System.String[]{"19.cfs", "19.nocfs", "20.cfs", "20.nocfs", "21.cfs", "21.nocfs", "22.cfs", "22.nocfs", "23.cfs", "23.nocfs", "24.cfs", "24.nocfs","30.cfs","30.nocfs"};
 		
 		[Test]
 		public virtual void  TestOptimizeOldIndex()

Modified: incubator/lucene.net/trunk/C#/src/Test/Index/TestByteSlices.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestByteSlices.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestByteSlices.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestByteSlices.cs Wed Mar 16 22:14:41 2011
@@ -29,41 +29,51 @@ namespace Lucene.Net.Index
 	[TestFixture]
 	public class TestByteSlices:LuceneTestCase
 	{
-		
-		private class ByteBlockAllocator:ByteBlockPool.Allocator
-		{
-			internal System.Collections.ArrayList freeByteBlocks = new System.Collections.ArrayList();
-			
-			/* Allocate another byte[] from the shared pool */
-			public /*internal*/ override byte[] GetByteBlock(bool trackAllocations)
-			{
-				lock (this)
-				{
-					int size = freeByteBlocks.Count;
-					byte[] b;
-					if (0 == size)
-						b = new byte[DocumentsWriter.BYTE_BLOCK_SIZE_ForNUnit];
-					else
-					{
-						System.Object tempObject;
-						tempObject = freeByteBlocks[size - 1];
-						freeByteBlocks.RemoveAt(size - 1);
-						b = (byte[]) tempObject;
-					}
-					return b;
-				}
-			}
-			
-			/* Return a byte[] to the pool */
-			public /*internal*/ override void  RecycleByteBlocks(byte[][] blocks, int start, int end)
-			{
-				lock (this)
-				{
-					for (int i = start; i < end; i++)
-						freeByteBlocks.Add(blocks[i]);
-				}
-			}
-		}
+
+        private class ByteBlockAllocator : ByteBlockPool.Allocator
+        {
+            internal System.Collections.ArrayList freeByteBlocks = new System.Collections.ArrayList();
+
+            /* Allocate another byte[] from the shared pool */
+            public /*internal*/ override byte[] GetByteBlock(bool trackAllocations)
+            {
+                lock (this)
+                {
+                    int size = freeByteBlocks.Count;
+                    byte[] b;
+                    if (0 == size)
+                        b = new byte[DocumentsWriter.BYTE_BLOCK_SIZE_ForNUnit];
+                    else
+                    {
+                        System.Object tempObject;
+                        tempObject = freeByteBlocks[size - 1];
+                        freeByteBlocks.RemoveAt(size - 1);
+                        b = (byte[])tempObject;
+                    }
+                    return b;
+                }
+            }
+
+            /* Return a byte[] to the pool */
+            public /*internal*/ override void RecycleByteBlocks(byte[][] blocks, int start, int end)
+            {
+                lock (this)
+                {
+                    for (int i = start; i < end; i++)
+                        freeByteBlocks.Add(blocks[i]);
+                }
+            }
+
+            public override void RecycleByteBlocks(System.Collections.ArrayList blocks)
+            {
+                lock (this)
+                {
+                    int size = blocks.Count;
+                    for (int i = 0; i < size; i++)
+                        freeByteBlocks.Add((byte[])blocks[i]);
+                }
+            }
+        }
 		
 		[Test]
 		public virtual void  TestBasic()

Modified: incubator/lucene.net/trunk/C#/src/Test/Index/TestDoc.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestDoc.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestDoc.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestDoc.cs Wed Mar 16 22:14:41 2011
@@ -213,7 +213,7 @@ namespace Lucene.Net.Index
 			
 			if (useCompoundFile)
 			{
-				System.Collections.IList filesToDelete = merger.CreateCompoundFile(merged + ".cfs");
+				System.Collections.Generic.ICollection<string> filesToDelete = merger.CreateCompoundFile(merged + ".cfs");
 				for (System.Collections.IEnumerator iter = filesToDelete.GetEnumerator(); iter.MoveNext(); )
 				{
 					si1.dir.DeleteFile((System.String) iter.Current);

Modified: incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestIndexReader.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexReader.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexReader.cs Wed Mar 16 22:14:41 2011
@@ -1101,7 +1101,7 @@ namespace Lucene.Net.Index
 					System.String[] startFiles = dir.ListAll();
 					SegmentInfos infos = new SegmentInfos();
 					infos.Read(dir);
-					new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null);
+					new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null,null);
 					System.String[] endFiles = dir.ListAll();
 					
 					System.Array.Sort(startFiles);

Modified: incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexReaderReopen.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestIndexReaderReopen.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexReaderReopen.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexReaderReopen.cs Wed Mar 16 22:14:41 2011
@@ -1663,7 +1663,7 @@ namespace Lucene.Net.Index
 			IndexReader r = IndexReader.Open(dir);
 			Assert.AreEqual(0, r.NumDocs());
 			Assert.AreEqual(4, r.MaxDoc());
-			
+                        
 			System.Collections.IEnumerator it = IndexReader.ListCommits(dir).GetEnumerator();
 			while (it.MoveNext())
 			{

Modified: incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexWriter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestIndexWriter.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexWriter.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexWriter.cs Wed Mar 16 22:14:41 2011
@@ -985,7 +985,7 @@ namespace Lucene.Net.Index
 			System.String[] startFiles = dir.ListAll();
 			SegmentInfos infos = new SegmentInfos();
 			infos.Read(dir);
-			new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null);
+			new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null,null);
 			System.String[] endFiles = dir.ListAll();
 			
 			System.Array.Sort(startFiles);

Modified: incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexWriterDelete.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestIndexWriterDelete.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexWriterDelete.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexWriterDelete.cs Wed Mar 16 22:14:41 2011
@@ -916,7 +916,7 @@ namespace Lucene.Net.Index
 				System.String[] startFiles = dir.ListAll();
 				SegmentInfos infos = new SegmentInfos();
 				infos.Read(dir);
-				new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null);
+				new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null,null);
 				System.String[] endFiles = dir.ListAll();
 				
 				if (!SupportClass.CollectionsHelper.CompareStringArrays(startFiles, endFiles))

Added: incubator/lucene.net/trunk/C#/src/Test/Index/index.30.cfs.zip
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/index.30.cfs.zip?rev=1082321&view=auto
==============================================================================
Binary file - no diff available.

Propchange: incubator/lucene.net/trunk/C#/src/Test/Index/index.30.cfs.zip
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Added: incubator/lucene.net/trunk/C#/src/Test/Index/index.30.nocfs.zip
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/index.30.nocfs.zip?rev=1082321&view=auto
==============================================================================
Binary file - no diff available.

Propchange: incubator/lucene.net/trunk/C#/src/Test/Index/index.30.nocfs.zip
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Modified: incubator/lucene.net/trunk/C#/src/Test/Search/CachingWrapperFilterHelper.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Search/CachingWrapperFilterHelper.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Search/CachingWrapperFilterHelper.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Test/Search/CachingWrapperFilterHelper.cs Wed Mar 16 22:14:41 2011
@@ -23,76 +23,60 @@ using IndexReader = Lucene.Net.Index.Ind
 
 namespace Lucene.Net.Search
 {
-	
-	/// <summary> A unit test helper class to test when the filter is getting cached and when it is not.</summary>
-	[Serializable]
-	public class CachingWrapperFilterHelper:CachingWrapperFilter
-	{
-		
-		private bool shouldHaveCache = false;
-		
-		/// <param name="filter">Filter to cache results of
-		/// </param>
-		public CachingWrapperFilterHelper(Filter filter):base(filter)
-		{
-		}
-		
-		public virtual void  SetShouldHaveCache(bool shouldHaveCache)
-		{
-			this.shouldHaveCache = shouldHaveCache;
-		}
-		
-		public override DocIdSet GetDocIdSet(IndexReader reader)
-		{
-			if (cache == null)
-			{
-				cache = new System.Collections.Hashtable();
-			}
-			
-			lock (cache.SyncRoot)
-			{
-				// check cache
-				DocIdSet cached = (DocIdSet) cache[reader];
-				if (shouldHaveCache)
-				{
-					Assert.IsNotNull(cached, "Cache should have data ");
-				}
-				else
-				{
-					Assert.IsNotNull( cached, "Cache should be null " + cached);
-				}
-				if (cached != null)
-				{
-					return cached;
-				}
-			}
-			
-			DocIdSet bits = filter.GetDocIdSet(reader);
-			
-			lock (cache.SyncRoot)
-			{
-				// update cache
-				cache[reader] = bits;
-			}
-			
-			return bits;
-		}
-		
-		public override System.String ToString()
-		{
-			return "CachingWrapperFilterHelper(" + filter + ")";
-		}
-		
-		public  override bool Equals(System.Object o)
-		{
-			if (!(o is CachingWrapperFilterHelper))
-				return false;
-			return this.filter.Equals((CachingWrapperFilterHelper) o);
-		}
-		
-		public override int GetHashCode()
-		{
-			return this.filter.GetHashCode() ^ 0x5525aacb;
-		}
-	}
+
/// <summary>
/// A unit test helper class to test when the filter is getting cached and when it is not.
/// </summary>
[Serializable]
public class CachingWrapperFilterHelper : CachingWrapperFilter
{
    // When true, the next GetDocIdSet call is expected to be served from the
    // cache (i.e. the miss counter must not grow).
    private bool shouldHaveCache = false;

    /// <param name="filter">Filter to cache results of</param>
    public CachingWrapperFilterHelper(Filter filter)
        : base(filter)
    {
    }

    /// <summary>Sets the cache-hit expectation for the next GetDocIdSet call.</summary>
    public virtual void SetShouldHaveCache(bool shouldHaveCache)
    {
        this.shouldHaveCache = shouldHaveCache;
    }

    /// <summary>
    /// Delegates to the base cache and asserts whether the call was a cache
    /// hit or a miss, according to SetShouldHaveCache.
    /// </summary>
    public override DocIdSet GetDocIdSet(IndexReader reader)
    {
        lock (this)
        {
            int saveMissCount = missCount;
            DocIdSet docIdSet = base.GetDocIdSet(reader);

            if (shouldHaveCache)
            {
                // A cache hit leaves the miss counter untouched.
                Assert.AreEqual(saveMissCount, missCount, "Cache should have data ");
            }
            else
            {
                // A miss must bump the counter.
                Assert.IsTrue(missCount > saveMissCount, "Cache should be null " + docIdSet);
            }

            return docIdSet;
        }
    }

    public override System.String ToString()
    {
        return "CachingWrapperFilterHelper(" + filter + ")";
    }

    public override bool Equals(System.Object o)
    {
        if (!(o is CachingWrapperFilterHelper))
            return false;
        // NOTE(review): this compares the wrapped filter against the whole
        // helper, not against o's filter — looks like it should be
        // ((CachingWrapperFilterHelper)o).filter; the upstream Java original
        // has the same quirk, so behavior is preserved here. TODO confirm.
        return this.filter.Equals((CachingWrapperFilterHelper)o);
    }

    public override int GetHashCode()
    {
        return this.filter.GetHashCode() ^ 0x5525aacb;
    }
}
 }
\ No newline at end of file

Added: incubator/lucene.net/trunk/C#/src/Test/Search/TestCachingSpanFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Search/TestCachingSpanFilter.cs?rev=1082321&view=auto
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Search/TestCachingSpanFilter.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Test/Search/TestCachingSpanFilter.cs Wed Mar 16 22:14:41 2011
@@ -0,0 +1,129 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+using Lucene.Net.Store;
+using Lucene.Net.Index;
+using Lucene.Net.Analysis;
+using Lucene.Net.Documents;
+using Lucene.Net.Search;
+using Lucene.Net.Search.Spans;
+using LuceneTestCase = Lucene.Net.Util.LuceneTestCase;
+
+using NUnit.Framework;
+
+namespace Lucene.Net.Search
+{
[TestFixture]
public class TestCachingSpanFilter : LuceneTestCase
{
    /// <summary>
    /// Checks CachingSpanFilter against deletions: with DeletesMode.IGNORE
    /// the cached bits keep matching a deleted doc, while DeletesMode.RECACHE
    /// rebuilds the cached bits when the reader changes.
    /// </summary>
    [Test]
    public void TestEnforceDeletions()
    {
        Directory dir = new MockRAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
        IndexReader reader = writer.GetReader();
        IndexSearcher searcher = new IndexSearcher(reader);

        // Add a doc, refresh the reader, and check that it is there.
        Document doc = new Document();
        doc.Add(new Field("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
        writer.AddDocument(doc);

        reader = RefreshReader(reader);
        searcher = new IndexSearcher(reader);

        TopDocs hits = searcher.Search(new MatchAllDocsQuery(), 1);
        Assert.AreEqual(1, hits.totalHits, "Should find a hit...");

        SpanFilter startFilter = new SpanQueryFilter(new SpanTermQuery(new Term("id", "1")));

        // IGNORE mode: deletions are not reflected by the cached bits.
        CachingSpanFilter filter = new CachingSpanFilter(startFilter, CachingWrapperFilter.DeletesMode.IGNORE);

        hits = searcher.Search(new MatchAllDocsQuery(), filter, 1);
        Assert.AreEqual(1, hits.totalHits, "[query + filter] Should find a hit...");
        ConstantScoreQuery constantScore = new ConstantScoreQuery(filter);
        hits = searcher.Search(constantScore, 1);
        Assert.AreEqual(1, hits.totalHits, "[just filter] Should find a hit...");

        // Now delete the doc and refresh the reader.
        writer.DeleteDocuments(new Term("id", "1"));

        reader = RefreshReader(reader);
        searcher = new IndexSearcher(reader);

        // The query+filter path respects the deletion...
        hits = searcher.Search(new MatchAllDocsQuery(), filter, 1);
        Assert.AreEqual(0, hits.totalHits, "[query + filter] Should *not* find a hit...");

        // ...but the stale cached bits still match the deleted doc.
        hits = searcher.Search(constantScore, 1);
        Assert.AreEqual(1, hits.totalHits, "[just filter] Should find a hit...");

        // RECACHE mode: force the cache to regenerate.
        filter = new CachingSpanFilter(startFilter, CachingWrapperFilter.DeletesMode.RECACHE);

        writer.AddDocument(doc);
        reader = RefreshReader(reader);
        searcher = new IndexSearcher(reader);

        hits = searcher.Search(new MatchAllDocsQuery(), filter, 1);
        Assert.AreEqual(1, hits.totalHits, "[query + filter] Should find a hit...");

        constantScore = new ConstantScoreQuery(filter);
        hits = searcher.Search(constantScore, 1);
        Assert.AreEqual(1, hits.totalHits, "[just filter] Should find a hit...");

        // A reopen with no new deletions must be a cache hit (no extra miss).
        IndexReader newReader = RefreshReader(reader);
        Assert.IsTrue(reader != newReader);
        reader = newReader;
        searcher = new IndexSearcher(reader);
        int missCountBefore = filter.missCount;
        hits = searcher.Search(constantScore, 1);
        Assert.AreEqual(1, hits.totalHits, "[just filter] Should find a hit...");
        Assert.AreEqual(missCountBefore, filter.missCount);

        // After a real deletion plus reopen, RECACHE drops the stale bits.
        writer.DeleteDocuments(new Term("id", "1"));

        reader = RefreshReader(reader);
        searcher = new IndexSearcher(reader);

        hits = searcher.Search(new MatchAllDocsQuery(), filter, 1);
        Assert.AreEqual(0, hits.totalHits, "[query + filter] Should *not* find a hit...");

        hits = searcher.Search(constantScore, 1);
        Assert.AreEqual(0, hits.totalHits, "[just filter] Should *not* find a hit...");
    }

    /// <summary>Reopens the reader, closing the old instance if a new one was returned.</summary>
    private static IndexReader RefreshReader(IndexReader reader)
    {
        IndexReader oldReader = reader;
        reader = reader.Reopen();
        if (reader != oldReader)
        {
            oldReader.Close();
        }
        return reader;
    }
}
+}

Modified: incubator/lucene.net/trunk/C#/src/Test/Search/TestCachingWrapperFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Search/TestCachingWrapperFilter.cs?rev=1082321&r1=1082320&r2=1082321&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Search/TestCachingWrapperFilter.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Test/Search/TestCachingWrapperFilter.cs Wed Mar 16 22:14:41 2011
@@ -19,45 +19,312 @@ using System;
 
 using NUnit.Framework;
 
-using StandardAnalyzer = Lucene.Net.Analysis.Standard.StandardAnalyzer;
-using IndexReader = Lucene.Net.Index.IndexReader;
-using IndexWriter = Lucene.Net.Index.IndexWriter;
-using Directory = Lucene.Net.Store.Directory;
-using RAMDirectory = Lucene.Net.Store.RAMDirectory;
+using Lucene.Net.Store;
+using Lucene.Net.Index;
+using Lucene.Net.Analysis;
+using Lucene.Net.Documents;
+using Lucene.Net.Search;
+using Lucene.Net.Util;
+using Lucene.Net.Search.Spans;
 using LuceneTestCase = Lucene.Net.Util.LuceneTestCase;
 
 namespace Lucene.Net.Search
 {
-	
+
     [TestFixture]
-	public class TestCachingWrapperFilter:LuceneTestCase
-	{
-		[Test]
-		public virtual void  TestCachingWorks()
-		{
-			Directory dir = new RAMDirectory();
-			IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
-			writer.Close();
-			
-			IndexReader reader = IndexReader.Open(dir);
-			
-			MockFilter filter = new MockFilter();
-			CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
-			
-			// first time, nested filter is called
-			cacher.GetDocIdSet(reader);
-			Assert.IsTrue(filter.WasCalled(), "first time");
-			
-			// make sure no exception if cache is holding the wrong bitset
-			cacher.Bits(reader);
-			cacher.GetDocIdSet(reader);
-			
-			// second time, nested filter should not be called
-			filter.Clear();
-			cacher.GetDocIdSet(reader);
-			Assert.IsFalse(filter.WasCalled(), "second time");
-			
-			reader.Close();
-		}
-	}
public class TestCachingWrapperFilter : LuceneTestCase
{
    /// <summary>The nested filter must only be consulted on the first (uncached) call.</summary>
    [Test]
    public virtual void TestCachingWorks()
    {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new KeywordAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
        writer.Close();

        IndexReader reader = IndexReader.Open(dir);

        MockFilter filter = new MockFilter();
        CachingWrapperFilter cacher = new CachingWrapperFilter(filter);

        // First time, the nested filter is called.
        cacher.GetDocIdSet(reader);
        Assert.IsTrue(filter.WasCalled(), "first time");

        // Make sure there is no exception if the cache holds the wrong bitset.
        cacher.Bits(reader);
        cacher.GetDocIdSet(reader);

        // Second time, the nested filter should not be called.
        filter.Clear();
        cacher.GetDocIdSet(reader);
        Assert.IsFalse(filter.WasCalled(), "second time");

        reader.Close();
    }

    /// <summary>A wrapped filter returning a null DocIdSet is normalized to the shared empty set.</summary>
    [Test]
    public void TestNullDocIdSet()
    {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new KeywordAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
        writer.Close();

        IndexReader reader = IndexReader.Open(dir, true);

        Filter filter = new AnonymousFilter();
        CachingWrapperFilter cacher = new CachingWrapperFilter(filter);

        // The caching filter should return the empty set constant.
        Assert.AreSame(DocIdSet.EMPTY_DOCIDSET, cacher.GetDocIdSet(reader));

        reader.Close();
    }

    // Filter whose DocIdSet is null (stand-in for an anonymous class in the Java original).
    class AnonymousFilter : Filter
    {
        public override DocIdSet GetDocIdSet(IndexReader reader)
        {
            return null;
        }
    }

    /// <summary>A DocIdSet whose Iterator() is null is also normalized to the shared empty set.</summary>
    [Test]
    public void TestNullDocIdSetIterator()
    {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new KeywordAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
        writer.Close();

        IndexReader reader = IndexReader.Open(dir, true);

        Filter filter = new AnonymousFilter2();
        CachingWrapperFilter cacher = new CachingWrapperFilter(filter);

        // The caching filter should return the empty set constant.
        Assert.AreSame(DocIdSet.EMPTY_DOCIDSET, cacher.GetDocIdSet(reader));

        reader.Close();
    }

    // Filter returning a DocIdSet whose Iterator() is null.
    class AnonymousFilter2 : Filter
    {
        class AnonymousDocIdSet : DocIdSet
        {
            public override DocIdSetIterator Iterator()
            {
                return null;
            }
        }

        public override DocIdSet GetDocIdSet(IndexReader reader)
        {
            return new AnonymousDocIdSet();
        }
    }

    /// <summary>
    /// Asserts that the wrapped result is always cacheable: same class as the
    /// original set when that set is cacheable, an OpenBitSetDISI otherwise.
    /// </summary>
    private static void assertDocIdSetCacheable(IndexReader reader, Filter filter, bool shouldCacheable)
    {
        CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
        DocIdSet originalSet = filter.GetDocIdSet(reader);
        DocIdSet cachedSet = cacher.GetDocIdSet(reader);
        Assert.IsTrue(cachedSet.IsCacheable());
        Assert.AreEqual(shouldCacheable, originalSet.IsCacheable());
        if (originalSet.IsCacheable())
        {
            Assert.AreEqual(originalSet.GetType(), cachedSet.GetType(), "Cached DocIdSet must be of same class like uncached, if cacheable");
        }
        else
        {
            Assert.IsTrue(cachedSet is OpenBitSetDISI, "Cached DocIdSet must be an OpenBitSet if the original one was not cacheable");
        }
    }

    [Test]
    public void TestIsCacheAble()
    {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new KeywordAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
        writer.Close();

        IndexReader reader = IndexReader.Open(dir);

        // Not cacheable:
        assertDocIdSetCacheable(reader, new QueryWrapperFilter(new TermQuery(new Term("test", "value"))), false);
        // Returns default empty docidset, always cacheable:
        assertDocIdSetCacheable(reader, NumericRangeFilter.NewIntRange("test", 10000, -10000, true, true), true);
        // Is cacheable:
        assertDocIdSetCacheable(reader, FieldCacheRangeFilter.NewIntRange("test", 10, 20, true, true), true);
        // An OpenBitSet filter is always cacheable.
        assertDocIdSetCacheable(reader, new AnonymousFilter3(), true);
        // A deprecated (Bits-based) filter is always cacheable.
        assertDocIdSetCacheable(reader, new AnonymousFilter4(), true);

        reader.Close();
    }

    // Filter backed directly by an OpenBitSet.
    class AnonymousFilter3 : Filter
    {
        public override DocIdSet GetDocIdSet(IndexReader reader)
        {
            return new OpenBitSet();
        }
    }

    // Filter using the deprecated Bits() API.
    class AnonymousFilter4 : Filter
    {
        public override System.Collections.BitArray Bits(IndexReader reader)
        {
            return new System.Collections.BitArray(100000); //DIGY
        }
    }

    /// <summary>
    /// Exercises DeletesMode.IGNORE, RECACHE and DYNAMIC against deletions of
    /// a cached doc, checking hit counts and the filter's miss counter.
    /// </summary>
    [Test]
    public void TestEnforceDeletions()
    {
        Directory dir = new MockRAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
        IndexReader reader = writer.GetReader();
        IndexSearcher searcher = new IndexSearcher(reader);

        // Add a doc, refresh the reader, and check that it is there.
        Document doc = new Document();
        doc.Add(new Field("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
        writer.AddDocument(doc);

        reader = RefreshReader(reader);
        searcher = new IndexSearcher(reader);

        TopDocs hits = searcher.Search(new MatchAllDocsQuery(), 1);
        Assert.AreEqual(1, hits.totalHits, "Should find a hit...");

        Filter startFilter = new QueryWrapperFilter(new TermQuery(new Term("id", "1")));

        // IGNORE mode: cached bits do not track deletions.
        CachingWrapperFilter filter = new CachingWrapperFilter(startFilter, CachingWrapperFilter.DeletesMode.IGNORE);

        hits = searcher.Search(new MatchAllDocsQuery(), filter, 1);
        Assert.AreEqual(1, hits.totalHits, "[query + filter] Should find a hit...");
        ConstantScoreQuery constantScore = new ConstantScoreQuery(filter);
        hits = searcher.Search(constantScore, 1);
        Assert.AreEqual(1, hits.totalHits, "[just filter] Should find a hit...");

        // Now delete the doc and refresh the reader.
        writer.DeleteDocuments(new Term("id", "1"));

        reader = RefreshReader(reader);
        searcher = new IndexSearcher(reader);

        hits = searcher.Search(new MatchAllDocsQuery(), filter, 1);
        Assert.AreEqual(0, hits.totalHits, "[query + filter] Should *not* find a hit...");

        // The stale cached bits still match the deleted doc.
        hits = searcher.Search(constantScore, 1);
        Assert.AreEqual(1, hits.totalHits, "[just filter] Should find a hit...");

        // RECACHE mode: force the cache to regenerate.
        filter = new CachingWrapperFilter(startFilter, CachingWrapperFilter.DeletesMode.RECACHE);

        writer.AddDocument(doc);
        reader = RefreshReader(reader);
        searcher = new IndexSearcher(reader);

        hits = searcher.Search(new MatchAllDocsQuery(), filter, 1);
        Assert.AreEqual(1, hits.totalHits, "[query + filter] Should find a hit...");

        constantScore = new ConstantScoreQuery(filter);
        hits = searcher.Search(constantScore, 1);
        Assert.AreEqual(1, hits.totalHits, "[just filter] Should find a hit...");

        // A reopen with no change to deletions must be a cache hit.
        IndexReader newReader = RefreshReader(reader);
        Assert.IsTrue(reader != newReader);
        reader = newReader;
        searcher = new IndexSearcher(reader);
        int missCount = filter.missCount;
        hits = searcher.Search(constantScore, 1);
        Assert.AreEqual(1, hits.totalHits, "[just filter] Should find a hit...");
        Assert.AreEqual(missCount, filter.missCount);

        // Now delete the doc and refresh; RECACHE recomputes (one more miss).
        writer.DeleteDocuments(new Term("id", "1"));

        reader = RefreshReader(reader);
        searcher = new IndexSearcher(reader);

        missCount = filter.missCount;
        hits = searcher.Search(new MatchAllDocsQuery(), filter, 1);
        Assert.AreEqual(missCount + 1, filter.missCount);
        Assert.AreEqual(0, hits.totalHits, "[query + filter] Should *not* find a hit...");
        hits = searcher.Search(constantScore, 1);
        Assert.AreEqual(0, hits.totalHits, "[just filter] Should *not* find a hit...");

        // DYNAMIC mode: apply deletions on the fly.
        filter = new CachingWrapperFilter(startFilter, CachingWrapperFilter.DeletesMode.DYNAMIC);

        writer.AddDocument(doc);
        reader = RefreshReader(reader);
        searcher = new IndexSearcher(reader);

        hits = searcher.Search(new MatchAllDocsQuery(), filter, 1);
        Assert.AreEqual(1, hits.totalHits, "[query + filter] Should find a hit...");
        constantScore = new ConstantScoreQuery(filter);
        hits = searcher.Search(constantScore, 1);
        Assert.AreEqual(1, hits.totalHits, "[just filter] Should find a hit...");

        // Now delete the doc, refresh the reader, and see that it's not there.
        writer.DeleteDocuments(new Term("id", "1"));

        reader = RefreshReader(reader);
        searcher = new IndexSearcher(reader);

        hits = searcher.Search(new MatchAllDocsQuery(), filter, 1);
        Assert.AreEqual(0, hits.totalHits, "[query + filter] Should *not* find a hit...");

        missCount = filter.missCount;
        hits = searcher.Search(constantScore, 1);
        Assert.AreEqual(0, hits.totalHits, "[just filter] Should *not* find a hit...");

        // Dynamic deletion handling doesn't count as a miss.
        Assert.AreEqual(missCount, filter.missCount);
    }

    /// <summary>Reopens the reader, closing the old instance if a new one was returned.</summary>
    private static IndexReader RefreshReader(IndexReader reader)
    {
        IndexReader oldReader = reader;
        reader = reader.Reopen();
        if (reader != oldReader)
        {
            oldReader.Close();
        }
        return reader;
    }
}
 }
\ No newline at end of file



Mime
View raw message