lucene-java-commits mailing list archives

From: markrmil...@apache.org
Subject: svn commit: r889683 [2/6] - in /lucene/java/branches/flex_1458: ./ contrib/ contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/ contrib/analyzers/common/src/java/org/apache/lucene/analysis/cn/ contrib/analyzers/common/src/java/org/apache/...
Date: Fri, 11 Dec 2009 16:22:46 GMT
Modified: lucene/java/branches/flex_1458/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java (original)
+++ lucene/java/branches/flex_1458/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java Fri Dec 11 16:22:30 2009
@@ -29,16 +29,9 @@
 
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.FieldSelector;
-import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.TermDocs;
-import org.apache.lucene.index.TermEnum;
-import org.apache.lucene.index.TermFreqVector;
-import org.apache.lucene.index.TermPositions;
-import org.apache.lucene.index.TermVectorMapper;
+import org.apache.lucene.index.*;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BitVector;
 
 /**
  * An InstantiatedIndexReader is not a snapshot in time, it is completely in
@@ -105,9 +98,9 @@
     return index;
   }
 
-  private Set<InstantiatedDocument> deletedDocuments = new HashSet<InstantiatedDocument>();
-  private Set<Integer> deletedDocumentNumbers = new HashSet<Integer>();
-  private Map<String,List<NormUpdate>> updatedNormsByFieldNameAndDocumentNumber = null;
+  private BitVector uncommittedDeletedDocuments;
+
+  private Map<String,List<NormUpdate>> uncommittedNormsByFieldNameAndDocumentNumber = null;
 
   private class NormUpdate {
     private int doc;
@@ -121,7 +114,15 @@
 
   @Override
   public int numDocs() {
-    return getIndex().getDocumentsByNumber().length - index.getDeletedDocuments().size() - deletedDocuments.size();
+    // todo i suppose this value could be cached, but array#length and bitvector#count is fast.
+    int numDocs = getIndex().getDocumentsByNumber().length;
+    if (uncommittedDeletedDocuments != null) {
+      numDocs -= uncommittedDeletedDocuments.count();
+    }
+    if (index.getDeletedDocuments() != null) {
+      numDocs -= index.getDeletedDocuments().count();
+    }
+    return numDocs;
   }
 
   @Override
@@ -130,28 +131,39 @@
   }
 
   @Override
-  public boolean isDeleted(int n) {
-    return getIndex().getDeletedDocuments().contains(n) || deletedDocumentNumbers.contains(n);
+  public boolean hasDeletions() {
+    return index.getDeletedDocuments() != null || uncommittedDeletedDocuments != null;
   }
 
+
   @Override
-  public boolean hasDeletions() {
-    return getIndex().getDeletedDocuments().size() > 0 || deletedDocumentNumbers.size() > 0;
+  public boolean isDeleted(int n) {
+    return (index.getDeletedDocuments() != null && index.getDeletedDocuments().get(n))
+        || (uncommittedDeletedDocuments != null && uncommittedDeletedDocuments.get(n));
   }
 
+
   @Override
   protected void doDelete(int docNum) throws IOException {
-    if (!getIndex().getDeletedDocuments().contains(docNum)) {
-      if (deletedDocumentNumbers.add(docNum)) {
-        deletedDocuments.add(getIndex().getDocumentsByNumber()[docNum]);
-      }
+
+    // don't delete if already deleted
+    if ((index.getDeletedDocuments() != null && index.getDeletedDocuments().get(docNum))
+        || (uncommittedDeletedDocuments != null && uncommittedDeletedDocuments.get(docNum))) {
+      return;
     }
+
+    if (uncommittedDeletedDocuments == null) {
+      uncommittedDeletedDocuments = new BitVector(maxDoc());
+    }
+
+    uncommittedDeletedDocuments.set(docNum);
   }
 
   @Override
   protected void doUndeleteAll() throws IOException {
-    deletedDocumentNumbers.clear();
-    deletedDocuments.clear();
+    // todo: read/write lock
+    uncommittedDeletedDocuments = null;
+    // todo: read/write unlock
   }
 
   @Override
@@ -161,25 +173,30 @@
     boolean updated = false;
 
     // 1. update norms
-    if (updatedNormsByFieldNameAndDocumentNumber != null) {
-      for (Map.Entry<String,List<NormUpdate>> e : updatedNormsByFieldNameAndDocumentNumber.entrySet()) {
+    if (uncommittedNormsByFieldNameAndDocumentNumber != null) {
+      for (Map.Entry<String,List<NormUpdate>> e : uncommittedNormsByFieldNameAndDocumentNumber.entrySet()) {
         byte[] norms = getIndex().getNormsByFieldNameAndDocumentNumber().get(e.getKey());
         for (NormUpdate normUpdate : e.getValue()) {
           norms[normUpdate.doc] = normUpdate.value;
         }
       }
-      updatedNormsByFieldNameAndDocumentNumber = null;
+      uncommittedNormsByFieldNameAndDocumentNumber = null;
 
       updated = true;
     }
 
     // 2. remove deleted documents
-    if (deletedDocumentNumbers.size() > 0) {
-      for (Integer doc : deletedDocumentNumbers) {
-        getIndex().getDeletedDocuments().add(doc);
+    if (uncommittedDeletedDocuments != null) {
+      if (index.getDeletedDocuments() == null) {
+        index.setDeletedDocuments(uncommittedDeletedDocuments);
+      } else {
+        for (int d = 0; d< uncommittedDeletedDocuments.size(); d++) {
+          if (uncommittedDeletedDocuments.get(d)) {
+            index.getDeletedDocuments().set(d);
+          }
+        }
       }
-      deletedDocumentNumbers.clear();
-      deletedDocuments.clear();
+      uncommittedDeletedDocuments = null;
 
       updated = true;
 
@@ -195,7 +212,7 @@
   }
 
   @Override
-  public Collection getFieldNames(FieldOption fieldOption) {
+  public Collection<String> getFieldNames(FieldOption fieldOption) {
     Set<String> fieldSet = new HashSet<String>();
     for (FieldSetting fi : index.getFieldSettings().values()) {
       if (fieldOption == IndexReader.FieldOption.ALL) {
@@ -299,9 +316,9 @@
     if (norms == null) {
       return new byte[0]; // todo a static final zero length attribute?
     }
-    if (updatedNormsByFieldNameAndDocumentNumber != null) {
+    if (uncommittedNormsByFieldNameAndDocumentNumber != null) {
       norms = norms.clone();
-      List<NormUpdate> updated = updatedNormsByFieldNameAndDocumentNumber.get(field);
+      List<NormUpdate> updated = uncommittedNormsByFieldNameAndDocumentNumber.get(field);
       if (updated != null) {
         for (NormUpdate normUpdate : updated) {
           norms[normUpdate.doc] = normUpdate.value;
@@ -322,13 +339,13 @@
 
   @Override
   protected void doSetNorm(int doc, String field, byte value) throws IOException {
-    if (updatedNormsByFieldNameAndDocumentNumber == null) {
-      updatedNormsByFieldNameAndDocumentNumber = new HashMap<String,List<NormUpdate>>(getIndex().getNormsByFieldNameAndDocumentNumber().size());
+    if (uncommittedNormsByFieldNameAndDocumentNumber == null) {
+      uncommittedNormsByFieldNameAndDocumentNumber = new HashMap<String,List<NormUpdate>>(getIndex().getNormsByFieldNameAndDocumentNumber().size());
     }
-    List<NormUpdate> list = updatedNormsByFieldNameAndDocumentNumber.get(field);
+    List<NormUpdate> list = uncommittedNormsByFieldNameAndDocumentNumber.get(field);
     if (list == null) {
       list = new LinkedList<NormUpdate>();
-      updatedNormsByFieldNameAndDocumentNumber.put(field, list);
+      uncommittedNormsByFieldNameAndDocumentNumber.put(field, list);
     }
     list.add(new NormUpdate(doc, value));
   }
@@ -367,6 +384,18 @@
     return new InstantiatedTermDocs(this);
   }
 
+
+  @Override
+  public TermDocs termDocs(Term term) throws IOException {
+    if (term == null) {
+      return new InstantiatedAllTermDocs(this);
+    } else {
+      InstantiatedTermDocs termDocs = new InstantiatedTermDocs(this);
+      termDocs.seek(term);
+      return termDocs;
+    }
+  }
+
   @Override
   public TermPositions termPositions() throws IOException {
     return new InstantiatedTermPositions(this);
@@ -411,7 +440,7 @@
   @Override
   public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException {
     InstantiatedDocument doc = getIndex().getDocumentsByNumber()[docNumber];
-    for (Map.Entry<String,List<InstantiatedTermDocumentInformation>> e : doc.getVectorSpace().entrySet()) {
+    for (Map.Entry<String, List<InstantiatedTermDocumentInformation>> e : doc.getVectorSpace().entrySet()) {
       mapper.setExpectations(e.getKey(), e.getValue().size(), true, true);
       for (InstantiatedTermDocumentInformation tdi : e.getValue()) {
         mapper.map(tdi.getTerm().text(), tdi.getTermPositions().length, tdi.getTermOffsets(), tdi.getTermPositions());

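The reader now buffers deletions in an uncommitted org.apache.lucene.util.BitVector and folds them into the index-wide BitVector when doCommit() runs. A minimal sketch of that merge step, using only the BitVector calls already visible in the patch (the helper name and the standalone class are illustrative, not part of the contrib API):

import org.apache.lucene.util.BitVector;

public class DeletedDocsMergeSketch {
  // OR the uncommitted deletions into the committed set, as doCommit() does above
  static BitVector merge(BitVector committed, BitVector uncommitted) {
    if (uncommitted == null) {
      return committed;                 // nothing buffered since the last commit
    }
    if (committed == null) {
      return uncommitted;               // first deletions: adopt the buffer as-is
    }
    for (int d = 0; d < uncommitted.size(); d++) {
      if (uncommitted.get(d)) {
        committed.set(d);               // mark the document deleted index-wide
      }
    }
    return committed;
  }

  public static void main(String[] args) {
    BitVector uncommitted = new BitVector(16);
    uncommitted.set(3);
    uncommitted.set(8);
    BitVector committed = merge(null, uncommitted);
    System.out.println(committed.count()); // 2 deleted documents
  }
}
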
Modified: lucene/java/branches/flex_1458/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java (original)
+++ lucene/java/branches/flex_1458/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java Fri Dec 11 16:22:30 2009
@@ -46,6 +46,7 @@
 import org.apache.lucene.search.Similarity;
 import org.apache.lucene.util.StringHelper;
 import org.apache.lucene.util.AttributeImpl;
+import org.apache.lucene.util.BitVector;
 
 /**
  * This class, similar to {@link org.apache.lucene.index.IndexWriter}, has no locking mechanism.
@@ -406,6 +407,18 @@
     termDocumentInformationFactoryByDocument.clear();
     fieldNameBuffer.clear();
 
+
+    // update deleted documents bitset
+    if (index.getDeletedDocuments() != null) {
+      BitVector deletedDocuments = new BitVector(index.getDocumentsByNumber().length);
+      for (int i = 0; i < index.getDeletedDocuments().size(); i++) {
+        if (index.getDeletedDocuments().get(i)) {
+          deletedDocuments.set(i);
+        }
+      }
+      index.setDeletedDocuments(deletedDocuments);
+    }
+
     index.setVersion(System.currentTimeMillis());
 
     // todo unlock

Modified: lucene/java/branches/flex_1458/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermDocs.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermDocs.java?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermDocs.java (original)
+++ lucene/java/branches/flex_1458/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermDocs.java Fri Dec 11 16:22:30 2009
@@ -60,11 +60,14 @@
       currentDocumentIndex++;
       if (currentDocumentIndex < currentTerm.getAssociatedDocuments().length) {
         currentDocumentInformation = currentTerm.getAssociatedDocuments()[currentDocumentIndex];
-        if (reader.hasDeletions() && reader.isDeleted(currentDocumentInformation.getDocument().getDocumentNumber())) {
+        if (reader.isDeleted(currentDocumentInformation.getDocument().getDocumentNumber())) {
           return next();
         } else {
           return true;
         }
+      } else {
+        // mimic SegmentTermDocs
+        currentDocumentIndex = currentTerm.getAssociatedDocuments().length -1;
       }
     }
     return false;
@@ -111,6 +114,8 @@
     int pos = currentTerm.seekCeilingDocumentInformationIndex(target, startOffset);
 
     if (pos == -1) {
+      // mimic SegmentTermDocs that positions at the last index
+      currentDocumentIndex = currentTerm.getAssociatedDocuments().length -1;
       return false;
     }
 

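The two hunks above make InstantiatedTermDocs behave like SegmentTermDocs at exhaustion: once next() or skipTo() has returned false, the cursor stays parked on the last posting and further positioning calls keep returning false instead of running past the end of the array. A small usage sketch of that contract, assuming an IndexReader opened elsewhere (the class and method names are illustrative):

import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;

public class TermDocsExhaustionSketch {
  // walk all postings for a term, then show the post-exhaustion behaviour
  static void dump(IndexReader reader) throws IOException {
    TermDocs td = reader.termDocs(new Term("c", "danny"));
    while (td.next()) {
      System.out.println("doc=" + td.doc() + " freq=" + td.freq());
    }
    // exhausted: with this patch both implementations simply return false
    System.out.println(td.skipTo(100)); // false
    System.out.println(td.next());      // false
    td.close();
  }
}
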
Modified: lucene/java/branches/flex_1458/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java (original)
+++ lucene/java/branches/flex_1458/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java Fri Dec 11 16:22:30 2009
@@ -41,8 +41,6 @@
 import org.apache.lucene.index.TermPositions;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.util.AttributeImpl;
 
 /**
@@ -104,53 +102,155 @@
     }
     instantiatedIndexWriter.close();
 
+
     testEqualBehaviour(dir, ii);
 
-    testTermDocs(dir, ii);
 
 
   }
 
 
-  private void testTermDocs(Directory aprioriIndex, InstantiatedIndex testIndex) throws Exception {
+  private void testTermDocsSomeMore(Directory aprioriIndex, InstantiatedIndex testIndex) throws Exception {
 
     IndexReader aprioriReader = IndexReader.open(aprioriIndex, false);
     IndexReader testReader = testIndex.indexReaderFactory();
 
-    TermEnum aprioriTermEnum = aprioriReader.terms(new Term("c", "danny"));
+    // test seek
 
-    TermDocs aprioriTermDocs = aprioriReader.termDocs(aprioriTermEnum.term());
-    TermDocs testTermDocs = testReader.termDocs(aprioriTermEnum.term());
+    Term t = new Term("c", "danny");
+    TermEnum aprioriTermEnum = aprioriReader.terms(t);
+    TermEnum testTermEnum = testReader.terms(t);
 
-    assertEquals(aprioriTermDocs.next(), testTermDocs.next());
-    assertEquals(aprioriTermDocs.doc(), testTermDocs.doc());
+    assertEquals(aprioriTermEnum.term(), testTermEnum.term());
 
-    assertEquals(aprioriTermDocs.skipTo(100), testTermDocs.skipTo(100));
-    assertEquals(aprioriTermDocs.doc(), testTermDocs.doc());
+    t = aprioriTermEnum.term();
 
-    assertEquals(aprioriTermDocs.next(), testTermDocs.next());
-    assertEquals(aprioriTermDocs.doc(), testTermDocs.doc());
+    aprioriTermEnum.close();
+    testTermEnum.close();
+
+    TermDocs aprioriTermDocs = aprioriReader.termDocs(t);
+    TermDocs testTermDocs = testReader.termDocs(t);
 
     assertEquals(aprioriTermDocs.next(), testTermDocs.next());
+    assertEquals(aprioriTermDocs.freq(), testTermDocs.freq());
     assertEquals(aprioriTermDocs.doc(), testTermDocs.doc());
 
-    assertEquals(aprioriTermDocs.skipTo(110), testTermDocs.skipTo(110));
-    assertEquals(aprioriTermDocs.doc(), testTermDocs.doc());
+    if (aprioriTermDocs.skipTo(4)) {
+      assertTrue(testTermDocs.skipTo(4));
+      assertEquals(aprioriTermDocs.freq(), testTermDocs.freq());
+      assertEquals(aprioriTermDocs.doc(), testTermDocs.doc());
+    } else {
+      assertFalse(testTermDocs.skipTo(4));
+    }
 
-    assertEquals(aprioriTermDocs.skipTo(10), testTermDocs.skipTo(10));
-    assertEquals(aprioriTermDocs.doc(), testTermDocs.doc());
+    if (aprioriTermDocs.next()) {
+      assertTrue(testTermDocs.next());
+      assertEquals(aprioriTermDocs.freq(), testTermDocs.freq());
+      assertEquals(aprioriTermDocs.doc(), testTermDocs.doc());
+    } else {
+      assertFalse(testTermDocs.next());
+    }
 
-    assertEquals(aprioriTermDocs.skipTo(210), testTermDocs.skipTo(210));
-    assertEquals(aprioriTermDocs.doc(), testTermDocs.doc());
+
+    // beyond this point all next and skipto will return false
+
+    if (aprioriTermDocs.skipTo(100)) {
+      assertTrue(testTermDocs.skipTo(100));
+      assertEquals(aprioriTermDocs.freq(), testTermDocs.freq());
+      assertEquals(aprioriTermDocs.doc(), testTermDocs.doc());
+    } else {
+      assertFalse(testTermDocs.skipTo(100));
+    }
+
+
+    if (aprioriTermDocs.next()) {
+      assertTrue(testTermDocs.next());
+      assertEquals(aprioriTermDocs.freq(), testTermDocs.freq());
+      assertEquals(aprioriTermDocs.doc(), testTermDocs.doc());
+    } else {
+      assertFalse(testTermDocs.next());
+    }
+
+    if (aprioriTermDocs.skipTo(110)) {
+      assertTrue(testTermDocs.skipTo(110));
+      assertEquals(aprioriTermDocs.freq(), testTermDocs.freq());
+      assertEquals(aprioriTermDocs.doc(), testTermDocs.doc());
+    } else {
+      assertFalse(testTermDocs.skipTo(110));
+    }
+
+    if (aprioriTermDocs.skipTo(10)) {
+      assertTrue(testTermDocs.skipTo(10));
+      assertEquals(aprioriTermDocs.freq(), testTermDocs.freq());
+      assertEquals(aprioriTermDocs.doc(), testTermDocs.doc());
+    } else {
+      assertFalse(testTermDocs.skipTo(10));
+    }
+
+
+    if (aprioriTermDocs.skipTo(210)) {
+      assertTrue(testTermDocs.skipTo(210));
+      assertEquals(aprioriTermDocs.freq(), testTermDocs.freq());
+      assertEquals(aprioriTermDocs.doc(), testTermDocs.doc());
+    } else {
+      assertFalse(testTermDocs.skipTo(210));
+    }
 
     aprioriTermDocs.close();
-    aprioriReader.close();
+    testTermDocs.close();
+
+
+
+    // test seek null (AllTermDocs)
+    aprioriTermDocs = aprioriReader.termDocs(null);
+    testTermDocs = testReader.termDocs(null);
+
+    while (aprioriTermDocs.next()) {
+      assertTrue(testTermDocs.next());
+      assertEquals(aprioriTermDocs.freq(), testTermDocs.freq());
+      assertEquals(aprioriTermDocs.doc(), testTermDocs.doc());
+    }
+    assertFalse(testTermDocs.next());
+
+
+    aprioriTermDocs.close();
+    testTermDocs.close();
+
+
+    // test seek default
+    aprioriTermDocs = aprioriReader.termDocs();
+    testTermDocs = testReader.termDocs();
+
+    // this is invalid use of the API,
+    // but if the response differs then it's an indication that something might have changed.
+    // in 2.9 and 3.0 the two TermDocs-implementations returned different values at this point.
+    assertEquals("Descripency during invalid use of the TermDocs API, see comments in test code for details.",
+        aprioriTermDocs.next(), testTermDocs.next());
+
+    // start using the API one is supposed to
+
+    t = new Term("", "");
+    aprioriTermDocs.seek(t);
+    testTermDocs.seek(t);
+
+    while (aprioriTermDocs.next()) {
+      assertTrue(testTermDocs.next());
+      assertEquals(aprioriTermDocs.freq(), testTermDocs.freq());
+      assertEquals(aprioriTermDocs.doc(), testTermDocs.doc());
+    }
+    assertFalse(testTermDocs.next());
 
+    aprioriTermDocs.close();
     testTermDocs.close();
+
+
+    // clean up
+    aprioriReader.close();
     testReader.close();
 
   }
 
+
   private void assembleDocument(Document document, int i) {
     document.add(new Field("a", i + " Do you really want to go and live in that house all winter?", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     if (i > 0) {
@@ -213,18 +313,57 @@
    */
   protected void testEqualBehaviour(Directory aprioriIndex, InstantiatedIndex testIndex) throws Exception {
 
+
     testEquals(aprioriIndex,  testIndex);
 
-       // delete a few documents
-    IndexReader ir = IndexReader.open(aprioriIndex, false);
-    ir.deleteDocument(3);
-    ir.deleteDocument(8);
-    ir.close();
+    // delete a few documents
+    IndexReader air = IndexReader.open(aprioriIndex, false);
+    InstantiatedIndexReader tir = testIndex.indexReaderFactory();
+
+    assertEquals(air.isCurrent(), tir.isCurrent());
+    assertEquals(air.hasDeletions(), tir.hasDeletions());
+    assertEquals(air.maxDoc(), tir.maxDoc());
+    assertEquals(air.numDocs(), tir.numDocs());
+    assertEquals(air.numDeletedDocs(), tir.numDeletedDocs());
+
+    air.deleteDocument(3);
+    tir.deleteDocument(3);
+
+    assertEquals(air.isCurrent(), tir.isCurrent());
+    assertEquals(air.hasDeletions(), tir.hasDeletions());
+    assertEquals(air.maxDoc(), tir.maxDoc());
+    assertEquals(air.numDocs(), tir.numDocs());
+    assertEquals(air.numDeletedDocs(), tir.numDeletedDocs());
+
+    air.deleteDocument(8);
+    tir.deleteDocument(8);
+
+    assertEquals(air.isCurrent(), tir.isCurrent());
+    assertEquals(air.hasDeletions(), tir.hasDeletions());
+    assertEquals(air.maxDoc(), tir.maxDoc());
+    assertEquals(air.numDocs(), tir.numDocs());
+    assertEquals(air.numDeletedDocs(), tir.numDeletedDocs());    
+
+    // this (in 3.0) commits the deletions
+    air.close();
+    tir.close();
+
+    air = IndexReader.open(aprioriIndex, false);
+    tir = testIndex.indexReaderFactory();
+
+    assertEquals(air.isCurrent(), tir.isCurrent());
+    assertEquals(air.hasDeletions(), tir.hasDeletions());
+    assertEquals(air.maxDoc(), tir.maxDoc());
+    assertEquals(air.numDocs(), tir.numDocs());
+    assertEquals(air.numDeletedDocs(), tir.numDeletedDocs());
+
+    for (int d =0; d<air.maxDoc(); d++) {
+      assertEquals(air.isDeleted(d), tir.isDeleted(d));
+    }
+
+    air.close();
+    tir.close();
 
-    ir = testIndex.indexReaderFactory();
-    ir.deleteDocument(3);
-    ir.deleteDocument(8);
-    ir.close();
 
     // make sure they still equal
     testEquals(aprioriIndex,  testIndex);
@@ -232,6 +371,8 @@
 
   protected void testEquals(Directory aprioriIndex, InstantiatedIndex testIndex) throws Exception {
 
+    testTermDocsSomeMore(aprioriIndex, testIndex);
+
     IndexReader aprioriReader = IndexReader.open(aprioriIndex, false);
     IndexReader testReader = testIndex.indexReaderFactory();
 

Modified: lucene/java/branches/flex_1458/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java (original)
+++ lucene/java/branches/flex_1458/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java Fri Dec 11 16:22:30 2009
@@ -197,8 +197,8 @@
    */
   private static final Comparator termComparator = new Comparator() {
     public int compare(Object o1, Object o2) {
-      if (o1 instanceof Map.Entry) o1 = ((Map.Entry) o1).getKey();
-      if (o2 instanceof Map.Entry) o2 = ((Map.Entry) o2).getKey();
+      if (o1 instanceof Map.Entry<?,?>) o1 = ((Map.Entry<?,?>) o1).getKey();
+      if (o2 instanceof Map.Entry<?,?>) o2 = ((Map.Entry<?,?>) o2).getKey();
       if (o1 == o2) return 0;
       return ((String) o1).compareTo((String) o2);
     }

Modified: lucene/java/branches/flex_1458/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java (original)
+++ lucene/java/branches/flex_1458/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java Fri Dec 11 16:22:30 2009
@@ -274,7 +274,7 @@
     boolean toLowerCase = true;
 //    boolean toLowerCase = false;
 //    Set stopWords = null;
-    Set stopWords = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
+    Set<?> stopWords = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
     
     Analyzer[] analyzers = new Analyzer[] { 
         new SimpleAnalyzer(),
@@ -380,7 +380,7 @@
   private String[] readLines(File file) throws Exception {
     BufferedReader reader = new BufferedReader(new InputStreamReader(
         new FileInputStream(file))); 
-    List lines = new ArrayList();
+    List<String> lines = new ArrayList<String>();
     String line;  
     while ((line = reader.readLine()) != null) {
       String t = line.trim(); 
@@ -493,7 +493,7 @@
   
   /** returns all files matching the given file name patterns (quick n'dirty) */
   static String[] listFiles(String[] fileNames) {
-    LinkedHashSet allFiles = new LinkedHashSet();
+    LinkedHashSet<String> allFiles = new LinkedHashSet<String>();
     for (int i=0; i < fileNames.length; i++) {
       int k;
       if ((k = fileNames[i].indexOf("*")) < 0) {

Modified: lucene/java/branches/flex_1458/contrib/misc/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/misc/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/misc/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java (original)
+++ lucene/java/branches/flex_1458/contrib/misc/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java Fri Dec 11 16:22:30 2009
@@ -90,7 +90,7 @@
 
     Query q = qp.parse(qString);
 
-    HashSet expecteds = new HashSet();
+    HashSet<String> expecteds = new HashSet<String>();
     String[] vals = expectedVals.split(",");
     for (int i = 0; i < vals.length; i++) {
       if (vals[i].length() > 0)

Modified: lucene/java/branches/flex_1458/contrib/misc/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/misc/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/misc/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java (original)
+++ lucene/java/branches/flex_1458/contrib/misc/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java Fri Dec 11 16:22:30 2009
@@ -49,7 +49,7 @@
 public class TestPrecedenceQueryParser extends LocalizedTestCase {
   
   public TestPrecedenceQueryParser(String name) {
-    super(name, new HashSet(Arrays.asList(new String[]{
+    super(name, new HashSet<String>(Arrays.asList(new String[]{
       "testDateRange", "testNumber"
     })));
   }

Modified: lucene/java/branches/flex_1458/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java (original)
+++ lucene/java/branches/flex_1458/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java Fri Dec 11 16:22:30 2009
@@ -81,7 +81,7 @@
 	public void testDefaultFilter() throws Throwable
 	{
 		DuplicateFilter df=new DuplicateFilter(KEY_FIELD);		
-		HashSet results=new HashSet();
+		HashSet<String> results=new HashSet<String>();
 		ScoreDoc[] hits = searcher.search(tq,df, 1000).scoreDocs;
 		for(int i=0;i<hits.length;i++)
 		{
@@ -93,7 +93,7 @@
 	}
 	public void testNoFilter() throws Throwable
 	{
-		HashSet results=new HashSet();
+		HashSet<String> results=new HashSet<String>();
 		ScoreDoc[] hits = searcher.search(tq, null, 1000).scoreDocs;
 		assertTrue("Default searching should have found some matches",hits.length>0);
 		boolean dupsFound=false;
@@ -112,7 +112,7 @@
 	{
 		DuplicateFilter df=new DuplicateFilter(KEY_FIELD);
 		df.setProcessingMode(DuplicateFilter.PM_FAST_INVALIDATION);
-		HashSet results=new HashSet();
+		HashSet<String> results=new HashSet<String>();
 		ScoreDoc[] hits = searcher.search(tq,df, 1000).scoreDocs;
 		assertTrue("Filtered searching should have found some matches",hits.length>0);
 		for(int i=0;i<hits.length;i++)

Modified: lucene/java/branches/flex_1458/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java (original)
+++ lucene/java/branches/flex_1458/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java Fri Dec 11 16:22:30 2009
@@ -70,7 +70,7 @@
 		FuzzyLikeThisQuery flt=new FuzzyLikeThisQuery(10,analyzer);
 		flt.addTerms("smith", "name", 0.3f, 1);
 		Query q=flt.rewrite(searcher.getIndexReader());
-		HashSet queryTerms=new HashSet();
+		HashSet<Term> queryTerms=new HashSet<Term>();
 		q.extractTerms(queryTerms);
 		assertTrue("Should have variant smythe",queryTerms.contains(new Term("name","smythe")));
 		assertTrue("Should have variant smith",queryTerms.contains(new Term("name","smith")));
@@ -87,7 +87,7 @@
 		FuzzyLikeThisQuery flt=new FuzzyLikeThisQuery(10,analyzer);
 		flt.addTerms("jonathin smoth", "name", 0.3f, 1);
 		Query q=flt.rewrite(searcher.getIndexReader());
-		HashSet queryTerms=new HashSet();
+		HashSet<Term> queryTerms=new HashSet<Term>();
 		q.extractTerms(queryTerms);
 		assertTrue("Should have variant jonathan",queryTerms.contains(new Term("name","jonathan")));
 		assertTrue("Should have variant smith",queryTerms.contains(new Term("name","smith")));
@@ -103,7 +103,7 @@
 		FuzzyLikeThisQuery flt=new FuzzyLikeThisQuery(10,analyzer);
 		flt.addTerms("fernando smith", "name", 0.3f, 1);
 		Query q=flt.rewrite(searcher.getIndexReader());
-		HashSet queryTerms=new HashSet();
+		HashSet<Term> queryTerms=new HashSet<Term>();
 		q.extractTerms(queryTerms);
 		assertTrue("Should have variant smith",queryTerms.contains(new Term("name","smith")));
 		TopDocs topDocs = searcher.search(flt, 1);

Modified: lucene/java/branches/flex_1458/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java (original)
+++ lucene/java/branches/flex_1458/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java Fri Dec 11 16:22:30 2009
@@ -38,7 +38,7 @@
 		TermsFilter a=new TermsFilter();
 		a.addTerm(new Term("field1","a"));
 		a.addTerm(new Term("field1","b"));
-		HashSet cachedFilters=new HashSet();
+		HashSet<Filter> cachedFilters=new HashSet<Filter>();
 		cachedFilters.add(a);
 		TermsFilter b=new TermsFilter();
 		b.addTerm(new Term("field1","a"));

Modified: lucene/java/branches/flex_1458/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java (original)
+++ lucene/java/branches/flex_1458/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java Fri Dec 11 16:22:30 2009
@@ -71,7 +71,7 @@
     }
 
     public void testBoostFactor() throws Throwable {
-	Map originalValues = getOriginalValues();
+	Map<String,Float> originalValues = getOriginalValues();
 
 	MoreLikeThis mlt = new MoreLikeThis(
 		reader);
@@ -88,13 +88,13 @@
 
 	BooleanQuery query = (BooleanQuery) mlt.like(new StringReader(
 		"lucene release"));
-	List clauses = query.clauses();
+	List<BooleanClause> clauses = query.clauses();
 
 	assertEquals("Expected " + originalValues.size() + " clauses.",
 		originalValues.size(), clauses.size());
 
 	for (int i = 0; i < clauses.size(); i++) {
-	    BooleanClause clause = (BooleanClause) clauses.get(i);
+	    BooleanClause clause =  clauses.get(i);
 	    TermQuery tq = (TermQuery) clause.getQuery();
 	    Float termBoost = (Float) originalValues.get(tq.getTerm().text());
 	    assertNotNull("Expected term " + tq.getTerm().text(), termBoost);
@@ -106,8 +106,8 @@
 	}
     }
 
-    private Map getOriginalValues() throws IOException {
-	Map originalValues = new HashMap();
+    private Map<String,Float> getOriginalValues() throws IOException {
+	Map<String,Float> originalValues = new HashMap<String,Float>();
 	MoreLikeThis mlt = new MoreLikeThis(reader);
 	mlt.setMinDocFreq(1);
 	mlt.setMinTermFreq(1);
@@ -116,10 +116,10 @@
 	mlt.setBoost(true);
 	BooleanQuery query = (BooleanQuery) mlt.like(new StringReader(
 		"lucene release"));
-	List clauses = query.clauses();
+	List<BooleanClause> clauses = query.clauses();
 
 	for (int i = 0; i < clauses.size(); i++) {
-	    BooleanClause clause = (BooleanClause) clauses.get(i);
+	    BooleanClause clause = clauses.get(i);
 	    TermQuery tq = (TermQuery) clause.getQuery();
 	    originalValues.put(tq.getTerm().text(), Float.valueOf(tq.getBoost()));
 	}

Modified: lucene/java/branches/flex_1458/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java (original)
+++ lucene/java/branches/flex_1458/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java Fri Dec 11 16:22:30 2009
@@ -146,7 +146,7 @@
   }
 
   public void testBoostsSimple() throws Exception {
-    Map boosts = new HashMap();
+    Map<CharSequence,Float> boosts = new HashMap<CharSequence,Float>();
     boosts.put("b", Float.valueOf(5));
     boosts.put("t", Float.valueOf(10));
     String[] fields = { "b", "t" };

Modified: lucene/java/branches/flex_1458/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java (original)
+++ lucene/java/branches/flex_1458/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java Fri Dec 11 16:22:30 2009
@@ -141,7 +141,7 @@
   }
 
   public void testBoostsSimple() throws Exception {
-    Map boosts = new HashMap();
+    Map<CharSequence,Float> boosts = new HashMap<CharSequence,Float>();
     boosts.put("b", Float.valueOf(5));
     boosts.put("t", Float.valueOf(10));
     String[] fields = { "b", "t" };

Modified: lucene/java/branches/flex_1458/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java (original)
+++ lucene/java/branches/flex_1458/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java Fri Dec 11 16:22:30 2009
@@ -89,7 +89,7 @@
 public class TestQPHelper extends LocalizedTestCase {
 
   public TestQPHelper(String name) {
-    super(name, new HashSet(Arrays.asList(new String[]{
+    super(name, new HashSet<String>(Arrays.asList(new String[]{
       "testLegacyDateRange", "testDateRange",
       "testCJK", "testNumber", "testFarsiRangeCollating",
       "testLocalDateFormat"

Modified: lucene/java/branches/flex_1458/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQueryParserWrapper.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQueryParserWrapper.java?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQueryParserWrapper.java (original)
+++ lucene/java/branches/flex_1458/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQueryParserWrapper.java Fri Dec 11 16:22:30 2009
@@ -84,7 +84,7 @@
 public class TestQueryParserWrapper extends LocalizedTestCase {
 
   public TestQueryParserWrapper(String name) {
-    super(name, new HashSet(Arrays.asList(new String[]{
+    super(name, new HashSet<String>(Arrays.asList(new String[]{
       "testLegacyDateRange", "testDateRange",
       "testCJK", "testNumber", "testFarsiRangeCollating",
       "testLocalDateFormat"

Modified: lucene/java/branches/flex_1458/contrib/snowball/build.xml
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/snowball/build.xml?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/snowball/build.xml (original)
+++ lucene/java/branches/flex_1458/contrib/snowball/build.xml Fri Dec 11 16:22:30 2009
@@ -30,6 +30,15 @@
   <property name="snowball.root" value="snowball/website"/>
   <property name="bin.dir" location="bin"/>
 
+  <property name="analyzers.jar" location="${common.dir}/build/contrib/analyzers/common/lucene-analyzers-${version}.jar"/>
+  <available property="analyzers.jar.present" type="file" file="${analyzers.jar}"/>
+  
+  <path id="classpath">
+	<pathelement path="${lucene.jar}"/>
+	<pathelement path="${analyzers.jar}"/>
+	<pathelement path="${project.classpath}"/>
+  </path>
+
   <target name="jar" depends="compile" description="Create JAR">
     <jarify>
       <metainf-includes>
@@ -121,5 +130,11 @@
 
   </target>
 
+  <target name="compile-core" depends="build-analyzers, common.compile-core" />
+  
+  <target name="build-analyzers" unless="analyzers.jar.present">
+    <echo>Snowball building dependency ${analyzers.jar}</echo>
+    <ant antfile="../analyzers/build.xml" target="default" inheritall="false" dir="../analyzers" />
+  </target>
 
 </project>

Modified: lucene/java/branches/flex_1458/contrib/snowball/pom.xml.template
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/snowball/pom.xml.template?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/snowball/pom.xml.template (original)
+++ lucene/java/branches/flex_1458/contrib/snowball/pom.xml.template Fri Dec 11 16:22:30 2009
@@ -33,4 +33,11 @@
   <version>@version@</version>
   <description>Snowball Analyzers</description>
   <packaging>jar</packaging>
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.lucene</groupId>
+      <artifactId>lucene-analyzers</artifactId>
+      <version>@version@</version>
+    </dependency>
+  </dependencies>
 </project>

Modified: lucene/java/branches/flex_1458/contrib/snowball/src/java/org/apache/lucene/analysis/snowball/SnowballAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/snowball/src/java/org/apache/lucene/analysis/snowball/SnowballAnalyzer.java?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/snowball/src/java/org/apache/lucene/analysis/snowball/SnowballAnalyzer.java (original)
+++ lucene/java/branches/flex_1458/contrib/snowball/src/java/org/apache/lucene/analysis/snowball/SnowballAnalyzer.java Fri Dec 11 16:22:30 2009
@@ -19,6 +19,7 @@
 
 import org.apache.lucene.analysis.*;
 import org.apache.lucene.analysis.standard.*;
+import org.apache.lucene.analysis.tr.TurkishLowerCaseFilter;
 import org.apache.lucene.util.Version;
 
 import java.io.IOException;
@@ -33,9 +34,13 @@
  * {@link org.tartarus.snowball.ext.EnglishStemmer} is named "English".
  *
  * <p><b>NOTE</b>: This class uses the same {@link Version}
- * dependent settings as {@link StandardAnalyzer}.</p>
+ * dependent settings as {@link StandardAnalyzer}, with the following addition:
+ * <ul>
+ *   <li> As of 3.1, uses {@link TurkishLowerCaseFilter} for Turkish language.
+ * </ul>
+ * </p>
  */
-public class SnowballAnalyzer extends Analyzer {
+public final class SnowballAnalyzer extends Analyzer {
   private String name;
   private Set<?> stopSet;
   private final Version matchVersion;
@@ -43,7 +48,6 @@
   /** Builds the named analyzer with no stop words. */
   public SnowballAnalyzer(Version matchVersion, String name) {
     this.name = name;
-    setOverridesTokenStreamMethod(SnowballAnalyzer.class);
     this.matchVersion = matchVersion;
   }
 
@@ -60,7 +64,11 @@
   public TokenStream tokenStream(String fieldName, Reader reader) {
     TokenStream result = new StandardTokenizer(matchVersion, reader);
     result = new StandardFilter(result);
-    result = new LowerCaseFilter(matchVersion, result);
+    // Use a special lowercase filter for turkish, the stemmer expects it.
+    if (matchVersion.onOrAfter(Version.LUCENE_31) && name.equals("Turkish"))
+      result = new TurkishLowerCaseFilter(result);
+    else
+      result = new LowerCaseFilter(matchVersion, result);
     if (stopSet != null)
       result = new StopFilter(matchVersion,
                               result, stopSet);
@@ -71,7 +79,7 @@
   private class SavedStreams {
     Tokenizer source;
     TokenStream result;
-  };
+  }
   
   /** Returns a (possibly reused) {@link StandardTokenizer} filtered by a 
    * {@link StandardFilter}, a {@link LowerCaseFilter}, 
@@ -79,19 +87,16 @@
   @Override
   public TokenStream reusableTokenStream(String fieldName, Reader reader)
       throws IOException {
-    if (overridesTokenStreamMethod) {
-      // LUCENE-1678: force fallback to tokenStream() if we
-      // have been subclassed and that subclass overrides
-      // tokenStream but not reusableTokenStream
-      return tokenStream(fieldName, reader);
-    }
-    
     SavedStreams streams = (SavedStreams) getPreviousTokenStream();
     if (streams == null) {
       streams = new SavedStreams();
       streams.source = new StandardTokenizer(matchVersion, reader);
       streams.result = new StandardFilter(streams.source);
-      streams.result = new LowerCaseFilter(matchVersion, streams.result);
+      // Use a special lowercase filter for turkish, the stemmer expects it.
+      if (matchVersion.onOrAfter(Version.LUCENE_31) && name.equals("Turkish"))
+        streams.result = new TurkishLowerCaseFilter(streams.result);
+      else
+        streams.result = new LowerCaseFilter(matchVersion, streams.result);
       if (stopSet != null)
         streams.result = new StopFilter(matchVersion,
                                         streams.result, stopSet);

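With this change SnowballAnalyzer chooses the lowercase filter from matchVersion: on LUCENE_31 and later, the "Turkish" stemmer gets TurkishLowerCaseFilter, while older versions keep the generic LowerCaseFilter. A short usage sketch, mirroring the TestSnowball expectations further down (the field name "f" and the standalone class are illustrative):

import java.io.StringReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.snowball.SnowballAnalyzer;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.Version;

public class TurkishSnowballSketch {
  public static void main(String[] args) throws Exception {
    // LUCENE_CURRENT picks up the new Turkish lowercasing path here;
    // Version.LUCENE_30 would keep the old LowerCaseFilter behaviour.
    Analyzer a = new SnowballAnalyzer(Version.LUCENE_CURRENT, "Turkish");
    TokenStream ts = a.tokenStream("f", new StringReader("AĞACI"));
    TermAttribute term = ts.addAttribute(TermAttribute.class);
    while (ts.incrementToken()) {
      System.out.println(term.term()); // "ağaç", per TestSnowball.testTurkish below
    }
  }
}
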
Modified: lucene/java/branches/flex_1458/contrib/snowball/src/java/org/apache/lucene/analysis/snowball/SnowballFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/snowball/src/java/org/apache/lucene/analysis/snowball/SnowballFilter.java?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/snowball/src/java/org/apache/lucene/analysis/snowball/SnowballFilter.java (original)
+++ lucene/java/branches/flex_1458/contrib/snowball/src/java/org/apache/lucene/analysis/snowball/SnowballFilter.java Fri Dec 11 16:22:30 2009
@@ -22,12 +22,20 @@
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tr.TurkishLowerCaseFilter; // javadoc @link
+import org.apache.lucene.analysis.LowerCaseFilter; // javadoc @link
 import org.tartarus.snowball.SnowballProgram;
 
 /**
  * A filter that stems words using a Snowball-generated stemmer.
  *
  * Available stemmers are listed in {@link org.tartarus.snowball.ext}.
+ * <p><b>NOTE</b>: SnowballFilter expects lowercased text.
+ * <ul>
+ *  <li>For the Turkish language, see {@link TurkishLowerCaseFilter}.
+ *  <li>For other languages, see {@link LowerCaseFilter}.
+ * </ul>
+ * </p>
  */
 public final class SnowballFilter extends TokenFilter {
 

Modified: lucene/java/branches/flex_1458/contrib/snowball/src/test/org/apache/lucene/analysis/snowball/TestSnowball.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/snowball/src/test/org/apache/lucene/analysis/snowball/TestSnowball.java?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/snowball/src/test/org/apache/lucene/analysis/snowball/TestSnowball.java (original)
+++ lucene/java/branches/flex_1458/contrib/snowball/src/test/org/apache/lucene/analysis/snowball/TestSnowball.java Fri Dec 11 16:22:30 2009
@@ -17,12 +17,8 @@
  * limitations under the License.
  */
 
-import java.io.Reader;
-import java.io.StringReader;
-
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.WhitespaceTokenizer;
 import org.apache.lucene.index.Payload;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
@@ -41,32 +37,50 @@
         new String[]{"he", "abhor", "accent"});
   }
 
-  public void testReusableTokenStream() throws Exception {
+  /**
+   * Test english lowercasing. Test both cases (pre-3.1 and post-3.1) to ensure
+   * we lowercase I correctly for non-Turkish languages in either case.
+   */
+  public void testEnglishLowerCase() throws Exception {
     Analyzer a = new SnowballAnalyzer(Version.LUCENE_CURRENT, "English");
-    assertAnalyzesToReuse(a, "he abhorred accents",
-        new String[]{"he", "abhor", "accent"});
-    assertAnalyzesToReuse(a, "she abhorred him",
-        new String[]{"she", "abhor", "him"});
+    assertAnalyzesTo(a, "cryogenic", new String[] { "cryogen" });
+    assertAnalyzesTo(a, "CRYOGENIC", new String[] { "cryogen" });
+    
+    Analyzer b = new SnowballAnalyzer(Version.LUCENE_30, "English");
+    assertAnalyzesTo(b, "cryogenic", new String[] { "cryogen" });
+    assertAnalyzesTo(b, "CRYOGENIC", new String[] { "cryogen" });
   }
   
   /**
-   * subclass that acts just like whitespace analyzer for testing
+   * Test turkish lowercasing
    */
-  private class SnowballSubclassAnalyzer extends SnowballAnalyzer {
-    public SnowballSubclassAnalyzer(String name) {
-      super(Version.LUCENE_CURRENT, name);
-    }
-    
-    @Override
-    public TokenStream tokenStream(String fieldName, Reader reader) {
-      return new WhitespaceTokenizer(reader);
-    }
+  public void testTurkish() throws Exception {
+    Analyzer a = new SnowballAnalyzer(Version.LUCENE_CURRENT, "Turkish");
+
+    assertAnalyzesTo(a, "ağacı", new String[] { "ağaç" });
+    assertAnalyzesTo(a, "AĞACI", new String[] { "ağaç" });
+  }
+  
+  /**
+   * Test turkish lowercasing (old buggy behavior)
+   * @deprecated Remove this when support for 3.0 indexes is no longer required
+   */
+  public void testTurkishBWComp() throws Exception {
+    Analyzer a = new SnowballAnalyzer(Version.LUCENE_30, "Turkish");
+    // AĞACI in turkish lowercases to ağacı, but with lowercase filter ağaci.
+    // this fails due to wrong casing, because the stemmer
+    // will only remove -ı, not -i
+    assertAnalyzesTo(a, "ağacı", new String[] { "ağaç" });
+    assertAnalyzesTo(a, "AĞACI", new String[] { "ağaci" });
   }
+
   
-  public void testLUCENE1678BWComp() throws Exception {
-    Analyzer a = new SnowballSubclassAnalyzer("English");
+  public void testReusableTokenStream() throws Exception {
+    Analyzer a = new SnowballAnalyzer(Version.LUCENE_CURRENT, "English");
     assertAnalyzesToReuse(a, "he abhorred accents",
-        new String[]{"he", "abhorred", "accents"});
+        new String[]{"he", "abhor", "accent"});
+    assertAnalyzesToReuse(a, "she abhorred him",
+        new String[]{"she", "abhor", "him"});
   }
   
   public void testFilterTokens() throws Exception {

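The backwards-compatibility test above turns on the Turkish dotted/dotless I rule: uppercase 'I' lowercases to dotless 'ı' under Turkish casing but to dotted 'i' under the generic filter, which is why the LUCENE_30 path stems "AĞACI" to "ağaci". The same rule can be demonstrated with the plain JDK (this only illustrates the casing rule, not TurkishLowerCaseFilter's implementation):

import java.util.Locale;

public class DottedDotlessI {
  public static void main(String[] args) {
    String s = "AĞACI";
    // generic lowercasing maps 'I' to dotted 'i'
    System.out.println(s.toLowerCase(Locale.ENGLISH));         // "ağaci"
    // Turkish lowercasing maps 'I' to dotless 'ı'
    System.out.println(s.toLowerCase(new Locale("tr", "TR"))); // "ağacı"
  }
}
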
Modified: lucene/java/branches/flex_1458/contrib/spellchecker/src/java/org/apache/lucene/search/spell/SpellChecker.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/spellchecker/src/java/org/apache/lucene/search/spell/SpellChecker.java?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/spellchecker/src/java/org/apache/lucene/search/spell/SpellChecker.java (original)
+++ lucene/java/branches/flex_1458/contrib/spellchecker/src/java/org/apache/lucene/search/spell/SpellChecker.java Fri Dec 11 16:22:30 2009
@@ -32,6 +32,7 @@
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 
 /**
@@ -60,10 +61,14 @@
    * Field name for each word in the ngram index.
    */
   public static final String F_WORD = "word";
+  
+  private static final Term F_WORD_TERM = new Term(F_WORD);
 
   /**
    * the spell index
    */
+  // don't modify the directory directly - see #swapSearcher()
+  // TODO: why is this package private?
   Directory spellIndex;
 
   /**
@@ -72,7 +77,22 @@
   private float bStart = 2.0f;
   private float bEnd = 1.0f;
 
+  // don't use this searcher directly - see #swapSearcher()
   private IndexSearcher searcher;
+  
+  /*
+   * this locks all modifications to the current searcher. 
+   */
+  private final Object searcherLock = new Object();
+  
+  /*
+   * this lock synchronizes all possible modifications to the 
+   * current index directory. It should not be possible to try modifying
+   * the same index concurrently. Note: Do not acquire the searcher lock
+   * before acquiring this lock! 
+   */
+  private final Object modifyCurrentIndexLock = new Object();
+  private volatile boolean closed = false;
 
   // minimum score for hits generated by the spell checker query
   private float minScore = 0.5f;
@@ -82,15 +102,24 @@
   /**
    * Use the given directory as a spell checker index. The directory
    * is created if it doesn't exist yet.
+   * @param spellIndex the spell index directory
+   * @param sd the {@link StringDistance} measurement to use 
+   * @throws IOException if Spellchecker can not open the directory
+   */
+  public SpellChecker(Directory spellIndex, StringDistance sd) throws IOException {
+    setSpellIndex(spellIndex);
+    setStringDistance(sd);
+  }
+  /**
+   * Use the given directory as a spell checker index with a
+   * {@link LevensteinDistance} as the default {@link StringDistance}. The
+   * directory is created if it doesn't exist yet.
    * 
    * @param spellIndex
+   *          the spell index directory
    * @throws IOException
+   *           if spellchecker can not open the directory
    */
-  public SpellChecker(Directory spellIndex,StringDistance sd) throws IOException {
-    this.setSpellIndex(spellIndex);
-    this.setStringDistance(sd);
-  }
-
   public SpellChecker(Directory spellIndex) throws IOException {
     this(spellIndex, new LevensteinDistance());
   }
@@ -99,27 +128,41 @@
    * Use a different index as the spell checker index or re-open
    * the existing index if <code>spellIndex</code> is the same value
    * as given in the constructor.
-   * 
-   * @param spellIndex
-   * @throws IOException
-   */
-  public void setSpellIndex(Directory spellIndex) throws IOException {
-    this.spellIndex = spellIndex;
-    if (!IndexReader.indexExists(spellIndex)) {
-        IndexWriter writer = new IndexWriter(spellIndex, null, true, IndexWriter.MaxFieldLength.UNLIMITED);
-        writer.close();
-    }
-    // close the old searcher, if there was one
-    if (searcher != null) {
-      searcher.close();
+   * @param spellIndexDir the spell directory to use
+   * @throws AlreadyClosedException if the Spellchecker is already closed
+   * @throws  IOException if spellchecker can not open the directory
+   */
+  // TODO: we should make this final as it is called in the constructor
+  public void setSpellIndex(Directory spellIndexDir) throws IOException {
+    // this could be the same directory as the current spellIndex
+    // modifications to the directory should be synchronized 
+    synchronized (modifyCurrentIndexLock) {
+      ensureOpen();
+      if (!IndexReader.indexExists(spellIndexDir)) {
+          IndexWriter writer = new IndexWriter(spellIndexDir, null, true,
+              IndexWriter.MaxFieldLength.UNLIMITED);
+          writer.close();
+      }
+      swapSearcher(spellIndexDir);
     }
-    searcher = new IndexSearcher(this.spellIndex, true);
   }
-  
+  /**
+   * Sets the {@link StringDistance} implementation for this
+   * {@link SpellChecker} instance.
+   * 
+   * @param sd the {@link StringDistance} implementation for this
+   * {@link SpellChecker} instance
+   */
   public void setStringDistance(StringDistance sd) {
     this.sd = sd;
   }
-
+  /**
+   * Returns the {@link StringDistance} instance used by this
+   * {@link SpellChecker} instance.
+   * 
+   * @return the {@link StringDistance} instance used by this
+   *         {@link SpellChecker} instance.
+   */
   public StringDistance getStringDistance() {
     return sd;
   }
@@ -144,7 +187,8 @@
    *
    * @param word the word you want a spell check done on
    * @param numSug the number of suggested words
-   * @throws IOException
+   * @throws IOException if the underlying index throws an {@link IOException}
+   * @throws AlreadyClosedException if the Spellchecker is already closed
    * @return String[]
    */
   public String[] suggestSimilar(String word, int numSug) throws IOException {
@@ -169,98 +213,104 @@
    * words are restricted to the words present in this field.
    * @param morePopular return only the suggest words that are as frequent or more frequent than the searched word
    * (only if restricted mode = (indexReader!=null and field!=null)
-   * @throws IOException
+   * @throws IOException if the underlying index throws an {@link IOException}
+   * @throws AlreadyClosedException if the Spellchecker is already closed
    * @return String[] the sorted list of the suggest words with these 2 criteria:
    * first criteria: the edit distance, second criteria (only if restricted mode): the popularity
    * of the suggest words in the field of the user index
    */
   public String[] suggestSimilar(String word, int numSug, IndexReader ir,
       String field, boolean morePopular) throws IOException {
-
-    float min = this.minScore;
-    final int lengthWord = word.length();
-
-    final int freq = (ir != null && field != null) ? ir.docFreq(new Term(field, word)) : 0;
-    final int goalFreq = (morePopular && ir != null && field != null) ? freq : 0;
-    // if the word exists in the real index and we don't care for word frequency, return the word itself
-    if (!morePopular && freq > 0) {
-      return new String[] { word };
-    }
-
-    BooleanQuery query = new BooleanQuery();
-    String[] grams;
-    String key;
-
-    for (int ng = getMin(lengthWord); ng <= getMax(lengthWord); ng++) {
-
-      key = "gram" + ng; // form key
-
-      grams = formGrams(word, ng); // form word into ngrams (allow dups too)
-
-      if (grams.length == 0) {
-        continue; // hmm
-      }
-
-      if (bStart > 0) { // should we boost prefixes?
-        add(query, "start" + ng, grams[0], bStart); // matches start of word
-
-      }
-      if (bEnd > 0) { // should we boost suffixes
-        add(query, "end" + ng, grams[grams.length - 1], bEnd); // matches end of word
-
-      }
-      for (int i = 0; i < grams.length; i++) {
-        add(query, key, grams[i]);
-      }
-    }
-
-    int maxHits = 10 * numSug;
-    
-//    System.out.println("Q: " + query);
-    ScoreDoc[] hits = searcher.search(query, null, maxHits).scoreDocs;
-//    System.out.println("HITS: " + hits.length());
-    SuggestWordQueue sugQueue = new SuggestWordQueue(numSug);
-
-    // go thru more than 'maxr' matches in case the distance filter triggers
-    int stop = Math.min(hits.length, maxHits);
-    SuggestWord sugWord = new SuggestWord();
-    for (int i = 0; i < stop; i++) {
-
-      sugWord.string = searcher.doc(hits[i].doc).get(F_WORD); // get orig word
-
-      // don't suggest a word for itself, that would be silly
-      if (sugWord.string.equals(word)) {
-        continue;
+    // obtainSearcher calls ensureOpen
+    final IndexSearcher indexSearcher = obtainSearcher();
+    try{
+      float min = this.minScore;
+      final int lengthWord = word.length();
+  
+      final int freq = (ir != null && field != null) ? ir.docFreq(new Term(field, word)) : 0;
+      final int goalFreq = (morePopular && ir != null && field != null) ? freq : 0;
+      // if the word exists in the real index and we don't care for word frequency, return the word itself
+      if (!morePopular && freq > 0) {
+        return new String[] { word };
       }
-
-      // edit distance
-      sugWord.score = sd.getDistance(word,sugWord.string);
-      if (sugWord.score < min) {
-        continue;
+  
+      BooleanQuery query = new BooleanQuery();
+      String[] grams;
+      String key;
+  
+      for (int ng = getMin(lengthWord); ng <= getMax(lengthWord); ng++) {
+  
+        key = "gram" + ng; // form key
+  
+        grams = formGrams(word, ng); // form word into ngrams (allow dups too)
+  
+        if (grams.length == 0) {
+          continue; // hmm
+        }
+  
+        if (bStart > 0) { // should we boost prefixes?
+          add(query, "start" + ng, grams[0], bStart); // matches start of word
+  
+        }
+        if (bEnd > 0) { // should we boost suffixes
+          add(query, "end" + ng, grams[grams.length - 1], bEnd); // matches end of word
+  
+        }
+        for (int i = 0; i < grams.length; i++) {
+          add(query, key, grams[i]);
+        }
       }
-
-      if (ir != null && field != null) { // use the user index
-        sugWord.freq = ir.docFreq(new Term(field, sugWord.string)); // freq in the index
-        // don't suggest a word that is not present in the field
-        if ((morePopular && goalFreq > sugWord.freq) || sugWord.freq < 1) {
+  
+      int maxHits = 10 * numSug;
+      
+  //    System.out.println("Q: " + query);
+      ScoreDoc[] hits = indexSearcher.search(query, null, maxHits).scoreDocs;
+  //    System.out.println("HITS: " + hits.length());
+      SuggestWordQueue sugQueue = new SuggestWordQueue(numSug);
+  
+      // go through more candidates than numSug in case the distance filter triggers
+      int stop = Math.min(hits.length, maxHits);
+      SuggestWord sugWord = new SuggestWord();
+      for (int i = 0; i < stop; i++) {
+  
+        sugWord.string = indexSearcher.doc(hits[i].doc).get(F_WORD); // get orig word
+  
+        // don't suggest a word for itself, that would be silly
+        if (sugWord.string.equals(word)) {
+          continue;
+        }
+  
+        // edit distance
+        sugWord.score = sd.getDistance(word,sugWord.string);
+        if (sugWord.score < min) {
           continue;
         }
+  
+        if (ir != null && field != null) { // use the user index
+          sugWord.freq = ir.docFreq(new Term(field, sugWord.string)); // freq in the index
+          // don't suggest a word that is not present in the field
+          if ((morePopular && goalFreq > sugWord.freq) || sugWord.freq < 1) {
+            continue;
+          }
+        }
+        sugQueue.insertWithOverflow(sugWord);
+        if (sugQueue.size() == numSug) {
+          // if queue full, maintain the minimum score
+          min = sugQueue.top().score;
+        }
+        sugWord = new SuggestWord();
       }
-      sugQueue.insertWithOverflow(sugWord);
-      if (sugQueue.size() == numSug) {
-        // if queue full, maintain the minScore score
-        min = sugQueue.top().score;
+  
+      // convert to array string
+      String[] list = new String[sugQueue.size()];
+      for (int i = sugQueue.size() - 1; i >= 0; i--) {
+        list[i] = sugQueue.pop().string;
       }
-      sugWord = new SuggestWord();
-    }
-
-    // convert to array string
-    String[] list = new String[sugQueue.size()];
-    for (int i = sugQueue.size() - 1; i >= 0; i--) {
-      list[i] = sugQueue.pop().string;
+  
+      return list;
+    } finally {
+      releaseSearcher(indexSearcher);
     }
-
-    return list;
   }
 
   /**
@@ -297,24 +347,33 @@
   /**
    * Removes all terms from the spell check index.
    * @throws IOException
+   * @throws AlreadyClosedException if the Spellchecker is already closed
    */
   public void clearIndex() throws IOException {
-    IndexWriter writer = new IndexWriter(spellIndex, null, true, IndexWriter.MaxFieldLength.UNLIMITED);
-    writer.close();
-    
-    //close the old searcher
-    searcher.close();
-    searcher = new IndexSearcher(this.spellIndex, true);
+    synchronized (modifyCurrentIndexLock) {
+      ensureOpen();
+      final Directory dir = this.spellIndex;
+      final IndexWriter writer = new IndexWriter(dir, null, true, IndexWriter.MaxFieldLength.UNLIMITED);
+      writer.close();
+      swapSearcher(dir);
+    }
   }
 
   /**
    * Check whether the word exists in the index.
    * @param word
    * @throws IOException
-   * @return true iff the word exists in the index
+   * @throws AlreadyClosedException if the Spellchecker is already closed
+   * @return true if the word exists in the index
    */
   public boolean exist(String word) throws IOException {
-    return searcher.docFreq(new Term(F_WORD, word)) > 0;
+    // obtainSearcher calls ensureOpen
+    final IndexSearcher indexSearcher = obtainSearcher();
+    try{
+      return indexSearcher.docFreq(F_WORD_TERM.createTerm(word)) > 0;
+    } finally {
+      releaseSearcher(indexSearcher);
+    }
   }
 
   /**
@@ -322,37 +381,42 @@
    * @param dict Dictionary to index
    * @param mergeFactor mergeFactor to use when indexing
   * @param ramMB the max amount of memory in MB to use
+   * @throws AlreadyClosedException if the Spellchecker is already closed
    * @throws IOException
    */
   public void indexDictionary(Dictionary dict, int mergeFactor, int ramMB) throws IOException {
-    IndexWriter writer = new IndexWriter(spellIndex, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
-    writer.setMergeFactor(mergeFactor);
-    writer.setRAMBufferSizeMB(ramMB);
-
-    Iterator<String> iter = dict.getWordsIterator();
-    while (iter.hasNext()) {
-      String word = iter.next();
-
-      int len = word.length();
-      if (len < 3) {
-        continue; // too short we bail but "too long" is fine...
-      }
-
-      if (this.exist(word)) { // if the word already exist in the gramindex
-        continue;
-      }
-
-      // ok index the word
-      Document doc = createDocument(word, getMin(len), getMax(len));
-      writer.addDocument(doc);
-    }
-    // close writer
-    writer.optimize();
-    writer.close();
-    // also re-open the spell index to see our own changes when the next suggestion
-    // is fetched:
-    searcher.close();
-    searcher = new IndexSearcher(this.spellIndex, true);
+    synchronized (modifyCurrentIndexLock) {
+      ensureOpen();
+      final Directory dir = this.spellIndex;
+      final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
+          IndexWriter.MaxFieldLength.UNLIMITED);
+      writer.setMergeFactor(mergeFactor);
+      writer.setRAMBufferSizeMB(ramMB);
+  
+      Iterator<String> iter = dict.getWordsIterator();
+      while (iter.hasNext()) {
+        String word = iter.next();
+  
+        int len = word.length();
+        if (len < 3) {
+          continue; // too short, we bail, but "too long" is fine...
+        }
+  
+        if (this.exist(word)) { // if the word already exists in the gram index
+          continue;
+        }
+  
+        // ok index the word
+        Document doc = createDocument(word, getMin(len), getMax(len));
+        writer.addDocument(doc);
+      }
+      // close writer
+      writer.optimize();
+      writer.close();
+      // also re-open the spell index to see our own changes when the next suggestion
+      // is fetched:
+      swapSearcher(dir);
+    }
   }
 
   /**
@@ -364,7 +428,7 @@
     indexDictionary(dict, 300, 10);
   }
 
-  private int getMin(int l) {
+  private static int getMin(int l) {
     if (l > 5) {
       return 3;
     }
@@ -374,7 +438,7 @@
     return 1;
   }
 
-  private int getMax(int l) {
+  private static int getMax(int l) {
     if (l > 5) {
       return 4;
     }
@@ -409,4 +473,84 @@
       }
     }
   }
+  
+  private IndexSearcher obtainSearcher() {
+    synchronized (searcherLock) {
+      ensureOpen();
+      searcher.getIndexReader().incRef();
+      return searcher;
+    }
+  }
+  
+  private void releaseSearcher(final IndexSearcher aSearcher) throws IOException{
+      // don't check if open - always decRef 
+      // don't decrement the private searcher - could have been swapped
+      aSearcher.getIndexReader().decRef();      
+  }
+  
+  private void ensureOpen() {
+    if (closed) {
+      throw new AlreadyClosedException("Spellchecker has been closed");
+    }
+  }
+  
+  /**
+   * Close the IndexSearcher used by this SpellChecker
+   * @throws IOException if the close operation causes an {@link IOException}
+   * @throws AlreadyClosedException if the {@link SpellChecker} is already closed
+   */
+  public void close() throws IOException {
+    synchronized (searcherLock) {
+      ensureOpen();
+      closed = true;
+      if (searcher != null) {
+        searcher.close();
+      }
+      searcher = null;
+    }
+  }
+  
+  private void swapSearcher(final Directory dir) throws IOException {
+    /*
+     * opening a searcher is possibly very expensive.
+     * We would rather close it again if the SpellChecker was closed during
+     * this operation than block access to the current searcher while opening.
+     */
+    final IndexSearcher indexSearcher = createSearcher(dir);
+    synchronized (searcherLock) {
+      if(closed){
+        indexSearcher.close();
+        throw new AlreadyClosedException("Spellchecker has been closed");
+      }
+      if (searcher != null) {
+        searcher.close();
+      }
+      // set the spellindex in the sync block - ensure consistency.
+      searcher = indexSearcher;
+      this.spellIndex = dir;
+    }
+  }
+  
+  /**
+   * Creates a new read-only IndexSearcher 
+   * @param dir the directory used to open the searcher
+   * @return a new read-only IndexSearcher
+   * @throws IOException if there is a low-level IO error
+   */
+  // for testing purposes
+  IndexSearcher createSearcher(final Directory dir) throws IOException{
+    return new IndexSearcher(dir, true);
+  }
+  
+  /**
+   * Returns <code>true</code> if and only if the {@link SpellChecker} is
+   * closed, otherwise <code>false</code>.
+   * 
+   * @return <code>true</code> if and only if the {@link SpellChecker} is
+   *         closed, otherwise <code>false</code>.
+   */
+  boolean isClosed(){
+    return closed;
+  }
+  
 }
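
For context, here is a minimal usage sketch of the SpellChecker lifecycle as reworked in the diff above. The field name "field1", the sample words, and the misspelled query are illustrative only, not taken from the commit; error handling is kept deliberately simple.

import java.io.IOException;

import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.search.spell.LuceneDictionary;
import org.apache.lucene.search.spell.SpellChecker;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class SpellCheckerUsageSketch {
  public static void main(String[] args) throws IOException {
    // build a tiny user index to draw dictionary words from (hypothetical content)
    Directory userIndex = new RAMDirectory();
    IndexWriter writer = new IndexWriter(userIndex, new SimpleAnalyzer(), true,
        IndexWriter.MaxFieldLength.UNLIMITED);
    Document doc = new Document();
    doc.add(new Field("field1", "twenty twentyone twentytwo",
        Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);
    writer.close();

    // the spell index lives in its own Directory, managed by the SpellChecker
    Directory spellIndex = new RAMDirectory();
    SpellChecker spellChecker = new SpellChecker(spellIndex);
    IndexReader reader = IndexReader.open(userIndex, true);
    try {
      // build the n-gram index from the words of the user index
      spellChecker.indexDictionary(new LuceneDictionary(reader, "field1"));
      // ask for up to two suggestions for a misspelled word
      String[] suggestions = spellChecker.suggestSimilar("twentty", 2);
      for (String suggestion : suggestions) {
        System.out.println(suggestion);
      }
    } finally {
      reader.close();
      // once closed, further calls throw AlreadyClosedException
      spellChecker.close();
    }
  }
}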

Modified: lucene/java/branches/flex_1458/contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestSpellChecker.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestSpellChecker.java?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestSpellChecker.java (original)
+++ lucene/java/branches/flex_1458/contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestSpellChecker.java Fri Dec 11 16:22:30 2009
@@ -18,8 +18,13 @@
  */
 
 import java.io.IOException;
-
-import junit.framework.TestCase;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.lucene.analysis.SimpleAnalyzer;
 import org.apache.lucene.document.Document;
@@ -27,9 +32,12 @@
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.English;
+import org.apache.lucene.util.LuceneTestCase;
 
 
 /**
@@ -37,9 +45,11 @@
  *
  *
  */
-public class TestSpellChecker extends TestCase {
-  private SpellChecker spellChecker;
+public class TestSpellChecker extends LuceneTestCase {
+  private SpellCheckerMock spellChecker;
   private Directory userindex, spellindex;
+  private final Random random = newRandom();
+  private List<IndexSearcher> searchers;
 
   @Override
   protected void setUp() throws Exception {
@@ -56,10 +66,10 @@
       writer.addDocument(doc);
     }
     writer.close();
-
+    searchers = Collections.synchronizedList(new ArrayList<IndexSearcher>());
     // create the spellChecker
     spellindex = new RAMDirectory();
-    spellChecker = new SpellChecker(spellindex);
+    spellChecker = new SpellCheckerMock(spellindex);
   }
 
 
@@ -75,7 +85,9 @@
     int num_field2 = this.numdoc();
 
     assertEquals(num_field2, num_field1 + 1);
-
+    
+    assertLastSearcherOpen(4);
+    
     checkCommonSuggestions(r);
     checkLevenshteinSuggestions(r);
     
@@ -201,4 +213,186 @@
     return num;
   }
   
+  public void testClose() throws IOException {
+    IndexReader r = IndexReader.open(userindex, true);
+    spellChecker.clearIndex();
+    String field = "field1";
+    addwords(r, "field1");
+    int num_field1 = this.numdoc();
+    addwords(r, "field2");
+    int num_field2 = this.numdoc();
+    assertEquals(num_field2, num_field1 + 1);
+    checkCommonSuggestions(r);
+    assertLastSearcherOpen(4);
+    spellChecker.close();
+    assertSearchersClosed();
+    try {
+      spellChecker.close();
+      fail("spellchecker was already closed");
+    } catch (AlreadyClosedException e) {
+      // expected
+    }
+    try {
+      checkCommonSuggestions(r);
+      fail("spellchecker was already closed");
+    } catch (AlreadyClosedException e) {
+      // expected
+    }
+    
+    try {
+      spellChecker.clearIndex();
+      fail("spellchecker was already closed");
+    } catch (AlreadyClosedException e) {
+      // expected
+    }
+    
+    try {
+      spellChecker.indexDictionary(new LuceneDictionary(r, field));
+      fail("spellchecker was already closed");
+    } catch (AlreadyClosedException e) {
+      // expected
+    }
+    
+    try {
+      spellChecker.setSpellIndex(spellindex);
+      fail("spellchecker was already closed");
+    } catch (AlreadyClosedException e) {
+      // expected
+    }
+    assertEquals(4, searchers.size());
+    assertSearchersClosed();
+  }
+  
+  /*
+   * tests if the internally shared indexsearcher is correctly closed 
+   * when the spellchecker is concurrently accessed and closed.
+   */
+  public void testConcurrentAccess() throws IOException, InterruptedException {
+    assertEquals(1, searchers.size());
+    final IndexReader r = IndexReader.open(userindex, true);
+    spellChecker.clearIndex();
+    assertEquals(2, searchers.size());
+    addwords(r, "field1");
+    assertEquals(3, searchers.size());
+    int num_field1 = this.numdoc();
+    addwords(r, "field2");
+    assertEquals(4, searchers.size());
+    int num_field2 = this.numdoc();
+    assertEquals(num_field2, num_field1 + 1);
+    int numThreads = 5 + this.random.nextInt(5);
+    ExecutorService executor = Executors.newFixedThreadPool(numThreads);
+    SpellCheckWorker[] workers = new SpellCheckWorker[numThreads];
+    for (int i = 0; i < numThreads; i++) {
+      SpellCheckWorker spellCheckWorker = new SpellCheckWorker(r);
+      executor.execute(spellCheckWorker);
+      workers[i] = spellCheckWorker;
+      
+    }
+    int iterations = 5 + random.nextInt(5);
+    for (int i = 0; i < iterations; i++) {
+      Thread.sleep(100);
+      // concurrently reset the spell index
+      spellChecker.setSpellIndex(this.spellindex);
+      // for debug - prints the internal open searchers 
+      // showSearchersOpen();
+    }
+    
+    spellChecker.close();
+    executor.shutdown();
+    executor.awaitTermination(5, TimeUnit.SECONDS);
+    
+    
+    for (int i = 0; i < workers.length; i++) {
+      assertFalse(workers[i].failed);
+      assertTrue(workers[i].terminated);
+    }
+    // 4 searchers more than iterations
+    // 1. at creation
+    // 2. clearIndex()
+    // 3. and 4. during addwords
+    assertEquals(iterations + 4, searchers.size());
+    assertSearchersClosed();
+    
+  }
+  
+  private void assertLastSearcherOpen(int numSearchers) {
+    assertEquals(numSearchers, searchers.size());
+    IndexSearcher[] searcherArray = searchers.toArray(new IndexSearcher[0]);
+    for (int i = 0; i < searcherArray.length; i++) {
+      if (i == searcherArray.length - 1) {
+        assertTrue("expected last searcher open but was closed",
+            searcherArray[i].getIndexReader().getRefCount() > 0);
+      } else {
+        assertFalse("expected closed searcher but was open - Index: " + i,
+            searcherArray[i].getIndexReader().getRefCount() > 0);
+      }
+    }
+  }
+  
+  private void assertSearchersClosed() {
+    for (IndexSearcher searcher : searchers) {
+      assertEquals(0, searcher.getIndexReader().getRefCount());
+    }
+  }
+  
+  private void showSearchersOpen() {
+    int count = 0;
+    for (IndexSearcher searcher : searchers) {
+      if(searcher.getIndexReader().getRefCount() > 0)
+        ++count;
+    } 
+    System.out.println(count);
+  }
+
+  
+  private class SpellCheckWorker implements Runnable {
+    private final IndexReader reader;
+    boolean terminated = false;
+    boolean failed = false;
+    
+    SpellCheckWorker(IndexReader reader) {
+      super();
+      this.reader = reader;
+    }
+    
+    public void run() {
+      try {
+        while (true) {
+          try {
+            checkCommonSuggestions(reader);
+          } catch (AlreadyClosedException e) {
+            
+            return;
+          } catch (Throwable e) {
+            
+            e.printStackTrace();
+            failed = true;
+            return;
+          }
+        }
+      } finally {
+        terminated = true;
+      }
+    }
+    
+  }
+  
+  class SpellCheckerMock extends SpellChecker {
+    public SpellCheckerMock(Directory spellIndex) throws IOException {
+      super(spellIndex);
+    }
+
+    public SpellCheckerMock(Directory spellIndex, StringDistance sd)
+        throws IOException {
+      super(spellIndex, sd);
+    }
+
+    @Override
+    IndexSearcher createSearcher(Directory dir) throws IOException {
+      IndexSearcher searcher = super.createSearcher(dir);
+      TestSpellChecker.this.searchers.add(searcher);
+      return searcher;
+    }
+  }
+  
 }
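
A minimal sketch of the concurrent-access pattern the new testConcurrentAccess above exercises: one thread keeps asking for suggestions while another swaps the spell index and finally closes the checker; the ref-counted searcher keeps in-flight lookups valid. The query word, sleep interval, and thread count here are arbitrary, not the values used in the test.

import java.io.IOException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import org.apache.lucene.search.spell.SpellChecker;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class ConcurrentSpellCheckSketch {
  public static void main(String[] args) throws IOException, InterruptedException {
    final Directory spellIndex = new RAMDirectory();
    final SpellChecker spellChecker = new SpellChecker(spellIndex);

    ExecutorService executor = Executors.newFixedThreadPool(1);
    executor.execute(new Runnable() {
      public void run() {
        try {
          while (true) {
            // readers keep working against whatever searcher is currently installed
            spellChecker.suggestSimilar("exmaple", 3);
          }
        } catch (AlreadyClosedException e) {
          // expected once close() has been called
        } catch (IOException e) {
          e.printStackTrace();
        }
      }
    });

    Thread.sleep(100);
    // swapping the spell index underneath the reader thread is safe:
    // in-flight suggestions hold a reference on the old searcher until released
    spellChecker.setSpellIndex(spellIndex);
    spellChecker.close();

    executor.shutdown();
    executor.awaitTermination(5, TimeUnit.SECONDS);
  }
}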

Modified: lucene/java/branches/flex_1458/docs/contributions.html
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/docs/contributions.html?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/docs/contributions.html (original)
+++ lucene/java/branches/flex_1458/docs/contributions.html Fri Dec 11 16:22:30 2009
@@ -153,15 +153,15 @@
 <a href="api/contrib-benchmark/index.html">Benchmark</a>
 </div>
 <div class="menuitem">
-<a href="api/contrib-collation/index.html">Collation</a>
-</div>
-<div class="menuitem">
 <a href="api/contrib-fast-vector-highlighter/index.html">Fast Vector Highlighter</a>
 </div>
 <div class="menuitem">
 <a href="api/contrib-highlighter/index.html">Highlighter</a>
 </div>
 <div class="menuitem">
+<a href="api/contrib-icu/index.html">ICU</a>
+</div>
+<div class="menuitem">
 <a href="api/contrib-instantiated/index.html">Instantiated</a>
 </div>
 <div class="menuitem">

Modified: lucene/java/branches/flex_1458/docs/demo.html
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/docs/demo.html?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/docs/demo.html (original)
+++ lucene/java/branches/flex_1458/docs/demo.html Fri Dec 11 16:22:30 2009
@@ -153,15 +153,15 @@
 <a href="api/contrib-benchmark/index.html">Benchmark</a>
 </div>
 <div class="menuitem">
-<a href="api/contrib-collation/index.html">Collation</a>
-</div>
-<div class="menuitem">
 <a href="api/contrib-fast-vector-highlighter/index.html">Fast Vector Highlighter</a>
 </div>
 <div class="menuitem">
 <a href="api/contrib-highlighter/index.html">Highlighter</a>
 </div>
 <div class="menuitem">
+<a href="api/contrib-icu/index.html">ICU</a>
+</div>
+<div class="menuitem">
 <a href="api/contrib-instantiated/index.html">Instantiated</a>
 </div>
 <div class="menuitem">

Modified: lucene/java/branches/flex_1458/docs/demo2.html
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/docs/demo2.html?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/docs/demo2.html (original)
+++ lucene/java/branches/flex_1458/docs/demo2.html Fri Dec 11 16:22:30 2009
@@ -153,15 +153,15 @@
 <a href="api/contrib-benchmark/index.html">Benchmark</a>
 </div>
 <div class="menuitem">
-<a href="api/contrib-collation/index.html">Collation</a>
-</div>
-<div class="menuitem">
 <a href="api/contrib-fast-vector-highlighter/index.html">Fast Vector Highlighter</a>
 </div>
 <div class="menuitem">
 <a href="api/contrib-highlighter/index.html">Highlighter</a>
 </div>
 <div class="menuitem">
+<a href="api/contrib-icu/index.html">ICU</a>
+</div>
+<div class="menuitem">
 <a href="api/contrib-instantiated/index.html">Instantiated</a>
 </div>
 <div class="menuitem">

Modified: lucene/java/branches/flex_1458/docs/demo3.html
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/docs/demo3.html?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/docs/demo3.html (original)
+++ lucene/java/branches/flex_1458/docs/demo3.html Fri Dec 11 16:22:30 2009
@@ -153,15 +153,15 @@
 <a href="api/contrib-benchmark/index.html">Benchmark</a>
 </div>
 <div class="menuitem">
-<a href="api/contrib-collation/index.html">Collation</a>
-</div>
-<div class="menuitem">
 <a href="api/contrib-fast-vector-highlighter/index.html">Fast Vector Highlighter</a>
 </div>
 <div class="menuitem">
 <a href="api/contrib-highlighter/index.html">Highlighter</a>
 </div>
 <div class="menuitem">
+<a href="api/contrib-icu/index.html">ICU</a>
+</div>
+<div class="menuitem">
 <a href="api/contrib-instantiated/index.html">Instantiated</a>
 </div>
 <div class="menuitem">

Modified: lucene/java/branches/flex_1458/docs/demo4.html
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/docs/demo4.html?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/docs/demo4.html (original)
+++ lucene/java/branches/flex_1458/docs/demo4.html Fri Dec 11 16:22:30 2009
@@ -153,15 +153,15 @@
 <a href="api/contrib-benchmark/index.html">Benchmark</a>
 </div>
 <div class="menuitem">
-<a href="api/contrib-collation/index.html">Collation</a>
-</div>
-<div class="menuitem">
 <a href="api/contrib-fast-vector-highlighter/index.html">Fast Vector Highlighter</a>
 </div>
 <div class="menuitem">
 <a href="api/contrib-highlighter/index.html">Highlighter</a>
 </div>
 <div class="menuitem">
+<a href="api/contrib-icu/index.html">ICU</a>
+</div>
+<div class="menuitem">
 <a href="api/contrib-instantiated/index.html">Instantiated</a>
 </div>
 <div class="menuitem">

Modified: lucene/java/branches/flex_1458/docs/fileformats.html
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/docs/fileformats.html?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/docs/fileformats.html (original)
+++ lucene/java/branches/flex_1458/docs/fileformats.html Fri Dec 11 16:22:30 2009
@@ -153,15 +153,15 @@
 <a href="api/contrib-benchmark/index.html">Benchmark</a>
 </div>
 <div class="menuitem">
-<a href="api/contrib-collation/index.html">Collation</a>
-</div>
-<div class="menuitem">
 <a href="api/contrib-fast-vector-highlighter/index.html">Fast Vector Highlighter</a>
 </div>
 <div class="menuitem">
 <a href="api/contrib-highlighter/index.html">Highlighter</a>
 </div>
 <div class="menuitem">
+<a href="api/contrib-icu/index.html">ICU</a>
+</div>
+<div class="menuitem">
 <a href="api/contrib-instantiated/index.html">Instantiated</a>
 </div>
 <div class="menuitem">

Modified: lucene/java/branches/flex_1458/docs/gettingstarted.html
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/docs/gettingstarted.html?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/docs/gettingstarted.html (original)
+++ lucene/java/branches/flex_1458/docs/gettingstarted.html Fri Dec 11 16:22:30 2009
@@ -153,15 +153,15 @@
 <a href="api/contrib-benchmark/index.html">Benchmark</a>
 </div>
 <div class="menuitem">
-<a href="api/contrib-collation/index.html">Collation</a>
-</div>
-<div class="menuitem">
 <a href="api/contrib-fast-vector-highlighter/index.html">Fast Vector Highlighter</a>
 </div>
 <div class="menuitem">
 <a href="api/contrib-highlighter/index.html">Highlighter</a>
 </div>
 <div class="menuitem">
+<a href="api/contrib-icu/index.html">ICU</a>
+</div>
+<div class="menuitem">
 <a href="api/contrib-instantiated/index.html">Instantiated</a>
 </div>
 <div class="menuitem">

Modified: lucene/java/branches/flex_1458/docs/index.html
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/docs/index.html?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/docs/index.html (original)
+++ lucene/java/branches/flex_1458/docs/index.html Fri Dec 11 16:22:30 2009
@@ -151,15 +151,15 @@
 <a href="api/contrib-benchmark/index.html">Benchmark</a>
 </div>
 <div class="menuitem">
-<a href="api/contrib-collation/index.html">Collation</a>
-</div>
-<div class="menuitem">
 <a href="api/contrib-fast-vector-highlighter/index.html">Fast Vector Highlighter</a>
 </div>
 <div class="menuitem">
 <a href="api/contrib-highlighter/index.html">Highlighter</a>
 </div>
 <div class="menuitem">
+<a href="api/contrib-icu/index.html">ICU</a>
+</div>
+<div class="menuitem">
 <a href="api/contrib-instantiated/index.html">Instantiated</a>
 </div>
 <div class="menuitem">

Modified: lucene/java/branches/flex_1458/docs/linkmap.html
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/docs/linkmap.html?rev=889683&r1=889682&r2=889683&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/docs/linkmap.html (original)
+++ lucene/java/branches/flex_1458/docs/linkmap.html Fri Dec 11 16:22:30 2009
@@ -151,15 +151,15 @@
 <a href="api/contrib-benchmark/index.html">Benchmark</a>
 </div>
 <div class="menuitem">
-<a href="api/contrib-collation/index.html">Collation</a>
-</div>
-<div class="menuitem">
 <a href="api/contrib-fast-vector-highlighter/index.html">Fast Vector Highlighter</a>
 </div>
 <div class="menuitem">
 <a href="api/contrib-highlighter/index.html">Highlighter</a>
 </div>
 <div class="menuitem">
+<a href="api/contrib-icu/index.html">ICU</a>
+</div>
+<div class="menuitem">
 <a href="api/contrib-instantiated/index.html">Instantiated</a>
 </div>
 <div class="menuitem">
@@ -367,22 +367,22 @@
 <a href="api/contrib-benchmark/index.html">Benchmark</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>javadoc-contrib-benchmark</em>
 </li>
 </ul>
-        
+		    
 <ul>
 <li>
-<a href="api/contrib-collation/index.html">Collation</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>javadoc-contrib-collation</em>
+<a href="api/contrib-fast-vector-highlighter/index.html">Fast Vector Highlighter</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>javadoc-contrib-fast-vector-highlighter</em>
 </li>
 </ul>
 		    
 <ul>
 <li>
-<a href="api/contrib-fast-vector-highlighter/index.html">Fast Vector Highlighter</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>javadoc-contrib-fast-vector-highlighter</em>
+<a href="api/contrib-highlighter/index.html">Highlighter</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>javadoc-contrib-highlighter</em>
 </li>
 </ul>
 		    
 <ul>
 <li>
-<a href="api/contrib-highlighter/index.html">Highlighter</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>javadoc-contrib-highlighter</em>
+<a href="api/contrib-icu/index.html">ICU</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>javadoc-contrib-icu</em>
 </li>
 </ul>
 		    


