lucene-java-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From uschind...@apache.org
Subject svn commit: r931278 [10/10] - in /lucene/dev/trunk: lucene/ lucene/backwards/src/ lucene/backwards/src/java/org/apache/lucene/index/ lucene/backwards/src/java/org/apache/lucene/index/codecs/ lucene/backwards/src/java/org/apache/lucene/search/ lucene/ba...
Date Tue, 06 Apr 2010 19:19:36 GMT
Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestStressIndexing.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestStressIndexing.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestStressIndexing.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestStressIndexing.java Tue Apr  6 19:19:27 2010
@@ -26,7 +26,7 @@ import org.apache.lucene.search.*;
 import java.util.Random;
 import java.io.File;
 
-public class TestStressIndexing extends LuceneTestCase {
+public class TestStressIndexing extends MultiCodecTestCase {
   private Random RANDOM;
 
   private static abstract class TimedThread extends Thread {
@@ -152,6 +152,8 @@ public class TestStressIndexing extends 
 
     modifier.close();
 
+    FlexTestUtil.verifyFlexVsPreFlex(RANDOM, directory);
+
     for(int i=0;i<numThread;i++)
       assertTrue(! threads[i].failed);
 

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java Tue Apr  6 19:19:27 2010
@@ -24,6 +24,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
+import org.apache.lucene.util.*;
 
 import junit.framework.Assert;
 
@@ -35,11 +36,8 @@ import org.apache.lucene.index.IndexWrit
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockRAMDirectory;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.StringHelper;
-import org.apache.lucene.util._TestUtil;
 
-public class TestStressIndexing2 extends LuceneTestCase {
+public class TestStressIndexing2 extends MultiCodecTestCase {
   static int maxFields=4;
   static int bigFieldSize=10;
   static boolean sameFieldOrder=false;
@@ -73,6 +71,8 @@ public class TestStressIndexing2 extends
     IndexReader r = dw.writer.getReader();
     dw.writer.commit();
     verifyEquals(r, dir, "id");
+    FlexTestUtil.verifyFlexVsPreFlex(this.r, r);
+    FlexTestUtil.verifyFlexVsPreFlex(this.r, dir);
     r.close();
     dw.writer.close();
     dir.close();
@@ -94,11 +94,15 @@ public class TestStressIndexing2 extends
     // verifyEquals(dir2, dir2, "id");
 
     verifyEquals(dir1, dir2, "id");
+    FlexTestUtil.verifyFlexVsPreFlex(r, dir1);
+    FlexTestUtil.verifyFlexVsPreFlex(r, dir2);
   }
 
   public void testMultiConfig() throws Throwable {
     // test lots of smaller different params together
+
     r = newRandom();
+
     for (int i=0; i<20; i++) {  // increase iterations for better testing
       sameFieldOrder=r.nextBoolean();
       mergeFactor=r.nextInt(3)+2;
@@ -113,8 +117,13 @@ public class TestStressIndexing2 extends
       Directory dir1 = new MockRAMDirectory();
       Directory dir2 = new MockRAMDirectory();
       Map<String,Document> docs = indexRandom(nThreads, iter, range, dir1, maxThreadStates, doReaderPooling);
+      //System.out.println("TEST: index serial");
       indexSerial(docs, dir2);
+      //System.out.println("TEST: verify");
       verifyEquals(dir1, dir2, "id");
+
+      FlexTestUtil.verifyFlexVsPreFlex(r, dir1);
+      FlexTestUtil.verifyFlexVsPreFlex(r, dir2);
     }
   }
 
@@ -216,7 +225,7 @@ public class TestStressIndexing2 extends
         threads[i].join();
       }
 
-      // w.optimize();
+      //w.optimize();
       w.close();    
 
       for (int i=0; i<threads.length; i++) {
@@ -227,6 +236,7 @@ public class TestStressIndexing2 extends
       }
     }
 
+    //System.out.println("TEST: checkindex");
     _TestUtil.checkIndex(dir);
 
     return docs;
@@ -278,31 +288,65 @@ public class TestStressIndexing2 extends
 
     int[] r2r1 = new int[r2.maxDoc()];   // r2 id to r1 id mapping
 
-    TermDocs termDocs1 = r1.termDocs();
-    TermDocs termDocs2 = r2.termDocs();
-
     // create mapping from id2 space to id2 based on idField
     idField = StringHelper.intern(idField);
-    TermEnum termEnum = r1.terms (new Term (idField, ""));
-    do {
-      Term term = termEnum.term();
-      if (term==null || term.field() != idField) break;
+    final Fields f1 = MultiFields.getFields(r1);
+    if (f1 == null) {
+      // make sure r2 is empty
+      assertNull(MultiFields.getFields(r2));
+      return;
+    }
+    final Terms terms1 = f1.terms(idField);
+    if (terms1 == null) {
+      assertTrue(MultiFields.getFields(r2) == null ||
+                 MultiFields.getFields(r2).terms(idField) == null);
+      return;
+    }
+    final TermsEnum termsEnum = terms1.iterator();
 
-      termDocs1.seek (termEnum);
-      if (!termDocs1.next()) {
+    final Bits delDocs1 = MultiFields.getDeletedDocs(r1);
+    final Bits delDocs2 = MultiFields.getDeletedDocs(r2);
+    
+    Fields fields = MultiFields.getFields(r2);
+    if (fields == null) {
+      // make sure r1 is in fact empty (eg has only all
+      // deleted docs):
+      DocsEnum docs = null;
+      while(termsEnum.next() != null) {
+        docs = termsEnum.docs(delDocs1, docs);
+        while(docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+          fail("r1 is not empty but r2 is");
+        }
+      }
+      return;
+    }
+    Terms terms2 = fields.terms(idField);
+
+    DocsEnum termDocs1 = null;
+    DocsEnum termDocs2 = null;
+
+    while(true) {
+      BytesRef term = termsEnum.next();
+      //System.out.println("TEST: match id term=" + term);
+      if (term == null) {
+        break;
+      }
+
+      termDocs1 = termsEnum.docs(delDocs1, termDocs1);
+      termDocs2 = terms2.docs(delDocs2, term, termDocs2);
+
+      if (termDocs1.nextDoc() == DocsEnum.NO_MORE_DOCS) {
         // This doc is deleted and wasn't replaced
-        termDocs2.seek(termEnum);
-        assertFalse(termDocs2.next());
+        assertTrue(termDocs2 == null || termDocs2.nextDoc() == DocsEnum.NO_MORE_DOCS);
         continue;
       }
 
-      int id1 = termDocs1.doc();
-      assertFalse(termDocs1.next());
+      int id1 = termDocs1.docID();
+      assertEquals(DocsEnum.NO_MORE_DOCS, termDocs1.nextDoc());
 
-      termDocs2.seek(termEnum);
-      assertTrue(termDocs2.next());
-      int id2 = termDocs2.doc();
-      assertFalse(termDocs2.next());
+      assertTrue(termDocs2.nextDoc() != DocsEnum.NO_MORE_DOCS);
+      int id2 = termDocs2.docID();
+      assertEquals(DocsEnum.NO_MORE_DOCS, termDocs2.nextDoc());
 
       r2r1[id2] = id1;
 
@@ -336,73 +380,108 @@ public class TestStressIndexing2 extends
         throw e;
       }
 
-    } while (termEnum.next());
+    }
 
-    termEnum.close();
+    //System.out.println("TEST: done match id");
 
     // Verify postings
-    TermEnum termEnum1 = r1.terms (new Term ("", ""));
-    TermEnum termEnum2 = r2.terms (new Term ("", ""));
+    //System.out.println("TEST: create te1");
+    final FieldsEnum fields1 = MultiFields.getFields(r1).iterator();
+    final FieldsEnum fields2 = MultiFields.getFields(r2).iterator();
+
+    String field1=null, field2=null;
+    TermsEnum termsEnum1 = null;
+    TermsEnum termsEnum2 = null;
+    DocsEnum docs1=null, docs2=null;
 
     // pack both doc and freq into single element for easy sorting
     long[] info1 = new long[r1.numDocs()];
     long[] info2 = new long[r2.numDocs()];
 
     for(;;) {
-      Term term1,term2;
+      BytesRef term1=null, term2=null;
 
       // iterate until we get some docs
       int len1;
       for(;;) {
         len1=0;
-        term1 = termEnum1.term();
-        if (term1==null) break;
-        termDocs1.seek(termEnum1);
-        while (termDocs1.next()) {
-          int d1 = termDocs1.doc();
-          int f1 = termDocs1.freq();
-          info1[len1] = (((long)d1)<<32) | f1;
+        if (termsEnum1 == null) {
+          field1 = fields1.next();
+          if (field1 == null) {
+            break;
+          } else {
+            termsEnum1 = fields1.terms();
+          }
+        }
+        term1 = termsEnum1.next();
+        if (term1 == null) {
+          // no more terms in this field
+          termsEnum1 = null;
+          continue;
+        }
+        
+        //System.out.println("TEST: term1=" + term1);
+        docs1 = termsEnum1.docs(delDocs1, docs1);
+        while (docs1.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+          int d = docs1.docID();
+          int f = docs1.freq();
+          info1[len1] = (((long)d)<<32) | f;
           len1++;
         }
         if (len1>0) break;
-        if (!termEnum1.next()) break;
       }
 
-       // iterate until we get some docs
+      // iterate until we get some docs
       int len2;
       for(;;) {
         len2=0;
-        term2 = termEnum2.term();
-        if (term2==null) break;
-        termDocs2.seek(termEnum2);
-        while (termDocs2.next()) {
-          int d2 = termDocs2.doc();
-          int f2 = termDocs2.freq();
-          info2[len2] = (((long)r2r1[d2])<<32) | f2;
+        if (termsEnum2 == null) {
+          field2 = fields2.next();
+          if (field2 == null) {
+            break;
+          } else {
+            termsEnum2 = fields2.terms();
+          }
+        }
+        term2 = termsEnum2.next();
+        if (term2 == null) {
+          // no more terms in this field
+          termsEnum2 = null;
+          continue;
+        }
+        
+        //System.out.println("TEST: term1=" + term1);
+        docs2 = termsEnum2.docs(delDocs2, docs2);
+        while (docs2.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+          int d = r2r1[docs2.docID()];
+          int f = docs2.freq();
+          info2[len2] = (((long)d)<<32) | f;
           len2++;
         }
         if (len2>0) break;
-        if (!termEnum2.next()) break;
       }
 
-      if (!hasDeletes)
-        assertEquals(termEnum1.docFreq(), termEnum2.docFreq());
-
       assertEquals(len1, len2);
       if (len1==0) break;  // no more terms
 
-      assertEquals(term1, term2);
+      assertEquals(field1, field2);
+      assertTrue(term1.bytesEquals(term2));
+
+      if (!hasDeletes)
+        assertEquals(termsEnum1.docFreq(), termsEnum2.docFreq());
+
+      assertEquals("len1=" + len1 + " len2=" + len2 + " deletes?=" + hasDeletes, term1, term2);
 
       // sort info2 to get it into ascending docid
       Arrays.sort(info2, 0, len2);
 
       // now compare
       for (int i=0; i<len1; i++) {
-        assertEquals(info1[i], info2[i]);
+        assertEquals("i=" + i + " len=" + len1 + " d1=" + (info1[i]>>>32) + " f1=" + (info1[i]&Integer.MAX_VALUE) + " d2=" + (info2[i]>>>32) + " f2=" + (info2[i]&Integer.MAX_VALUE) +
+                     " field=" + field1 + " term=" + term1.utf8ToString(),
+                     info1[i],
+                     info2[i]);
       }
-
-      termEnum1.next();
-      termEnum2.next();
     }
   }
 
@@ -424,9 +503,9 @@ public class TestStressIndexing2 extends
         String s1 = f1.stringValue();
         String s2 = f2.stringValue();
         assertEquals(ff1 + " : " + ff2, s1,s2);
+        }
       }
     }
-  }
 
   public static void verifyEquals(TermFreqVector[] d1, TermFreqVector[] d2) {
     if (d1 == null) {
@@ -657,5 +736,4 @@ public class TestStressIndexing2 extends
       }
     }
   }
-
 }

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/CheckHits.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/CheckHits.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/CheckHits.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/CheckHits.java Tue Apr  6 19:19:27 2010
@@ -33,7 +33,7 @@ public class CheckHits {
    * different  order of operations from the actual scoring method ...
    * this allows for a small amount of variation
    */
-  public static float EXPLAIN_SCORE_TOLERANCE_DELTA = 0.00005f;
+  public static float EXPLAIN_SCORE_TOLERANCE_DELTA = 0.0002f;
     
   /**
    * Tests that all documents up to maxDoc which are *not* in the

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java Tue Apr  6 19:19:27 2010
@@ -24,7 +24,8 @@ import org.apache.lucene.document.FieldS
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.index.TermPositions;
+import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.PriorityQueue;
 
 /**
@@ -202,7 +203,7 @@ final class JustCompileSearch {
   
   static final class JustCompileExtendedFieldCacheLongParser implements FieldCache.LongParser {
 
-    public long parseLong(String string) {
+    public long parseLong(BytesRef string) {
       throw new UnsupportedOperationException(UNSUPPORTED_MSG);
     }
     
@@ -210,7 +211,7 @@ final class JustCompileSearch {
   
   static final class JustCompileExtendedFieldCacheDoubleParser implements FieldCache.DoubleParser {
     
-    public double parseDouble(String string) {
+    public double parseDouble(BytesRef term) {
       throw new UnsupportedOperationException(UNSUPPORTED_MSG);
     }
     
@@ -318,9 +319,9 @@ final class JustCompileSearch {
 
   static final class JustCompilePhraseScorer extends PhraseScorer {
 
-    JustCompilePhraseScorer(Weight weight, TermPositions[] tps, int[] offsets,
+    JustCompilePhraseScorer(Weight weight, DocsAndPositionsEnum[] docs, int[] offsets,
         Similarity similarity, byte[] norms) {
-      super(weight, tps, offsets, similarity, norms);
+      super(weight, docs, offsets, similarity, norms);
     }
 
     @Override

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/QueryUtils.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/QueryUtils.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/QueryUtils.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/QueryUtils.java Tue Apr  6 19:19:27 2010
@@ -391,7 +391,6 @@ public class QueryUtils {
       }
       @Override
       public void collect(int doc) throws IOException {
-        //System.out.println("doc="+doc);
         float score = scorer.score();
         try {
           

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java Tue Apr  6 19:19:27 2010
@@ -67,7 +67,7 @@ public class TestCachingWrapperFilter ex
     if (originalSet.isCacheable()) {
       assertEquals("Cached DocIdSet must be of same class like uncached, if cacheable", originalSet.getClass(), cachedSet.getClass());
     } else {
-      assertTrue("Cached DocIdSet must be an OpenBitSet if the original one was not cacheable", cachedSet instanceof OpenBitSetDISI);
+      assertTrue("Cached DocIdSet must be an OpenBitSet if the original one was not cacheable", cachedSet instanceof OpenBitSetDISI || cachedSet == DocIdSet.EMPTY_DOCIDSET);
     }
   }
   

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestFilteredSearch.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestFilteredSearch.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestFilteredSearch.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestFilteredSearch.java Tue Apr  6 19:19:27 2010
@@ -62,7 +62,7 @@ public class TestFilteredSearch extends 
     searchFiltered(writer, directory, filter, enforceSingleSegment);
   }
 
-  public void searchFiltered(IndexWriter writer, Directory directory, Filter filter, boolean optimize) {
+  public void searchFiltered(IndexWriter writer, Directory directory, SimpleDocIdSetFilter filter, boolean optimize) {
     try {
       for (int i = 0; i < 60; i++) {//Simple docs
         Document doc = new Document();
@@ -78,6 +78,7 @@ public class TestFilteredSearch extends 
      
      
       IndexSearcher indexSearcher = new IndexSearcher(directory, true);
+      filter.setTopReader(indexSearcher.getIndexReader());
       ScoreDoc[] hits = indexSearcher.search(booleanQuery, filter, 1000).scoreDocs;
       assertEquals("Number of matched documents", 1, hits.length);
 
@@ -89,29 +90,35 @@ public class TestFilteredSearch extends 
   }
  
   public static final class SimpleDocIdSetFilter extends Filter {
-    private int docBase;
     private final int[] docs;
     private int index;
+    private IndexReader topReader;
     public SimpleDocIdSetFilter(int[] docs) {
       this.docs = docs;
     }
+
+    public void setTopReader(IndexReader r) {
+      topReader = r;
+    }
+
     @Override
     public DocIdSet getDocIdSet(IndexReader reader) {
       final OpenBitSet set = new OpenBitSet();
+      int docBase = topReader.getSubReaderDocBase(reader);
       final int limit = docBase+reader.maxDoc();
       for (;index < docs.length; index++) {
         final int docId = docs[index];
         if(docId > limit)
           break;
-        set.set(docId-docBase);
+        if (docId >= docBase) {
+          set.set(docId-docBase);
+        }
       }
-      docBase = limit;
       return set.isEmpty()?null:set;
     }
     
     public void reset(){
       index = 0;
-      docBase = 0;
     }
   }
 

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery.java Tue Apr  6 19:19:27 2010
@@ -23,17 +23,17 @@ import java.io.IOException;
 
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockRAMDirectory;
-import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
 
 /**
  * Tests {@link FuzzyQuery}.
@@ -378,5 +378,10 @@ public class TestFuzzyQuery extends Luce
     doc.add(new Field("field", text, Field.Store.YES, Field.Index.ANALYZED));
     writer.addDocument(doc);
   }
+  
+  @Deprecated
+  public void testBackwardsLayer() {
+    assertTrue(new FuzzyQuery(new Term("dummy", "dummy")).hasNewAPI);
+  }
 
 }

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java Tue Apr  6 19:19:27 2010
@@ -22,14 +22,17 @@ import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermEnum;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.analysis.SimpleAnalyzer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
+
 import java.io.IOException;
+import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.Collections;
 
@@ -45,7 +48,7 @@ public class TestMultiPhraseQuery extend
     }
 
     public void testPhrasePrefix() throws IOException {
-        RAMDirectory indexStore = new RAMDirectory();
+        MockRAMDirectory indexStore = new MockRAMDirectory();
         IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)));
         add("blueberry pie", writer);
         add("blueberry strudel", writer);
@@ -101,6 +104,7 @@ public class TestMultiPhraseQuery extend
                 termsWithPrefix.add(te.term());
             }
         } while (te.next());
+        ir.close();
         query3.add(termsWithPrefix.toArray(new Term[0]));
         query3.add(new Term("body", "pizza"));
 
@@ -139,7 +143,7 @@ public class TestMultiPhraseQuery extend
       // and all terms required.
       // The contained PhraseMultiQuery must contain exactly one term array.
 
-      RAMDirectory indexStore = new RAMDirectory();
+      MockRAMDirectory indexStore = new MockRAMDirectory();
       IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)));
       add("blueberry pie", writer);
       add("blueberry chewing gum", writer);
@@ -164,10 +168,11 @@ public class TestMultiPhraseQuery extend
 
       assertEquals("Wrong number of hits", 2, hits.length);
       searcher.close();
+      indexStore.close();
   }
     
   public void testPhrasePrefixWithBooleanQuery() throws IOException {
-    RAMDirectory indexStore = new RAMDirectory();
+    MockRAMDirectory indexStore = new MockRAMDirectory();
     IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(
         TEST_VERSION_CURRENT, new StandardAnalyzer(
         TEST_VERSION_CURRENT, Collections.emptySet())));
@@ -190,6 +195,23 @@ public class TestMultiPhraseQuery extend
     ScoreDoc[] hits = searcher.search(q, null, 1000).scoreDocs;
     assertEquals("Wrong number of hits", 0, hits.length);
     searcher.close();
+    indexStore.close();
+  }
+
+  public void testNoDocs() throws Exception {
+    MockRAMDirectory indexStore = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(indexStore, new StandardAnalyzer(Version.LUCENE_CURRENT, new HashSet(0)), true, IndexWriter.MaxFieldLength.LIMITED);
+    add("a note", "note", writer);
+    writer.close();
+
+    IndexSearcher searcher = new IndexSearcher(indexStore, true);
+
+    MultiPhraseQuery q = new MultiPhraseQuery();
+    q.add(new Term("body", "a"));
+    q.add(new Term[] { new Term("body", "nope"), new Term("body", "nope") });
+    assertEquals("Wrong number of hits", 0, searcher.search(q, null, 1).totalHits);
+    searcher.close();
+    indexStore.close();
   }
   
   public void testHashCodeAndEquals(){

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java Tue Apr  6 19:19:27 2010
@@ -24,9 +24,11 @@ import org.apache.lucene.document.Docume
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.NumericField;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriter.MaxFieldLength;
 import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCaseJ4;
 import org.apache.lucene.util.NumericUtils;
 
@@ -331,9 +333,15 @@ public class TestNumericRangeQuery32 ext
       if (lower>upper) {
         int a=lower; lower=upper; upper=a;
       }
+      final BytesRef lowerBytes = new BytesRef(NumericUtils.BUF_SIZE_INT), upperBytes = new BytesRef(NumericUtils.BUF_SIZE_INT);
+      NumericUtils.intToPrefixCoded(lower, 0, lowerBytes);
+      NumericUtils.intToPrefixCoded(upper, 0, upperBytes);
+      // TODO: when new TermRange ctors with BytesRef available, use them and do not convert to string!
+      final String lowerString = lowerBytes.utf8ToString(), upperString = upperBytes.utf8ToString();
+
       // test inclusive range
       NumericRangeQuery<Integer> tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
-      TermRangeQuery cq=new TermRangeQuery(field, NumericUtils.intToPrefixCoded(lower), NumericUtils.intToPrefixCoded(upper), true, true);
+      TermRangeQuery cq=new TermRangeQuery(field, lowerString, upperString, true, true);
       TopDocs tTopDocs = searcher.search(tq, 1);
       TopDocs cTopDocs = searcher.search(cq, 1);
       assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
@@ -341,7 +349,7 @@ public class TestNumericRangeQuery32 ext
       termCountC += cq.getTotalNumberOfTerms();
       // test exclusive range
       tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, false);
-      cq=new TermRangeQuery(field, NumericUtils.intToPrefixCoded(lower), NumericUtils.intToPrefixCoded(upper), false, false);
+      cq=new TermRangeQuery(field, lowerString, upperString, false, false);
       tTopDocs = searcher.search(tq, 1);
       cTopDocs = searcher.search(cq, 1);
       assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
@@ -349,7 +357,7 @@ public class TestNumericRangeQuery32 ext
       termCountC += cq.getTotalNumberOfTerms();
       // test left exclusive range
       tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, true);
-      cq=new TermRangeQuery(field, NumericUtils.intToPrefixCoded(lower), NumericUtils.intToPrefixCoded(upper), false, true);
+      cq=new TermRangeQuery(field, lowerString, upperString, false, true);
       tTopDocs = searcher.search(tq, 1);
       cTopDocs = searcher.search(cq, 1);
       assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
@@ -357,7 +365,7 @@ public class TestNumericRangeQuery32 ext
       termCountC += cq.getTotalNumberOfTerms();
       // test right exclusive range
       tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, false);
-      cq=new TermRangeQuery(field, NumericUtils.intToPrefixCoded(lower), NumericUtils.intToPrefixCoded(upper), true, false);
+      cq=new TermRangeQuery(field, lowerString, upperString, true, false);
       tTopDocs = searcher.search(tq, 1);
       cTopDocs = searcher.search(cq, 1);
       assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
@@ -549,23 +557,24 @@ public class TestNumericRangeQuery32 ext
   }
   
   private void testEnum(int lower, int upper) throws Exception {
-    NumericRangeQuery<Integer> q = NumericRangeQuery.newIntRange("field4", 4, lower, upper, true, true);
-    FilteredTermEnum termEnum = q.getEnum(searcher.getIndexReader());
-    try {
-      int count = 0;
-      do {
-        final Term t = termEnum.term();
-        if (t != null) {
-          final int val = NumericUtils.prefixCodedToInt(t.text());
-          assertTrue("value not in bounds", val >= lower && val <= upper);
-          count++;
-        } else break;
-      } while (termEnum.next());
-      assertFalse(termEnum.next());
-      if (VERBOSE) System.out.println("TermEnum on 'field4' for range [" + lower + "," + upper + "] contained " + count + " terms.");
-    } finally {
-      termEnum.close();
-    }
+    NumericRangeQuery<Integer> q = NumericRangeQuery.newIntRange("field4", 4,
+        lower, upper, true, true);
+    TermsEnum termEnum = q.getTermsEnum(searcher.getIndexReader());
+    int count = 0;
+    while (termEnum.next() != null) {
+      final BytesRef t = termEnum.term();
+      if (t != null) {
+        final int val = NumericUtils.prefixCodedToInt(t);
+        assertTrue("value not in bounds " + val + " >= " + lower + " && "
+            + val + " <= " + upper, val >= lower && val <= upper);
+        count++;
+      } else
+        break;
+    } 
+    assertNull(termEnum.next());
+    if (VERBOSE) System.out.println("TermEnum on 'field4' for range [" + lower + "," + upper
+        + "] contained " + count + " terms.");
+
   }
   
   @Test

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java Tue Apr  6 19:19:27 2010
@@ -26,6 +26,7 @@ import org.apache.lucene.document.Numeri
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCaseJ4;
 import org.apache.lucene.util.NumericUtils;
 
@@ -350,9 +351,15 @@ public class TestNumericRangeQuery64 ext
       if (lower>upper) {
         long a=lower; lower=upper; upper=a;
       }
+      final BytesRef lowerBytes = new BytesRef(NumericUtils.BUF_SIZE_LONG), upperBytes = new BytesRef(NumericUtils.BUF_SIZE_LONG);
+      NumericUtils.longToPrefixCoded(lower, 0, lowerBytes);
+      NumericUtils.longToPrefixCoded(upper, 0, upperBytes);
+      // TODO: when new TermRange ctors with BytesRef available, use them and do not convert to string!
+      final String lowerString = lowerBytes.utf8ToString(), upperString = upperBytes.utf8ToString();
+      
       // test inclusive range
       NumericRangeQuery<Long> tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
-      TermRangeQuery cq=new TermRangeQuery(field, NumericUtils.longToPrefixCoded(lower), NumericUtils.longToPrefixCoded(upper), true, true);
+      TermRangeQuery cq=new TermRangeQuery(field, lowerString, upperString, true, true);
       TopDocs tTopDocs = searcher.search(tq, 1);
       TopDocs cTopDocs = searcher.search(cq, 1);
       assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
@@ -360,7 +367,7 @@ public class TestNumericRangeQuery64 ext
       termCountC += cq.getTotalNumberOfTerms();
       // test exclusive range
       tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, false);
-      cq=new TermRangeQuery(field, NumericUtils.longToPrefixCoded(lower), NumericUtils.longToPrefixCoded(upper), false, false);
+      cq=new TermRangeQuery(field, lowerString, upperString, false, false);
       tTopDocs = searcher.search(tq, 1);
       cTopDocs = searcher.search(cq, 1);
       assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
@@ -368,7 +375,7 @@ public class TestNumericRangeQuery64 ext
       termCountC += cq.getTotalNumberOfTerms();
       // test left exclusive range
       tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, true);
-      cq=new TermRangeQuery(field, NumericUtils.longToPrefixCoded(lower), NumericUtils.longToPrefixCoded(upper), false, true);
+      cq=new TermRangeQuery(field, lowerString, upperString, false, true);
       tTopDocs = searcher.search(tq, 1);
       cTopDocs = searcher.search(cq, 1);
       assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
@@ -376,7 +383,7 @@ public class TestNumericRangeQuery64 ext
       termCountC += cq.getTotalNumberOfTerms();
       // test right exclusive range
       tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, false);
-      cq=new TermRangeQuery(field, NumericUtils.longToPrefixCoded(lower), NumericUtils.longToPrefixCoded(upper), true, false);
+      cq=new TermRangeQuery(field, lowerString, upperString, true, false);
       tTopDocs = searcher.search(tq, 1);
       cTopDocs = searcher.search(cq, 1);
       assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
@@ -583,4 +590,9 @@ public class TestNumericRangeQuery64 ext
      // difference to int range is tested in TestNumericRangeQuery32
   }
   
+  @Test @Deprecated
+  public void testBackwardsLayer() {
+    assertTrue(NumericRangeQuery.newLongRange("dummy", null, null, true, true).hasNewAPI);
+  }
+  
 }

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java Tue Apr  6 19:19:27 2010
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.io.StringReader;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Iterator;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.StopFilter;
@@ -61,6 +62,8 @@ import org.apache.lucene.util.LuceneTest
  */
 public class TestPositionIncrement extends LuceneTestCase {
 
+  final static boolean VERBOSE = false;
+
   public void testSetPosition() throws Exception {
     Analyzer analyzer = new Analyzer() {
       @Override
@@ -242,8 +245,8 @@ public class TestPositionIncrement exten
     IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
         TEST_VERSION_CURRENT, new TestPayloadAnalyzer()));
     Document doc = new Document();
-    doc.add(new Field("content",
-                      new StringReader("a a b c d e a f g h i j a b k k")));
+    doc.add(new Field("content", new StringReader(
+        "a a b c d e a f g h i j a b k k")));
     writer.addDocument(doc);
 
     IndexReader r = writer.getReader();
@@ -271,30 +274,43 @@ public class TestPositionIncrement exten
 
     count = 0;
     boolean sawZero = false;
-    //System.out.println("\ngetPayloadSpans test");
+    if (VERBOSE) {
+      System.out.println("\ngetPayloadSpans test");
+    }
     Spans pspans = snq.getSpans(is.getIndexReader());
     while (pspans.next()) {
-      //System.out.println(pspans.doc() + " - " + pspans.start() + " - "+ pspans.end());
+      if (VERBOSE) {
+        System.out.println("doc " + pspans.doc() + ": span " + pspans.start()
+            + " to " + pspans.end());
+      }
       Collection<byte[]> payloads = pspans.getPayload();
       sawZero |= pspans.start() == 0;
-      count += payloads.size();
+      for (@SuppressWarnings("unused") byte[] bytes : payloads) {
+        count++;
+        if (!VERBOSE) {
+          // do nothing
+        } else {
+          System.out.println("  payload: " + new String((byte[]) bytes));
+        }
+      }
     }
     assertEquals(5, count);
     assertTrue(sawZero);
 
-    //System.out.println("\ngetSpans test");
+    // System.out.println("\ngetSpans test");
     Spans spans = snq.getSpans(is.getIndexReader());
     count = 0;
     sawZero = false;
     while (spans.next()) {
       count++;
       sawZero |= spans.start() == 0;
-      //System.out.println(spans.doc() + " - " + spans.start() + " - " + spans.end());
+      // System.out.println(spans.doc() + " - " + spans.start() + " - " +
+      // spans.end());
     }
     assertEquals(4, count);
     assertTrue(sawZero);
-  
-    //System.out.println("\nPayloadSpanUtil test");
+
+    // System.out.println("\nPayloadSpanUtil test");
 
     sawZero = false;
     PayloadSpanUtil psu = new PayloadSpanUtil(is.getIndexReader());
@@ -355,7 +371,9 @@ class PayloadFilter extends TokenFilter 
       }
       posIncrAttr.setPositionIncrement(posIncr);
       pos += posIncr;
-      // System.out.println("term=" + termAttr.term() + " pos=" + pos);
+      if (TestPositionIncrement.VERBOSE) {
+        System.out.println("term=" + termAttr.term() + " pos=" + pos);
+      }
       i++;
       return true;
     } else {

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java Tue Apr  6 19:19:27 2010
@@ -53,5 +53,15 @@ public class TestPrefixQuery extends Luc
     query = new PrefixQuery(new Term("category", "/Computers/Mac"));
     hits = searcher.search(query, null, 1000).scoreDocs;
     assertEquals("One in /Computers/Mac", 1, hits.length);
+
+    query = new PrefixQuery(new Term("category", ""));
+    assertFalse(query.getTermsEnum(searcher.getIndexReader()) instanceof PrefixTermsEnum);
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("everything", 3, hits.length);
+  }
+  
+  @Deprecated
+  public void testBackwardsLayer() {
+    assertTrue(new PrefixQuery(new Term("dummy", "dummy")).hasNewAPI);
   }
 }

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestSort.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestSort.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestSort.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestSort.java Tue Apr  6 19:19:27 2010
@@ -37,6 +37,7 @@ import org.apache.lucene.index.IndexWrit
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.LogMergePolicy;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.queryParser.ParseException;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.FieldValueHitQueue.Entry;
@@ -277,7 +278,7 @@ public class TestSort extends LuceneTest
     sort.setSort(
         new SortField("string", SortField.STRING),
         new SortField("string2", SortField.STRING, true),
-        SortField.FIELD_DOC );
+        SortField.FIELD_DOC);
 
     result = searcher.search(new MatchAllDocsQuery(), null, 500, sort).scoreDocs;
 
@@ -337,8 +338,8 @@ public class TestSort extends LuceneTest
 
 
     sort.setSort (new SortField ("parser", new FieldCache.IntParser(){
-      public final int parseInt(final String val) {
-        return (val.charAt(0)-'A') * 123456;
+      public final int parseInt(final BytesRef term) {
+        return (term.bytes[term.offset]-'A') * 123456;
       }
     }), SortField.FIELD_DOC );
     assertMatches (full, queryA, sort, "JIHGFEDCBA");
@@ -346,8 +347,8 @@ public class TestSort extends LuceneTest
     fc.purgeAllCaches();
 
     sort.setSort (new SortField ("parser", new FieldCache.FloatParser(){
-      public final float parseFloat(final String val) {
-        return (float) Math.sqrt( val.charAt(0) );
+      public final float parseFloat(final BytesRef term) {
+        return (float) Math.sqrt( term.bytes[term.offset] );
       }
     }), SortField.FIELD_DOC );
     assertMatches (full, queryA, sort, "JIHGFEDCBA");
@@ -355,8 +356,8 @@ public class TestSort extends LuceneTest
     fc.purgeAllCaches();
 
     sort.setSort (new SortField ("parser", new FieldCache.LongParser(){
-      public final long parseLong(final String val) {
-        return (val.charAt(0)-'A') * 1234567890L;
+      public final long parseLong(final BytesRef term) {
+        return (term.bytes[term.offset]-'A') * 1234567890L;
       }
     }), SortField.FIELD_DOC );
     assertMatches (full, queryA, sort, "JIHGFEDCBA");
@@ -364,8 +365,8 @@ public class TestSort extends LuceneTest
     fc.purgeAllCaches();
 
     sort.setSort (new SortField ("parser", new FieldCache.DoubleParser(){
-      public final double parseDouble(final String val) {
-        return Math.pow( val.charAt(0), (val.charAt(0)-'A') );
+      public final double parseDouble(final BytesRef term) {
+        return Math.pow( term.bytes[term.offset], (term.bytes[term.offset]-'A') );
       }
     }), SortField.FIELD_DOC );
     assertMatches (full, queryA, sort, "JIHGFEDCBA");
@@ -373,8 +374,8 @@ public class TestSort extends LuceneTest
     fc.purgeAllCaches();
 
     sort.setSort (new SortField ("parser", new FieldCache.ByteParser(){
-      public final byte parseByte(final String val) {
-        return (byte) (val.charAt(0)-'A');
+      public final byte parseByte(final BytesRef term) {
+        return (byte) (term.bytes[term.offset]-'A');
       }
     }), SortField.FIELD_DOC );
     assertMatches (full, queryA, sort, "JIHGFEDCBA");
@@ -382,8 +383,8 @@ public class TestSort extends LuceneTest
     fc.purgeAllCaches();
 
     sort.setSort (new SortField ("parser", new FieldCache.ShortParser(){
-      public final short parseShort(final String val) {
-        return (short) (val.charAt(0)-'A');
+      public final short parseShort(final BytesRef term) {
+        return (short) (term.bytes[term.offset]-'A');
       }
     }), SortField.FIELD_DOC );
     assertMatches (full, queryA, sort, "JIHGFEDCBA");
@@ -443,8 +444,8 @@ public class TestSort extends LuceneTest
     @Override
     public void setNextReader(IndexReader reader, int docBase) throws IOException {
       docValues = FieldCache.DEFAULT.getInts(reader, "parser", new FieldCache.IntParser() {
-          public final int parseInt(final String val) {
-            return (val.charAt(0)-'A') * 123456;
+          public final int parseInt(final BytesRef term) {
+            return (term.bytes[term.offset]-'A') * 123456;
           }
         });
     }

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestTermRangeQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestTermRangeQuery.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestTermRangeQuery.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestTermRangeQuery.java Tue Apr  6 19:19:27 2010
@@ -92,6 +92,25 @@ public class TestTermRangeQuery extends 
     assertEquals("C added - A, B, C in range", 3, hits.length);
     searcher.close();
   }
+  
+  public void testAllDocs() throws Exception {
+    initializeIndex(new String[]{"A", "B", "C", "D"});
+    IndexSearcher searcher = new IndexSearcher(dir, true);
+    TermRangeQuery query = new TermRangeQuery("content", null, null, true, true);
+    assertFalse(query.getTermsEnum(searcher.getIndexReader()) instanceof TermRangeTermsEnum);
+    assertEquals(4, searcher.search(query, null, 1000).scoreDocs.length);
+    query = new TermRangeQuery("content", null, null, false, false);
+    assertFalse(query.getTermsEnum(searcher.getIndexReader()) instanceof TermRangeTermsEnum);
+    assertEquals(4, searcher.search(query, null, 1000).scoreDocs.length);
+    query = new TermRangeQuery("content", "", null, true, false);
+    assertFalse(query.getTermsEnum(searcher.getIndexReader()) instanceof TermRangeTermsEnum);
+    assertEquals(4, searcher.search(query, null, 1000).scoreDocs.length);
+    // and now another one
+    query = new TermRangeQuery("content", "B", null, true, false);
+    assertTrue(query.getTermsEnum(searcher.getIndexReader()) instanceof TermRangeTermsEnum);
+    assertEquals(3, searcher.search(query, null, 1000).scoreDocs.length);
+    searcher.close();
+  }
 
   /** This test should not be here, but it tests the fuzzy query rewrite mode (TOP_TERMS_SCORING_BOOLEAN_REWRITE)
    * with constant score and checks, that only the lower end of terms is put into the range */
@@ -402,4 +421,9 @@ public class TestTermRangeQuery extends 
     //assertEquals("C added => A,B,<empty string>,C in range", 3, hits.length());
      searcher.close();
   }
+  
+  @Deprecated
+  public void testBackwardsLayer() {
+    assertTrue(new TermRangeQuery("dummy", null, null, true, true).hasNewAPI);
+  }
 }

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestTermScorer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestTermScorer.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestTermScorer.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestTermScorer.java Tue Apr  6 19:19:27 2010
@@ -71,9 +71,8 @@ public class TestTermScorer extends Luce
 
         Weight weight = termQuery.weight(indexSearcher);
 
-        TermScorer ts = new TermScorer(weight,
-                                       indexReader.termDocs(allTerm), indexSearcher.getSimilarity(),
-                                       indexReader.norms(FIELD));
+        Scorer ts = weight.scorer(indexSearcher.getIndexReader(),
+                                  true, true);
         //we have 2 documents with the term all in them, one document for all the other values
         final List<TestHit> docs = new ArrayList<TestHit>();
         //must call next first
@@ -137,9 +136,8 @@ public class TestTermScorer extends Luce
 
         Weight weight = termQuery.weight(indexSearcher);
 
-        TermScorer ts = new TermScorer(weight,
-                                       indexReader.termDocs(allTerm), indexSearcher.getSimilarity(),
-                                       indexReader.norms(FIELD));
+        Scorer ts = weight.scorer(indexSearcher.getIndexReader(),
+                                  true, true);
         assertTrue("next did not return a doc", ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
         assertTrue("score is not correct", ts.score() == 1.6931472f);
         assertTrue("next did not return a doc", ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -147,16 +145,15 @@ public class TestTermScorer extends Luce
         assertTrue("next returned a doc and it should not have", ts.nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
     }
 
-    public void testSkipTo() throws Exception {
+    public void testAdvance() throws Exception {
 
         Term allTerm = new Term(FIELD, "all");
         TermQuery termQuery = new TermQuery(allTerm);
 
         Weight weight = termQuery.weight(indexSearcher);
 
-        TermScorer ts = new TermScorer(weight,
-                                       indexReader.termDocs(allTerm), indexSearcher.getSimilarity(),
-                                       indexReader.norms(FIELD));
+        Scorer ts = weight.scorer(indexSearcher.getIndexReader(),
+                                  true, true);
         assertTrue("Didn't skip", ts.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
         //The next doc should be doc 5
         assertTrue("doc should be number 5", ts.docID() == 5);

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestWildcard.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestWildcard.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestWildcard.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestWildcard.java Tue Apr  6 19:19:27 2010
@@ -24,6 +24,7 @@ import org.apache.lucene.document.Docume
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.Term;
@@ -121,30 +122,12 @@ public class TestWildcard
     MultiTermQuery wq = new WildcardQuery(new Term("field", "prefix*"));
     assertMatches(searcher, wq, 2);
     
-    MultiTermQuery expected = new PrefixQuery(new Term("field", "prefix"));
-    wq.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
-    wq.setBoost(0.1F);
-    expected.setRewriteMethod(wq.getRewriteMethod());
-    expected.setBoost(wq.getBoost());
-    assertEquals(searcher.rewrite(expected), searcher.rewrite(wq));
-    
-    wq.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
-    wq.setBoost(0.2F);
-    expected.setRewriteMethod(wq.getRewriteMethod());
-    expected.setBoost(wq.getBoost());
-    assertEquals(searcher.rewrite(expected), searcher.rewrite(wq));
-    
-    wq.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT);
-    wq.setBoost(0.3F);
-    expected.setRewriteMethod(wq.getRewriteMethod());
-    expected.setBoost(wq.getBoost());
-    assertEquals(searcher.rewrite(expected), searcher.rewrite(wq));
+    assertTrue(wq.getTermsEnum(searcher.getIndexReader()) instanceof PrefixTermsEnum);
     
-    wq.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE);
-    wq.setBoost(0.4F);
-    expected.setRewriteMethod(wq.getRewriteMethod());
-    expected.setBoost(wq.getBoost());
-    assertEquals(searcher.rewrite(expected), searcher.rewrite(wq));
+    wq = new WildcardQuery(new Term("field", "*"));
+    assertMatches(searcher, wq, 2);
+    assertFalse(wq.getTermsEnum(searcher.getIndexReader()) instanceof PrefixTermsEnum);
+    assertFalse(wq.getTermsEnum(searcher.getIndexReader()) instanceof AutomatonTermsEnum);
   }
 
   /**
@@ -326,5 +309,62 @@ public class TestWildcard
 
     searcher.close();
   }
+  @Deprecated
+  private static final class OldWildcardQuery extends MultiTermQuery {
+    final Term term;
+  
+    OldWildcardQuery(Term term) {
+      this.term = term;
+    }
+      
+    @Override
+    protected FilteredTermEnum getEnum(IndexReader reader) throws IOException {
+      return new WildcardTermEnum(reader, term);
+    }
+    
+    @Override
+    public String toString(String field) {
+      return "OldWildcard(" + term.toString()+ ")";
+    }
+  }
+  
+  @Deprecated
+  public void testDeprecatedTermEnum() throws Exception {
+    RAMDirectory indexStore = getIndexStore("body", new String[]
+    {"metal", "metals"});
+    IndexSearcher searcher = new IndexSearcher(indexStore, true);
+    Query query1 = new TermQuery(new Term("body", "metal"));
+    Query query2 = new OldWildcardQuery(new Term("body", "metal*"));
+    Query query3 = new OldWildcardQuery(new Term("body", "m*tal"));
+    Query query4 = new OldWildcardQuery(new Term("body", "m*tal*"));
+    Query query5 = new OldWildcardQuery(new Term("body", "m*tals"));
+
+    BooleanQuery query6 = new BooleanQuery();
+    query6.add(query5, BooleanClause.Occur.SHOULD);
+
+    BooleanQuery query7 = new BooleanQuery();
+    query7.add(query3, BooleanClause.Occur.SHOULD);
+    query7.add(query5, BooleanClause.Occur.SHOULD);
+
+    // Queries do not automatically lower-case search terms:
+    Query query8 = new OldWildcardQuery(new Term("body", "M*tal*"));
+
+    assertMatches(searcher, query1, 1);
+    assertMatches(searcher, query2, 2);
+    assertMatches(searcher, query3, 1);
+    assertMatches(searcher, query4, 2);
+    assertMatches(searcher, query5, 1);
+    assertMatches(searcher, query6, 1);
+    assertMatches(searcher, query7, 2);
+    assertMatches(searcher, query8, 0);
+    assertMatches(searcher, new OldWildcardQuery(new Term("body", "*tall")), 0);
+    assertMatches(searcher, new OldWildcardQuery(new Term("body", "*tal")), 1);
+    assertMatches(searcher, new OldWildcardQuery(new Term("body", "*tal*")), 2);
+  }
   
+  @Deprecated
+  public void testBackwardsLayer() {
+    assertTrue(new WildcardQuery(new Term("body", "metal*")).hasNewAPI);
+    assertFalse(new OldWildcardQuery(new Term("body", "metal*")).hasNewAPI);
+  }
 }

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/function/TestOrdValues.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/function/TestOrdValues.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/function/TestOrdValues.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/function/TestOrdValues.java Tue Apr  6 19:19:27 2010
@@ -62,9 +62,9 @@ public class TestOrdValues extends Funct
     IndexSearcher s = new IndexSearcher(dir, true);
     ValueSource vs;
     if (inOrder) {
-      vs = new OrdFieldSource(field);
+      vs = new MultiValueSource(new OrdFieldSource(field));
     } else {
-      vs = new ReverseOrdFieldSource(field);
+      vs = new MultiValueSource(new ReverseOrdFieldSource(field));
     }
 
     Query q = new ValueSourceQuery(vs);

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/store/MockRAMDirectory.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/store/MockRAMDirectory.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/store/MockRAMDirectory.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/store/MockRAMDirectory.java Tue Apr  6 19:19:27 2010
@@ -205,8 +205,10 @@ public class MockRAMDirectory extends RA
     if (crashed)
       throw new IOException("cannot createOutput after crash");
     init();
-    if (preventDoubleWrite && createdFiles.contains(name) && !name.equals("segments.gen"))
-      throw new IOException("file \"" + name + "\" was already written to");
+    synchronized(this) {
+      if (preventDoubleWrite && createdFiles.contains(name) && !name.equals("segments.gen"))
+        throw new IOException("file \"" + name + "\" was already written to");
+    }
     if (noDeleteOpenFile && openFiles.containsKey(name))
       throw new IOException("MockRAMDirectory: file \"" + name + "\" is still open: cannot overwrite");
     RAMFile file = new RAMFile(this);

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestAttributeSource.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestAttributeSource.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestAttributeSource.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestAttributeSource.java Tue Apr  6 19:19:27 2010
@@ -27,27 +27,27 @@ public class TestAttributeSource extends
   public void testCaptureState() {
     // init a first instance
     AttributeSource src = new AttributeSource();
-    TermAttribute termAtt = src.addAttribute(TermAttribute.class);
+    CharTermAttribute termAtt = src.addAttribute(CharTermAttribute.class);
     TypeAttribute typeAtt = src.addAttribute(TypeAttribute.class);
-    termAtt.setTermBuffer("TestTerm");
+    termAtt.append("TestTerm");
     typeAtt.setType("TestType");
     final int hashCode = src.hashCode();
     
     AttributeSource.State state = src.captureState();
     
     // modify the attributes
-    termAtt.setTermBuffer("AnotherTestTerm");
+    termAtt.setEmpty().append("AnotherTestTerm");
     typeAtt.setType("AnotherTestType");
     assertTrue("Hash code should be different", hashCode != src.hashCode());
     
     src.restoreState(state);
-    assertEquals("TestTerm", termAtt.term());
+    assertEquals("TestTerm", termAtt.toString());
     assertEquals("TestType", typeAtt.type());
     assertEquals("Hash code should be equal after restore", hashCode, src.hashCode());
 
     // restore into an exact configured copy
     AttributeSource copy = new AttributeSource();
-    copy.addAttribute(TermAttribute.class);
+    copy.addAttribute(CharTermAttribute.class);
     copy.addAttribute(TypeAttribute.class);
     copy.restoreState(state);
     assertEquals("Both AttributeSources should have same hashCode after restore", src.hashCode(), copy.hashCode());
@@ -57,17 +57,17 @@ public class TestAttributeSource extends
     AttributeSource src2 = new AttributeSource();
     typeAtt = src2.addAttribute(TypeAttribute.class);
     FlagsAttribute flagsAtt = src2.addAttribute(FlagsAttribute.class);
-    termAtt = src2.addAttribute(TermAttribute.class);
+    termAtt = src2.addAttribute(CharTermAttribute.class);
     flagsAtt.setFlags(12345);
 
     src2.restoreState(state);
-    assertEquals("TestTerm", termAtt.term());
+    assertEquals("TestTerm", termAtt.toString());
     assertEquals("TestType", typeAtt.type());
     assertEquals("FlagsAttribute should not be touched", 12345, flagsAtt.getFlags());
 
     // init a third instance missing one Attribute
     AttributeSource src3 = new AttributeSource();
-    termAtt = src3.addAttribute(TermAttribute.class);
+    termAtt = src3.addAttribute(CharTermAttribute.class);
     try {
       src3.restoreState(state);
       fail("The third instance is missing the TypeAttribute, so restoreState() should throw IllegalArgumentException");
@@ -78,42 +78,42 @@ public class TestAttributeSource extends
   
   public void testCloneAttributes() {
     final AttributeSource src = new AttributeSource();
-    final TermAttribute termAtt = src.addAttribute(TermAttribute.class);
+    final FlagsAttribute flagsAtt = src.addAttribute(FlagsAttribute.class);
     final TypeAttribute typeAtt = src.addAttribute(TypeAttribute.class);
-    termAtt.setTermBuffer("TestTerm");
+    flagsAtt.setFlags(1234);
     typeAtt.setType("TestType");
     
     final AttributeSource clone = src.cloneAttributes();
     final Iterator<Class<? extends Attribute>> it = clone.getAttributeClassesIterator();
-    assertEquals("TermAttribute must be the first attribute", TermAttribute.class, it.next());
+    assertEquals("FlagsAttribute must be the first attribute", FlagsAttribute.class, it.next());
     assertEquals("TypeAttribute must be the second attribute", TypeAttribute.class, it.next());
     assertFalse("No more attributes", it.hasNext());
     
-    final TermAttribute termAtt2 = clone.getAttribute(TermAttribute.class);
+    final FlagsAttribute flagsAtt2 = clone.getAttribute(FlagsAttribute.class);
     final TypeAttribute typeAtt2 = clone.getAttribute(TypeAttribute.class);
-    assertNotSame("TermAttribute of original and clone must be different instances", termAtt2, termAtt);
+    assertNotSame("FlagsAttribute of original and clone must be different instances", flagsAtt2, flagsAtt);
     assertNotSame("TypeAttribute of original and clone must be different instances", typeAtt2, typeAtt);
-    assertEquals("TermAttribute of original and clone must be equal", termAtt2, termAtt);
+    assertEquals("FlagsAttribute of original and clone must be equal", flagsAtt2, flagsAtt);
     assertEquals("TypeAttribute of original and clone must be equal", typeAtt2, typeAtt);
     
     // test copy back
-    termAtt2.setTermBuffer("OtherTerm");
+    flagsAtt2.setFlags(4711);
     typeAtt2.setType("OtherType");
     clone.copyTo(src);
-    assertEquals("TermAttribute of original must now contain updated term", "OtherTerm", termAtt.term());
+    assertEquals("FlagsAttribute of original must now contain updated term", 4711, flagsAtt.getFlags());
     assertEquals("TypeAttribute of original must now contain updated type", "OtherType", typeAtt.type());
     // verify again:
-    assertNotSame("TermAttribute of original and clone must be different instances", termAtt2, termAtt);
+    assertNotSame("FlagsAttribute of original and clone must be different instances", flagsAtt2, flagsAtt);
     assertNotSame("TypeAttribute of original and clone must be different instances", typeAtt2, typeAtt);
-    assertEquals("TermAttribute of original and clone must be equal", termAtt2, termAtt);
+    assertEquals("FlagsAttribute of original and clone must be equal", flagsAtt2, flagsAtt);
     assertEquals("TypeAttribute of original and clone must be equal", typeAtt2, typeAtt);
   }
   
   public void testToStringAndMultiAttributeImplementations() {
     AttributeSource src = new AttributeSource();
-    TermAttribute termAtt = src.addAttribute(TermAttribute.class);
+    CharTermAttribute termAtt = src.addAttribute(CharTermAttribute.class);
     TypeAttribute typeAtt = src.addAttribute(TypeAttribute.class);
-    termAtt.setTermBuffer("TestTerm");
+    termAtt.append("TestTerm");
     typeAtt.setType("TestType");    
     assertEquals("Attributes should appear in original order", "("+termAtt.toString()+","+typeAtt.toString()+")", src.toString());
     Iterator<AttributeImpl> it = src.getAttributeImplsIterator();
@@ -125,23 +125,23 @@ public class TestAttributeSource extends
 
     src = new AttributeSource();
     src.addAttributeImpl(new Token());
-    // this should not add a new attribute as Token implements TermAttribute, too
-    termAtt = src.addAttribute(TermAttribute.class);
-    assertTrue("TermAttribute should be implemented by Token", termAtt instanceof Token);
+    // this should not add a new attribute as Token implements CharTermAttribute, too
+    termAtt = src.addAttribute(CharTermAttribute.class);
+    assertTrue("CharTermAttribute should be implemented by Token", termAtt instanceof Token);
     // get the Token attribute and check, that it is the only one
     it = src.getAttributeImplsIterator();
     Token tok = (Token) it.next();
     assertFalse("There should be only one attribute implementation instance", it.hasNext());
     
-    termAtt.setTermBuffer("TestTerm");
+    termAtt.setEmpty().append("TestTerm");
     assertEquals("Token should only be printed once", "("+tok.toString()+")", src.toString());
   }
   
   public void testDefaultAttributeFactory() throws Exception {
     AttributeSource src = new AttributeSource();
     
-    assertTrue("TermAttribute is not implemented by TermAttributeImpl",
-      src.addAttribute(TermAttribute.class) instanceof TermAttributeImpl);
+    assertTrue("CharTermAttribute is not implemented by CharTermAttributeImpl",
+      src.addAttribute(CharTermAttribute.class) instanceof CharTermAttributeImpl);
     assertTrue("OffsetAttribute is not implemented by OffsetAttributeImpl",
       src.addAttribute(OffsetAttribute.class) instanceof OffsetAttributeImpl);
     assertTrue("FlagsAttribute is not implemented by FlagsAttributeImpl",

Propchange: lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestAttributeSource.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Apr  6 19:19:27 2010
@@ -1,2 +1,3 @@
-/lucene/java/branches/lucene_2_9/src/test/org/apache/lucene/util/TestAttributeSource.java:896850,909334
-/lucene/java/trunk/src/test/org/apache/lucene/util/TestAttributeSource.java:924483-925561
+/lucene/java/branches/flex_1458/src/test/org/apache/lucene/util/TestAttributeSource.java:824912-931101
+/lucene/java/branches/lucene_2_9/src/test/org/apache/lucene/util/TestAttributeSource.java:909334
+/lucene/java/trunk/src/test/org/apache/lucene/util/TestAttributeSource.java:924483-924731,924781,925176-925462

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestNumericUtils.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestNumericUtils.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestNumericUtils.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestNumericUtils.java Tue Apr  6 19:19:27 2010
@@ -25,33 +25,37 @@ public class TestNumericUtils extends Lu
 
   public void testLongConversionAndOrdering() throws Exception {
     // generate a series of encoded longs, each numerical one bigger than the one before
-    String last=null;
+    BytesRef last=null, act=new BytesRef(NumericUtils.BUF_SIZE_LONG);
     for (long l=-100000L; l<100000L; l++) {
-      String act=NumericUtils.longToPrefixCoded(l);
+      NumericUtils.longToPrefixCoded(l, 0, act);
       if (last!=null) {
         // test if smaller
-        assertTrue("actual bigger than last", last.compareTo(act) < 0 );
+        assertTrue("actual bigger than last (BytesRef)", BytesRef.getUTF8SortedAsUTF16Comparator().compare(last, act) < 0 );
+        assertTrue("actual bigger than last (as String)", last.utf8ToString().compareTo(act.utf8ToString()) < 0 );
       }
      // test if back and forward conversion works
       assertEquals("forward and back conversion should generate same long", l, NumericUtils.prefixCodedToLong(act));
       // next step
-      last=act;
+      last = act;
+      act = new BytesRef(NumericUtils.BUF_SIZE_LONG);
     }
   }
 
   public void testIntConversionAndOrdering() throws Exception {
     // generate a series of encoded ints, each numerical one bigger than the one before
-    String last=null;
+    BytesRef last=null, act=new BytesRef(NumericUtils.BUF_SIZE_INT);
     for (int i=-100000; i<100000; i++) {
-      String act=NumericUtils.intToPrefixCoded(i);
+      NumericUtils.intToPrefixCoded(i, 0, act);
       if (last!=null) {
         // test if smaller
-        assertTrue("actual bigger than last", last.compareTo(act) < 0 );
+        assertTrue("actual bigger than last (BytesRef)", BytesRef.getUTF8SortedAsUTF16Comparator().compare(last, act) < 0 );
+        assertTrue("actual bigger than last (as String)", last.utf8ToString().compareTo(act.utf8ToString()) < 0 );
       }
      // test if back and forward conversion works
       assertEquals("forward and back conversion should generate same int", i, NumericUtils.prefixCodedToInt(act));
       // next step
       last=act;
+      act = new BytesRef(NumericUtils.BUF_SIZE_INT);
     }
   }
 
@@ -60,10 +64,11 @@ public class TestNumericUtils extends Lu
       Long.MIN_VALUE, Long.MIN_VALUE+1, Long.MIN_VALUE+2, -5003400000000L,
       -4000L, -3000L, -2000L, -1000L, -1L, 0L, 1L, 10L, 300L, 50006789999999999L, Long.MAX_VALUE-2, Long.MAX_VALUE-1, Long.MAX_VALUE
     };
-    String[] prefixVals=new String[vals.length];
+    BytesRef[] prefixVals=new BytesRef[vals.length];
     
     for (int i=0; i<vals.length; i++) {
-      prefixVals[i]=NumericUtils.longToPrefixCoded(vals[i]);
+      prefixVals[i] = new BytesRef(NumericUtils.BUF_SIZE_LONG);
+      NumericUtils.longToPrefixCoded(vals[i], 0, prefixVals[i]);
       
       // check forward and back conversion
       assertEquals( "forward and back conversion should generate same long", vals[i], NumericUtils.prefixCodedToLong(prefixVals[i]) );
@@ -79,13 +84,15 @@ public class TestNumericUtils extends Lu
     
     // check sort order (prefixVals should be ascending)
     for (int i=1; i<prefixVals.length; i++) {
-      assertTrue( "check sort order", prefixVals[i-1].compareTo( prefixVals[i] ) < 0 );
+      assertTrue( "check sort order", BytesRef.getUTF8SortedAsUTF16Comparator().compare(prefixVals[i-1], prefixVals[i] ) < 0 );
     }
         
     // check the prefix encoding, lower precision should have the difference to original value equal to the lower removed bits
+    final BytesRef ref = new BytesRef(NumericUtils.BUF_SIZE_LONG);
     for (int i=0; i<vals.length; i++) {
       for (int j=0; j<64; j++) {
-        long prefixVal=NumericUtils.prefixCodedToLong(NumericUtils.longToPrefixCoded(vals[i], j));
+        NumericUtils.longToPrefixCoded(vals[i], j, ref);
+        long prefixVal=NumericUtils.prefixCodedToLong(ref);
         long mask=(1L << j) - 1L;
         assertEquals( "difference between prefix val and original value for "+vals[i]+" with shift="+j, vals[i] & mask, vals[i]-prefixVal );
       }
@@ -97,10 +104,11 @@ public class TestNumericUtils extends Lu
       Integer.MIN_VALUE, Integer.MIN_VALUE+1, Integer.MIN_VALUE+2, -64765767,
       -4000, -3000, -2000, -1000, -1, 0, 1, 10, 300, 765878989, Integer.MAX_VALUE-2, Integer.MAX_VALUE-1, Integer.MAX_VALUE
     };
-    String[] prefixVals=new String[vals.length];
+    BytesRef[] prefixVals=new BytesRef[vals.length];
     
     for (int i=0; i<vals.length; i++) {
-      prefixVals[i]=NumericUtils.intToPrefixCoded(vals[i]);
+      prefixVals[i] = new BytesRef(NumericUtils.BUF_SIZE_INT);
+      NumericUtils.intToPrefixCoded(vals[i], 0, prefixVals[i]);
       
       // check forward and back conversion
       assertEquals( "forward and back conversion should generate same int", vals[i], NumericUtils.prefixCodedToInt(prefixVals[i]) );
@@ -116,13 +124,15 @@ public class TestNumericUtils extends Lu
     
     // check sort order (prefixVals should be ascending)
     for (int i=1; i<prefixVals.length; i++) {
-      assertTrue( "check sort order", prefixVals[i-1].compareTo( prefixVals[i] ) < 0 );
+      assertTrue( "check sort order", BytesRef.getUTF8SortedAsUTF16Comparator().compare(prefixVals[i-1], prefixVals[i] ) < 0 );
     }
     
     // check the prefix encoding, lower precision should have the difference to original value equal to the lower removed bits
+    final BytesRef ref = new BytesRef(NumericUtils.BUF_SIZE_LONG);
     for (int i=0; i<vals.length; i++) {
       for (int j=0; j<32; j++) {
-        int prefixVal=NumericUtils.prefixCodedToInt(NumericUtils.intToPrefixCoded(vals[i], j));
+        NumericUtils.intToPrefixCoded(vals[i], j, ref);
+        int prefixVal=NumericUtils.prefixCodedToInt(ref);
         int mask=(1 << j) - 1;
         assertEquals( "difference between prefix val and original value for "+vals[i]+" with shift="+j, vals[i] & mask, vals[i]-prefixVal );
       }

Modified: lucene/dev/trunk/solr/src/java/org/apache/solr/handler/AnalysisRequestHandler.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/java/org/apache/solr/handler/AnalysisRequestHandler.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/java/org/apache/solr/handler/AnalysisRequestHandler.java (original)
+++ lucene/dev/trunk/solr/src/java/org/apache/solr/handler/AnalysisRequestHandler.java Tue Apr  6 19:19:27 2010
@@ -20,12 +20,8 @@ import org.apache.commons.io.IOUtils;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
+import org.apache.lucene.analysis.tokenattributes.*;
+import org.apache.lucene.util.BytesRef;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.params.SolrParams;
@@ -139,15 +135,29 @@ public class AnalysisRequestHandler exte
     // outer is namedList since order of tokens is important
     NamedList<NamedList<Object>> tokens = new NamedList<NamedList<Object>>();
     // TODO: support custom attributes
-    TermAttribute termAtt = (TermAttribute) tstream.addAttribute(TermAttribute.class);
-    OffsetAttribute offsetAtt = (OffsetAttribute) tstream.addAttribute(OffsetAttribute.class);
-    TypeAttribute typeAtt = (TypeAttribute) tstream.addAttribute(TypeAttribute.class);
-    PositionIncrementAttribute posIncAtt = (PositionIncrementAttribute) tstream.addAttribute(PositionIncrementAttribute.class);
+    TermAttribute termAtt = null;
+    TermToBytesRefAttribute bytesAtt = null;
+    if (tstream.hasAttribute(TermAttribute.class)) {
+      termAtt = tstream.getAttribute(TermAttribute.class);
+    } else if (tstream.hasAttribute(TermToBytesRefAttribute.class)) {
+      bytesAtt = tstream.getAttribute(TermToBytesRefAttribute.class);
+    }
+    final OffsetAttribute offsetAtt = tstream.addAttribute(OffsetAttribute.class);
+    final TypeAttribute typeAtt = tstream.addAttribute(TypeAttribute.class);
+    final PositionIncrementAttribute posIncAtt = tstream.addAttribute(PositionIncrementAttribute.class);
     
+    final BytesRef bytes = new BytesRef();
     while (tstream.incrementToken()) {
       NamedList<Object> token = new SimpleOrderedMap<Object>();
       tokens.add("token", token);
-      token.add("value", new String(termAtt.termBuffer(), 0, termAtt.termLength()));
+      if (termAtt != null) {
+        token.add("value", termAtt.term());
+      }
+      if (bytesAtt != null) {
+        bytesAtt.toBytesRef(bytes);
+        // TODO: This is incorrect when numeric fields change in later lucene versions. It should use BytesRef directly!
+        token.add("value", bytes.utf8ToString());
+      }
       token.add("start", offsetAtt.startOffset());
       token.add("end", offsetAtt.endOffset());
       token.add("posInc", posIncAtt.getPositionIncrement());

Modified: lucene/dev/trunk/solr/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java (original)
+++ lucene/dev/trunk/solr/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java Tue Apr  6 19:19:27 2010
@@ -22,12 +22,8 @@ import org.apache.lucene.analysis.CharRe
 import org.apache.lucene.analysis.CharStream;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
+import org.apache.lucene.analysis.tokenattributes.*;
+import org.apache.lucene.util.BytesRef;
 import org.apache.solr.analysis.CharFilterFactory;
 import org.apache.solr.analysis.TokenFilterFactory;
 import org.apache.solr.analysis.TokenizerChain;
@@ -147,25 +143,33 @@ public abstract class AnalysisRequestHan
    */
   private List<Token> analyzeTokenStream(TokenStream tokenStream) {
     List<Token> tokens = new ArrayList<Token>();
-
+    
     // TODO change this API to support custom attributes
-    TermAttribute termAtt = (TermAttribute) 
-      tokenStream.addAttribute(TermAttribute.class);
-    OffsetAttribute offsetAtt = (OffsetAttribute) 
-      tokenStream.addAttribute(OffsetAttribute.class);
-    TypeAttribute typeAtt = (TypeAttribute) 
-      tokenStream.addAttribute(TypeAttribute.class);
-    FlagsAttribute flagsAtt = (FlagsAttribute) 
-      tokenStream.addAttribute(FlagsAttribute.class);
-    PayloadAttribute payloadAtt = (PayloadAttribute) 
-      tokenStream.addAttribute(PayloadAttribute.class);
-    PositionIncrementAttribute posIncAtt = (PositionIncrementAttribute) 
-      tokenStream.addAttribute(PositionIncrementAttribute.class);
+    TermAttribute termAtt = null;
+    TermToBytesRefAttribute bytesAtt = null;
+    if (tokenStream.hasAttribute(TermAttribute.class)) {
+      termAtt = tokenStream.getAttribute(TermAttribute.class);
+    } else if (tokenStream.hasAttribute(TermToBytesRefAttribute.class)) {
+      bytesAtt = tokenStream.getAttribute(TermToBytesRefAttribute.class);
+    }
+    final OffsetAttribute offsetAtt = tokenStream.addAttribute(OffsetAttribute.class);
+    final TypeAttribute typeAtt = tokenStream.addAttribute(TypeAttribute.class);
+    final PositionIncrementAttribute posIncAtt = tokenStream.addAttribute(PositionIncrementAttribute.class);
+    final FlagsAttribute flagsAtt = tokenStream.addAttribute(FlagsAttribute.class);
+    final PayloadAttribute payloadAtt = tokenStream.addAttribute(PayloadAttribute.class);
     
+    final BytesRef bytes = new BytesRef();
     try {
       while (tokenStream.incrementToken()) {
         Token token = new Token();
-        token.setTermBuffer(termAtt.termBuffer(), 0, termAtt.termLength());
+        if (termAtt != null) {
+          token.setTermBuffer(termAtt.term());
+        }
+        if (bytesAtt != null) {
+          bytesAtt.toBytesRef(bytes);
+          // TODO: This is incorrect when numeric fields change in later lucene versions. It should use BytesRef directly!
+          token.setTermBuffer(bytes.utf8ToString());
+        }
         token.setOffset(offsetAtt.startOffset(), offsetAtt.endOffset());
         token.setType(typeAtt.type());
         token.setFlags(flagsAtt.getFlags());

Modified: lucene/dev/trunk/solr/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java (original)
+++ lucene/dev/trunk/solr/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java Tue Apr  6 19:19:27 2010
@@ -23,6 +23,7 @@ import java.util.*;
 
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.UnicodeUtil;
 import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.util.NamedList;
@@ -80,12 +81,12 @@ public class PHPSerializedResponseWriter
 
 class PHPSerializedWriter extends JSONWriter {
   final private boolean CESU8;
-  final UnicodeUtil.UTF8Result utf8;
+  final BytesRef utf8;
 
   public PHPSerializedWriter(Writer writer, SolrQueryRequest req, SolrQueryResponse rsp, boolean CESU8) {
     super(writer, req, rsp);
     this.CESU8 = CESU8;
-    this.utf8 = CESU8 ? null : new UnicodeUtil.UTF8Result();
+    this.utf8 = CESU8 ? null : new BytesRef(10);
     // never indent serialized PHP data
     doIndent = false;
   }

Modified: lucene/dev/trunk/solr/src/java/org/apache/solr/schema/TrieDateField.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/java/org/apache/solr/schema/TrieDateField.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/java/org/apache/solr/schema/TrieDateField.java (original)
+++ lucene/dev/trunk/solr/src/java/org/apache/solr/schema/TrieDateField.java Tue Apr  6 19:19:27 2010
@@ -32,6 +32,7 @@ import org.apache.lucene.search.SortFiel
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.NumericTokenStream;
@@ -126,7 +127,10 @@ public class TrieDateField extends DateF
 
   @Override
   public String readableToIndexed(String val) {  
-    return NumericUtils.longToPrefixCoded(super.parseMath(null, val).getTime());
+    // TODO: Numeric should never be handled as String, that may break in future lucene versions! Change to use BytesRef for term texts!
+    BytesRef bytes = new BytesRef(NumericUtils.BUF_SIZE_LONG);
+    NumericUtils.longToPrefixCoded(super.parseMath(null, val).getTime(), 0, bytes);
+    return bytes.utf8ToString();
   }
 
   @Override
@@ -142,7 +146,8 @@ public class TrieDateField extends DateF
   }
 
   @Override
-  public String indexedToReadable(String indexedForm) {
+  public String indexedToReadable(String _indexedForm) {
+    final BytesRef indexedForm = new BytesRef(_indexedForm);
     return super.toExternal( new Date(NumericUtils.prefixCodedToLong(indexedForm)) );
   }
 

Modified: lucene/dev/trunk/solr/src/java/org/apache/solr/schema/TrieField.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/java/org/apache/solr/schema/TrieField.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/java/org/apache/solr/schema/TrieField.java (original)
+++ lucene/dev/trunk/solr/src/java/org/apache/solr/schema/TrieField.java Tue Apr  6 19:19:27 2010
@@ -19,6 +19,7 @@ package org.apache.solr.schema;
 import org.apache.lucene.document.Fieldable;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.search.*;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.NumericTokenStream;
@@ -322,20 +323,28 @@ public class TrieField extends FieldType
 
   @Override
   public String readableToIndexed(String val) {
+    // TODO: Numeric should never be handled as String, that may break in future lucene versions! Change to use BytesRef for term texts!
+    BytesRef bytes = new BytesRef(NumericUtils.BUF_SIZE_LONG);
     switch (type) {
       case INTEGER:
-        return NumericUtils.intToPrefixCoded(Integer.parseInt(val));
+        NumericUtils.intToPrefixCoded(Integer.parseInt(val), 0, bytes);
+        break;
       case FLOAT:
-        return NumericUtils.intToPrefixCoded(NumericUtils.floatToSortableInt(Float.parseFloat(val)));
+        NumericUtils.intToPrefixCoded(NumericUtils.floatToSortableInt(Float.parseFloat(val)), 0, bytes);
+        break;
       case LONG:
-        return NumericUtils.longToPrefixCoded(Long.parseLong(val));
+        NumericUtils.longToPrefixCoded(Long.parseLong(val), 0, bytes);
+        break;
       case DOUBLE:
-        return NumericUtils.longToPrefixCoded(NumericUtils.doubleToSortableLong(Double.parseDouble(val)));
+        NumericUtils.longToPrefixCoded(NumericUtils.doubleToSortableLong(Double.parseDouble(val)), 0, bytes);
+        break;
       case DATE:
-        return NumericUtils.longToPrefixCoded(dateField.parseMath(null, val).getTime());
+        NumericUtils.longToPrefixCoded(dateField.parseMath(null, val).getTime(), 0, bytes);
+        break;
       default:
         throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown type for trie field: " + type);
     }
+    return bytes.utf8ToString();
   }
 
 
@@ -371,7 +380,8 @@ public class TrieField extends FieldType
   }
 
   @Override
-  public String indexedToReadable(String indexedForm) {
+  public String indexedToReadable(String _indexedForm) {
+    final BytesRef indexedForm = new BytesRef(_indexedForm);
     switch (type) {
       case INTEGER:
         return Integer.toString( NumericUtils.prefixCodedToInt(indexedForm) );

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/search/TestDocSet.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/search/TestDocSet.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/search/TestDocSet.java (original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/search/TestDocSet.java Tue Apr  6 19:19:27 2010
@@ -355,7 +355,7 @@ public class TestDocSet extends TestCase
     return r;
   }
 
-  public IndexReader dummyMultiReader(int nSeg, int maxDoc) {
+  public IndexReader dummyMultiReader(int nSeg, int maxDoc) throws IOException {
     if (nSeg==1 && rand.nextBoolean()) return dummyIndexReader(rand.nextInt(maxDoc));
 
     IndexReader[] subs = new IndexReader[rand.nextInt(nSeg)+1];



Mime
View raw message